diff --git a/.circleci/config.yml b/.circleci/config.yml index 5fcb831454c..b1566e6b0cf 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -7,12 +7,12 @@ executors: golang: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.19.7 + - image: cimg/go:1.19.12 resource_class: medium+ golang-2xl: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.19.7 + - image: cimg/go:1.19.12 resource_class: 2xlarge ubuntu: docker: diff --git a/.circleci/template.yml b/.circleci/template.yml index cd8aeb663c9..33f62ee3ad8 100644 --- a/.circleci/template.yml +++ b/.circleci/template.yml @@ -7,12 +7,12 @@ executors: golang: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.19.7 + - image: cimg/go:1.19.12 resource_class: medium+ golang-2xl: docker: # Must match GO_VERSION_MIN in project root - - image: cimg/go:1.19.7 + - image: cimg/go:1.19.12 resource_class: 2xlarge ubuntu: docker: diff --git a/.gitignore b/.gitignore index 2e9dcd0ffd0..23a0631c304 100644 --- a/.gitignore +++ b/.gitignore @@ -52,3 +52,4 @@ dist/ # The following files are checked into git and result # in dirty git state if removed from the docker context !extern/filecoin-ffi/rust/filecoin.pc +!extern/test-vectors diff --git a/.golangci.yml b/.golangci.yml index fe663ef7b1f..a4cca9babd0 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,7 +12,6 @@ linters: - unconvert - staticcheck - varcheck - - structcheck - deadcode - scopelint diff --git a/CHANGELOG.md b/CHANGELOG.md index 68932d7f6c4..5045d877327 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,111 @@ # Lotus changelog +# UNRELEASED + +# v1.23.3 / 2023-08-01 + +This feature release of Lotus includes numerous improvements and enhancements for node operators, ETH RPC-providers and storage providers. + +This feature release requires a **minimum Go version of v1.19.12 or higher to successfully build Lotus**. Go version 1.20 is also supported, but 1.21 is NOT. 
+ +## Highlights + +- [Lotus now includes a Slasher tool](https://github.com/filecoin-project/lotus/pull/10928) to monitor the network for Consensus Faults, and report them as appropriate + - The Slasher investigates all incoming blocks, and assesses whether they trigger any of the three Consensus Faults defined in the Filecoin protocol + - If any faults are detected, the Slasher sends a `ReportConsensusFault` message to the faulty miner + - For more information on the Slasher, including how to run it, please find the documentation [here](https://lotus.filecoin.io/lotus/manage/slasher-and-disputer/) +- The Ethereum-like RPC exposed by Lotus is now compatible with EIP-1898: https://github.com/filecoin-project/lotus/pull/10815 +- The lotus-miner PieceReader now supports parallel reads: https://github.com/filecoin-project/lotus/pull/10913 +- Added new environment variable `LOTUS_EXEC_TRACE_CACHE_SIZE` to configure execution trace cache size ([filecoin-project/lotus#10585](https://github.com/filecoin-project/lotus/pull/10585)) + - If unset, we default to caching 16 most recent execution traces. Storage Providers may want to set this to 0, while exchanges may want to crank it up. 
+ +## New features + - feat: miner cli: sectors list upgrade-bounds tool ([filecoin-project/lotus#10923](https://github.com/filecoin-project/lotus/pull/10923)) + - Add new RPC stress testing tool (lotus-bench rpc) with rich reporting ([filecoin-project/lotus#10761](https://github.com/filecoin-project/lotus/pull/10761)) + - feat: alert: Add FVM_CONCURRENCY alert ([filecoin-project/lotus#10933](https://github.com/filecoin-project/lotus/pull/10933)) + - feat: Add eth_syncing RPC method ([filecoin-project/lotus#10719](https://github.com/filecoin-project/lotus/pull/10719)) + - feat: sealing: flag to run data_cid untied from addpiece ([filecoin-project/lotus#10797](https://github.com/filecoin-project/lotus/pull/10797)) + - feat: Lotus Gateway: add MpoolPending, ChainGetBlock and MinerGetBaseInfo ([filecoin-project/lotus#10929](https://github.com/filecoin-project/lotus/pull/10929)) + +## Improvements + - chore: update ffi & fvm ([filecoin-project/lotus#11040](https://github.com/filecoin-project/lotus/pull/11040)) + - feat: Make sure we don't store duplidate actor events caused to reorgs in events.db ([filecoin-project/lotus#11015](https://github.com/filecoin-project/lotus/pull/11015)) + - sealing: Use only non-assigned deals when selecting snap sectors ([filecoin-project/lotus#11002](https://github.com/filecoin-project/lotus/pull/11002)) + - chore: not display privatekey ([filecoin-project/lotus#11006](https://github.com/filecoin-project/lotus/pull/11006)) + - chore: shed: update actor version ([filecoin-project/lotus#11020](https://github.com/filecoin-project/lotus/pull/11020)) + - chore: migrate to boxo ([filecoin-project/lotus#10921](https://github.com/filecoin-project/lotus/pull/10921)) + - feat: deflake TestDealsWithFinalizeEarly ([filecoin-project/lotus#10978](https://github.com/filecoin-project/lotus/pull/10978)) + - fix: pubsub: do not treat ErrExistingNonce as 
Reject ([filecoin-project/lotus#10973](https://github.com/filecoin-project/lotus/pull/10973)) + - feat: deflake TestDMLevelPartialRetrieval (#10972) ([filecoin-project/lotus#10972](https://github.com/filecoin-project/lotus/pull/10972)) + - fix: eth: ensure that the event topics are non-nil ([filecoin-project/lotus#10971](https://github.com/filecoin-project/lotus/pull/10971)) + - Add comment stating msgIndex is an experimental feature ([filecoin-project/lotus#10968](https://github.com/filecoin-project/lotus/pull/10968)) + - feat: cli(compute-state) default to the tipset at the given epoch ([filecoin-project/lotus#10965](https://github.com/filecoin-project/lotus/pull/10965)) + - Upgrade urfave dependency which now supports DisableSliceFlagSeparato… ([filecoin-project/lotus#10950](https://github.com/filecoin-project/lotus/pull/10950)) + - Add new lotus-shed command for computing eth hash for a given message cid (#10961) ([filecoin-project/lotus#10961](https://github.com/filecoin-project/lotus/pull/10961)) + - Prefill GetTipsetByHeight skiplist cache on lotus startup ([filecoin-project/lotus#10955](https://github.com/filecoin-project/lotus/pull/10955)) + - Add lotus-shed command for backfilling txhash.db ([filecoin-project/lotus#10932](https://github.com/filecoin-project/lotus/pull/10932)) + - chore: deps: update to go-libp2p 0.27.5 ([filecoin-project/lotus#10948](https://github.com/filecoin-project/lotus/pull/10948)) + - Small improvement to make gen output ([filecoin-project/lotus#10951](https://github.com/filecoin-project/lotus/pull/10951)) + - fix: improve perf of msgindex backfill ([filecoin-project/lotus#10941](https://github.com/filecoin-project/lotus/pull/10941)) + - deps: update libp2p ([filecoin-project/lotus#10936](https://github.com/filecoin-project/lotus/pull/10936)) + - sealing: Improve upgrade sector selection 
([filecoin-project/lotus#10915](https://github.com/filecoin-project/lotus/pull/10915)) + - Add timing test for mpool select with a large mpool dump ([filecoin-project/lotus#10650](https://github.com/filecoin-project/lotus/pull/10650)) + - feat: slashfilter: drop outdated near-upgrade check ([filecoin-project/lotus#10925](https://github.com/filecoin-project/lotus/pull/10925)) + - opt: MinerInfo adds the PendingOwnerAddress field ([filecoin-project/lotus#10927](https://github.com/filecoin-project/lotus/pull/10927)) + - feat: itest: force PoSt more aggressively around deadline closure ([filecoin-project/lotus#10926](https://github.com/filecoin-project/lotus/pull/10926)) + - test: messagepool: gas rewards are negative if GasFeeCap too low ([filecoin-project/lotus#10649](https://github.com/filecoin-project/lotus/pull/10649)) + - fix: types: error out on decoding BlockMsg with extraneous data ([filecoin-project/lotus#10863](https://github.com/filecoin-project/lotus/pull/10863)) + - update interop upgrade schedule ([filecoin-project/lotus#10879](https://github.com/filecoin-project/lotus/pull/10879)) + - itests: Test PoSt V1_1 on workers ([filecoin-project/lotus#10732](https://github.com/filecoin-project/lotus/pull/10732)) + - Update gas_balancing.md ([filecoin-project/lotus#10924](https://github.com/filecoin-project/lotus/pull/10924)) + - feat: cli: Make compact partitions cmd better ([filecoin-project/lotus#9070](https://github.com/filecoin-project/lotus/pull/9070)) + - fix: include extra messages in ComputeState InvocResult output ([filecoin-project/lotus#10628](https://github.com/filecoin-project/lotus/pull/10628)) + - feat: pubsub: treat ErrGasFeeCapTooLow as ignore, not reject ([filecoin-project/lotus#10652](https://github.com/filecoin-project/lotus/pull/10652)) + - feat: run lotus-shed commands in context that is cancelled on sigterm 
([filecoin-project/lotus#10877](https://github.com/filecoin-project/lotus/pull/10877)) + - fix:lotus-fountain:set default data-cap same as MinVerifiedDealSize ([filecoin-project/lotus#10920](https://github.com/filecoin-project/lotus/pull/10920)) + - pass the right g-recaptcha data + - fix: not call RUnlock ([filecoin-project/lotus#10912](https://github.com/filecoin-project/lotus/pull/10912)) + - opt: cli: If present, print Events Root ([filecoin-project/lotus#10893](https://github.com/filecoin-project/lotus/pull/10893)) + - Calibration faucet UI improvements ([filecoin-project/lotus#10905](https://github.com/filecoin-project/lotus/pull/10905)) + - chore: chain: replace storetheindex with go-libipni ([filecoin-project/lotus#10841](https://github.com/filecoin-project/lotus/pull/10841)) + - Add alerts to `Lotus info` cmd ([filecoin-project/lotus#10894](https://github.com/filecoin-project/lotus/pull/10894)) + - fix: cli: make redeclare cmd work properly ([filecoin-project/lotus#10860](https://github.com/filecoin-project/lotus/pull/10860)) + - fix: shed remove datacap not working with ledger ([filecoin-project/lotus#10880](https://github.com/filecoin-project/lotus/pull/10880)) + - Check if epoch is negative in GetTipsetByHeight ([filecoin-project/lotus#10878](https://github.com/filecoin-project/lotus/pull/10878)) + - chore: update go-fil-markets ([filecoin-project/lotus#10867](https://github.com/filecoin-project/lotus/pull/10867)) + - feat: alerts: Add lotus-miner legacy-markets alert ([filecoin-project/lotus#10868](https://github.com/filecoin-project/lotus/pull/10868)) + - feat:fountain:add grant-datacap support ([filecoin-project/lotus#10856](https://github.com/filecoin-project/lotus/pull/10856)) + - feat: itests: add logs to blockminer.go failure case ([filecoin-project/lotus#10861](https://github.com/filecoin-project/lotus/pull/10861)) + - feat: eth: Add 
support for blockHash param in eth_getLogs ([filecoin-project/lotus#10782](https://github.com/filecoin-project/lotus/pull/10782)) + - lotus-fountain: make compatible with 0x addresses #10560 ([filecoin-project/lotus#10784](https://github.com/filecoin-project/lotus/pull/10784)) + - feat: deflake sector_import_simple ([filecoin-project/lotus#10858](https://github.com/filecoin-project/lotus/pull/10858)) + - fix: splitstore: remove deadlock around waiting for sync ([filecoin-project/lotus#10857](https://github.com/filecoin-project/lotus/pull/10857)) + - fix: sched: Address GET_32G_MAX_CONCURRENT regression (#10850) ([filecoin-project/lotus#10850](https://github.com/filecoin-project/lotus/pull/10850)) + - feat: fix deadlock in splitstore-mpool interaction ([filecoin-project/lotus#10840](https://github.com/filecoin-project/lotus/pull/10840)) + - chore: update go-libp2p to v0.27.3 ([filecoin-project/lotus#10671](https://github.com/filecoin-project/lotus/pull/10671)) + - libp2p: add QUIC and WebTransport to default listen addresses ([filecoin-project/lotus#10848](https://github.com/filecoin-project/lotus/pull/10848)) + - fix: ci: Debugging m1 build ([filecoin-project/lotus#10749](https://github.com/filecoin-project/lotus/pull/10749)) + - Validate that FromBlock/ToBlock epoch is indeed a hex value (#10780) ([filecoin-project/lotus#10780](https://github.com/filecoin-project/lotus/pull/10780)) + - fix: remove invalid field UpgradePriceListOopsHeight ([filecoin-project/lotus#10772](https://github.com/filecoin-project/lotus/pull/10772)) + - feat: deflake eth_balance_test ([filecoin-project/lotus#10847](https://github.com/filecoin-project/lotus/pull/10847)) + - fix: tests: Use mutex-wrapped datastore in storage tests ([filecoin-project/lotus#10846](https://github.com/filecoin-project/lotus/pull/10846)) + - Make lotus-fountain UI slightly friendlier 
([filecoin-project/lotus#10785](https://github.com/filecoin-project/lotus/pull/10785)) + - Make (un)subscribe and filter RPC methods require only read perm ([filecoin-project/lotus#10825](https://github.com/filecoin-project/lotus/pull/10825)) + - deps: Update go-jsonrpc to v0.3.1 ([filecoin-project/lotus#10845](https://github.com/filecoin-project/lotus/pull/10845)) + - feat: deflake paych_api_test ([filecoin-project/lotus#10843](https://github.com/filecoin-project/lotus/pull/10843)) + - fix: Eth RPC: do not occlude block param errors. ([filecoin-project/lotus#10534](https://github.com/filecoin-project/lotus/pull/10534)) + - feat: cli: More ux-friendly batching cmds ([filecoin-project/lotus#10837](https://github.com/filecoin-project/lotus/pull/10837)) + - fix: cli: Hide legacy markets cmds ([filecoin-project/lotus#10842](https://github.com/filecoin-project/lotus/pull/10842)) + - feat: chainstore: exit early in MaybeTakeHeavierTipset ([filecoin-project/lotus#10839](https://github.com/filecoin-project/lotus/pull/10839)) + - fix: itest: fix eth deploy test flake ([filecoin-project/lotus#10829](https://github.com/filecoin-project/lotus/pull/10829)) + - style: mempool: chain errors using xerrors.Errorf ([filecoin-project/lotus#10836](https://github.com/filecoin-project/lotus/pull/10836)) + - feat: deflake msgindex_test.go ([filecoin-project/lotus#10826](https://github.com/filecoin-project/lotus/pull/10826)) + - feat: deflake TestEthFeeHistory ([filecoin-project/lotus#10816](https://github.com/filecoin-project/lotus/pull/10816)) + - feat: make RunClientTest louder when deals fail ([filecoin-project/lotus#10817](https://github.com/filecoin-project/lotus/pull/10817)) + - fix: cli: Change arg wording in change-beneficiary cmd ([filecoin-project/lotus#10823](https://github.com/filecoin-project/lotus/pull/10823)) + - refactor: streamline error handling in 
CheckPendingMessages (#10818) ([filecoin-project/lotus#10818](https://github.com/filecoin-project/lotus/pull/10818)) + - feat: Add tmp indices to events table while performing migration to V2 + # v1.23.2 / 2023-06-28 This is a patch release on top of 1.23.1 containing the fix for https://github.com/filecoin-project/lotus/issues/10906 diff --git a/Dockerfile b/Dockerfile index dfdfedce328..99625eea103 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ ##################################### -FROM golang:1.19.7-buster AS lotus-builder +FROM golang:1.19.12-bullseye AS lotus-builder MAINTAINER Lotus Development Team RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev @@ -58,7 +58,7 @@ COPY --from=lotus-builder /lib/*/libgcc_s.so.1 /lib/ COPY --from=lotus-builder /lib/*/libutil.so.1 /lib/ COPY --from=lotus-builder /usr/lib/*/libltdl.so.7 /lib/ COPY --from=lotus-builder /usr/lib/*/libnuma.so.1 /lib/ -COPY --from=lotus-builder /usr/lib/*/libhwloc.so.5 /lib/ +COPY --from=lotus-builder /usr/lib/*/libhwloc.so.* /lib/ COPY --from=lotus-builder /usr/lib/*/libOpenCL.so.1 /lib/ RUN useradd -r -u 532 -U fc \ diff --git a/Dockerfile.lotus b/Dockerfile.lotus deleted file mode 100644 index 91373b62f8d..00000000000 --- a/Dockerfile.lotus +++ /dev/null @@ -1,273 +0,0 @@ -##### DEPRECATED - -FROM golang:1.19.7-buster AS builder-deps -MAINTAINER Lotus Development Team - -RUN apt-get update && apt-get install -y ca-certificates build-essential clang ocl-icd-opencl-dev ocl-icd-libopencl1 jq libhwloc-dev - -ENV XDG_CACHE_HOME="/tmp" - -### taken from https://github.com/rust-lang/docker-rust/blob/master/1.63.0/buster/Dockerfile -ENV RUSTUP_HOME=/usr/local/rustup \ - CARGO_HOME=/usr/local/cargo \ - PATH=/usr/local/cargo/bin:$PATH \ - RUST_VERSION=1.63.0 - -RUN set -eux; \ - dpkgArch="$(dpkg --print-architecture)"; \ - case "${dpkgArch##*-}" in \ - amd64) 
rustArch='x86_64-unknown-linux-gnu'; rustupSha256='5cc9ffd1026e82e7fb2eec2121ad71f4b0f044e88bca39207b3f6b769aaa799c' ;; \ - arm64) rustArch='aarch64-unknown-linux-gnu'; rustupSha256='e189948e396d47254103a49c987e7fb0e5dd8e34b200aa4481ecc4b8e41fb929' ;; \ - *) echo >&2 "unsupported architecture: ${dpkgArch}"; exit 1 ;; \ - esac; \ - url="https://static.rust-lang.org/rustup/archive/1.25.1/${rustArch}/rustup-init"; \ - wget "$url"; \ - echo "${rustupSha256} *rustup-init" | sha256sum -c -; \ - chmod +x rustup-init; \ - ./rustup-init -y --no-modify-path --profile minimal --default-toolchain $RUST_VERSION --default-host ${rustArch}; \ - rm rustup-init; \ - chmod -R a+w $RUSTUP_HOME $CARGO_HOME; \ - rustup --version; \ - cargo --version; \ - rustc --version; -### end rust - -FROM builder-deps AS builder-local -MAINTAINER Lotus Development Team - -COPY ./ /opt/filecoin -WORKDIR /opt/filecoin - -### make configurable filecoin-ffi build -ARG FFI_BUILD_FROM_SOURCE=0 -ENV FFI_BUILD_FROM_SOURCE=${FFI_BUILD_FROM_SOURCE} - -RUN make clean deps - - -FROM builder-local AS builder-test -MAINTAINER Lotus Development Team - -WORKDIR /opt/filecoin - -RUN make debug - - -FROM builder-local AS builder -MAINTAINER Lotus Development Team - -WORKDIR /opt/filecoin - -ARG RUSTFLAGS="" -ARG GOFLAGS="" - -RUN make lotus lotus-miner lotus-worker lotus-shed lotus-wallet lotus-gateway lotus-stats - - -FROM ubuntu:20.04 AS base -MAINTAINER Lotus Development Team - -# Base resources -COPY --from=builder /etc/ssl/certs /etc/ssl/certs -COPY --from=builder /lib/*/libdl.so.2 /lib/ -COPY --from=builder /lib/*/librt.so.1 /lib/ -COPY --from=builder /lib/*/libgcc_s.so.1 /lib/ -COPY --from=builder /lib/*/libutil.so.1 /lib/ -COPY --from=builder /usr/lib/*/libltdl.so.7 /lib/ -COPY --from=builder /usr/lib/*/libnuma.so.1 /lib/ -COPY --from=builder /usr/lib/*/libhwloc.so.5 /lib/ -COPY --from=builder /usr/lib/*/libOpenCL.so.1 /lib/ - -RUN useradd -r -u 532 -U fc \ - && mkdir -p /etc/OpenCL/vendors \ - && echo 
"libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd - -### -FROM base AS lotus -MAINTAINER Lotus Development Team - -COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ -COPY scripts/docker-lotus-entrypoint.sh / - -ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV LOTUS_PATH /var/lib/lotus -ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car -ENV DOCKER_LOTUS_IMPORT_WALLET "" - -RUN mkdir /var/lib/lotus /var/tmp/filecoin-proof-parameters -RUN chown fc: /var/lib/lotus /var/tmp/filecoin-proof-parameters - -VOLUME /var/lib/lotus -VOLUME /var/tmp/filecoin-proof-parameters - -USER fc - -EXPOSE 1234 - -ENTRYPOINT ["/docker-lotus-entrypoint.sh"] - -CMD ["-help"] - -### -FROM base AS lotus-wallet -MAINTAINER Lotus Development Team - -COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ - -ENV WALLET_PATH /var/lib/lotus-wallet - -RUN mkdir /var/lib/lotus-wallet -RUN chown fc: /var/lib/lotus-wallet - -VOLUME /var/lib/lotus-wallet - -USER fc - -EXPOSE 1777 - -ENTRYPOINT ["/usr/local/bin/lotus-wallet"] - -CMD ["-help"] - -### -FROM base AS lotus-gateway -MAINTAINER Lotus Development Team - -COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ - -USER fc - -EXPOSE 1234 - -ENTRYPOINT ["/usr/local/bin/lotus-gateway"] - -CMD ["-help"] - - -### -FROM base AS lotus-miner -MAINTAINER Lotus Development Team - -COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ -COPY scripts/docker-lotus-miner-entrypoint.sh / - -ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV LOTUS_MINER_PATH /var/lib/lotus-miner - -RUN mkdir /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters -RUN chown fc: /var/lib/lotus-miner /var/tmp/filecoin-proof-parameters - -VOLUME /var/lib/lotus-miner -VOLUME /var/tmp/filecoin-proof-parameters - -USER fc - -EXPOSE 2345 - -ENTRYPOINT 
["/docker-lotus-miner-entrypoint.sh"] - -CMD ["-help"] - - -### -FROM base AS lotus-worker -MAINTAINER Lotus Development Team - -COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ - -ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV LOTUS_WORKER_PATH /var/lib/lotus-worker - -RUN mkdir /var/lib/lotus-worker -RUN chown fc: /var/lib/lotus-worker - -VOLUME /var/lib/lotus-worker - -USER fc - -EXPOSE 3456 - -ENTRYPOINT ["/usr/local/bin/lotus-worker"] - -CMD ["-help"] - - -### -from base as lotus-all-in-one - -ENV FILECOIN_PARAMETER_CACHE /var/tmp/filecoin-proof-parameters -ENV LOTUS_MINER_PATH /var/lib/lotus-miner -ENV LOTUS_PATH /var/lib/lotus -ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV WALLET_PATH /var/lib/lotus-wallet -ENV DOCKER_LOTUS_IMPORT_SNAPSHOT https://fil-chain-snapshots-fallback.s3.amazonaws.com/mainnet/minimal_finality_stateroots_latest.car - -COPY --from=builder /opt/filecoin/lotus /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-shed /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-wallet /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-gateway /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-miner /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-worker /usr/local/bin/ -COPY --from=builder /opt/filecoin/lotus-stats /usr/local/bin/ - -RUN mkdir /var/tmp/filecoin-proof-parameters -RUN mkdir /var/lib/lotus -RUN mkdir /var/lib/lotus-miner -RUN mkdir /var/lib/lotus-worker -RUN mkdir /var/lib/lotus-wallet -RUN chown fc: /var/tmp/filecoin-proof-parameters -RUN chown fc: /var/lib/lotus -RUN chown fc: /var/lib/lotus-miner -RUN chown fc: /var/lib/lotus-worker -RUN chown fc: /var/lib/lotus-wallet - - -VOLUME /var/tmp/filecoin-proof-parameters -VOLUME /var/lib/lotus -VOLUME /var/lib/lotus-miner -VOLUME /var/lib/lotus-worker -VOLUME /var/lib/lotus-wallet - -EXPOSE 1234 -EXPOSE 2345 -EXPOSE 3456 -EXPOSE 1777 - -### -from base as lotus-test - -ENV FILECOIN_PARAMETER_CACHE 
/var/tmp/filecoin-proof-parameters -ENV LOTUS_MINER_PATH /var/lib/lotus-miner -ENV LOTUS_PATH /var/lib/lotus -ENV LOTUS_WORKER_PATH /var/lib/lotus-worker -ENV WALLET_PATH /var/lib/lotus-wallet - -COPY --from=builder-test /opt/filecoin/lotus /usr/local/bin/ -COPY --from=builder-test /opt/filecoin/lotus-miner /usr/local/bin/ -COPY --from=builder-test /opt/filecoin/lotus-worker /usr/local/bin/ -COPY --from=builder-test /opt/filecoin/lotus-seed /usr/local/bin/ - -RUN mkdir /var/tmp/filecoin-proof-parameters -RUN mkdir /var/lib/lotus -RUN mkdir /var/lib/lotus-miner -RUN mkdir /var/lib/lotus-worker -RUN mkdir /var/lib/lotus-wallet -RUN chown fc: /var/tmp/filecoin-proof-parameters -RUN chown fc: /var/lib/lotus -RUN chown fc: /var/lib/lotus-miner -RUN chown fc: /var/lib/lotus-worker -RUN chown fc: /var/lib/lotus-wallet - - -VOLUME /var/tmp/filecoin-proof-parameters -VOLUME /var/lib/lotus -VOLUME /var/lib/lotus-miner -VOLUME /var/lib/lotus-worker -VOLUME /var/lib/lotus-wallet - -EXPOSE 1234 -EXPOSE 2345 -EXPOSE 3456 -EXPOSE 1777 - diff --git a/GO_VERSION_MIN b/GO_VERSION_MIN index 98adfe8e122..e54f3135a7d 100644 --- a/GO_VERSION_MIN +++ b/GO_VERSION_MIN @@ -1 +1 @@ -1.19.7 +1.19.12 diff --git a/Makefile b/Makefile index d1e7d159a11..429e93f536d 100644 --- a/Makefile +++ b/Makefile @@ -355,7 +355,7 @@ fiximports: ./scripts/fiximports gen: actors-code-gen type-gen cfgdoc-gen docsgen api-gen circleci fiximports - @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO MAKE docsgen-cli" + @echo ">>> IF YOU'VE MODIFIED THE CLI OR CONFIG, REMEMBER TO ALSO RUN 'make docsgen-cli'" .PHONY: gen jen: gen diff --git a/README.md b/README.md index b67cb952f18..f6ac7593222 100644 --- a/README.md +++ b/README.md @@ -71,10 +71,10 @@ For other distributions you can find the required dependencies [here.](https://l #### Go -To build Lotus, you need a working installation of [Go 1.19.7 or higher](https://golang.org/dl/): +To build Lotus, you need a working installation of [Go 1.19.12 
or higher](https://golang.org/dl/): ```bash -wget -c https://golang.org/dl/go1.19.7.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local +wget -c https://golang.org/dl/go1.19.12.linux-amd64.tar.gz -O - | sudo tar -xz -C /usr/local ``` **TIP:** diff --git a/api/api_full.go b/api/api_full.go index 06a14b76e5a..591799b4883 100644 --- a/api/api_full.go +++ b/api/api_full.go @@ -796,31 +796,32 @@ type FullNode interface { // EthGetBlockTransactionCountByHash returns the number of messages in the TipSet EthGetBlockTransactionCountByHash(ctx context.Context, blkHash ethtypes.EthHash) (ethtypes.EthUint64, error) //perm:read - EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read - EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read - EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read - EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read - EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read - EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read - EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) //perm:read - EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read - EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read - EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read - EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) 
//perm:read - - EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error) //perm:read - EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) //perm:read - EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error) //perm:read - EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read - NetVersion(ctx context.Context) (string, error) //perm:read - NetListening(ctx context.Context) (bool, error) //perm:read - EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read - EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read - EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read - - EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read - EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read - EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error) //perm:read + EthGetBlockByHash(ctx context.Context, blkHash ethtypes.EthHash, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read + EthGetBlockByNumber(ctx context.Context, blkNum string, fullTxInfo bool) (ethtypes.EthBlock, error) //perm:read + EthGetTransactionByHash(ctx context.Context, txHash *ethtypes.EthHash) (*ethtypes.EthTx, error) //perm:read + EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit abi.ChainEpoch) (*ethtypes.EthTx, error) //perm:read + EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) //perm:read + EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) //perm:read + EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, 
error) //perm:read + EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) //perm:read + EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) //perm:read + EthGetTransactionByBlockHashAndIndex(ctx context.Context, blkHash ethtypes.EthHash, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read + EthGetTransactionByBlockNumberAndIndex(ctx context.Context, blkNum ethtypes.EthUint64, txIndex ethtypes.EthUint64) (ethtypes.EthTx, error) //perm:read + + EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read + EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read + EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) //perm:read + EthChainId(ctx context.Context) (ethtypes.EthUint64, error) //perm:read + EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error) //perm:read + NetVersion(ctx context.Context) (string, error) //perm:read + NetListening(ctx context.Context) (bool, error) //perm:read + EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) //perm:read + EthGasPrice(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read + EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) //perm:read + + EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) //perm:read + EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) //perm:read + EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) //perm:read EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) //perm:read @@ 
-829,23 +830,23 @@ type FullNode interface { // Polling method for a filter, returns event logs which occurred since last poll. // (requires write perm since timestamp of last filter execution will be written) - EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write + EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read // Returns event logs matching filter with given id. // (requires write perm since timestamp of last filter execution will be written) - EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:write + EthGetFilterLogs(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) //perm:read // Installs a persistent filter based on given filter spec. - EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:write + EthNewFilter(ctx context.Context, filter *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) //perm:read // Installs a persistent filter to notify when a new block arrives. - EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write + EthNewBlockFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read // Installs a persistent filter to notify when new messages arrive in the message pool. - EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:write + EthNewPendingTransactionFilter(ctx context.Context) (ethtypes.EthFilterID, error) //perm:read // Uninstalls a filter with given id. 
- EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:write + EthUninstallFilter(ctx context.Context, id ethtypes.EthFilterID) (bool, error) //perm:read // Subscribe to different event types using websockets // eventTypes is one or more of: @@ -854,10 +855,10 @@ type FullNode interface { // - logs: notify new event logs that match a criteria // params contains additional parameters used with the log event type // The client will receive a stream of EthSubscriptionResponse values until EthUnsubscribe is called. - EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:write + EthSubscribe(ctx context.Context, params jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) //perm:read // Unsubscribe from a websocket subscription - EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:write + EthUnsubscribe(ctx context.Context, id ethtypes.EthSubscriptionID) (bool, error) //perm:read // Returns the client version Web3ClientVersion(ctx context.Context) (string, error) //perm:read diff --git a/api/api_gateway.go b/api/api_gateway.go index f4d6c20a03b..f6740e1e067 100644 --- a/api/api_gateway.go +++ b/api/api_gateway.go @@ -33,6 +33,9 @@ import ( // * Generate openrpc blobs type Gateway interface { + MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) + ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) + MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*MiningBaseInfo, error) StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (MinerSectors, error) GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) StateReplay(context.Context, types.TipSetKey, cid.Cid) (*InvocResult, error) @@ -91,13 +94,14 @@ type Gateway interface { EthGetTransactionByHashLimited(ctx context.Context, txHash *ethtypes.EthHash, limit 
abi.ChainEpoch) (*ethtypes.EthTx, error) EthGetTransactionHashByCid(ctx context.Context, cid cid.Cid) (*ethtypes.EthHash, error) EthGetMessageCidByTransactionHash(ctx context.Context, txHash *ethtypes.EthHash) (*cid.Cid, error) - EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkOpt string) (ethtypes.EthUint64, error) + EthGetTransactionCount(ctx context.Context, sender ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) EthGetTransactionReceipt(ctx context.Context, txHash ethtypes.EthHash) (*EthTxReceipt, error) EthGetTransactionReceiptLimited(ctx context.Context, txHash ethtypes.EthHash, limit abi.ChainEpoch) (*EthTxReceipt, error) - EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkOpt string) (ethtypes.EthBytes, error) - EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam string) (ethtypes.EthBytes, error) - EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam string) (ethtypes.EthBigInt, error) + EthGetCode(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) + EthGetStorageAt(ctx context.Context, address ethtypes.EthAddress, position ethtypes.EthBytes, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) + EthGetBalance(ctx context.Context, address ethtypes.EthAddress, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) EthChainId(ctx context.Context) (ethtypes.EthUint64, error) + EthSyncing(ctx context.Context) (ethtypes.EthSyncingResult, error) NetVersion(ctx context.Context) (string, error) NetListening(ctx context.Context) (bool, error) EthProtocolVersion(ctx context.Context) (ethtypes.EthUint64, error) @@ -105,7 +109,7 @@ type Gateway interface { EthFeeHistory(ctx context.Context, p jsonrpc.RawParams) (ethtypes.EthFeeHistory, error) EthMaxPriorityFeePerGas(ctx context.Context) (ethtypes.EthBigInt, error) 
EthEstimateGas(ctx context.Context, tx ethtypes.EthCall) (ethtypes.EthUint64, error) - EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam string) (ethtypes.EthBytes, error) + EthCall(ctx context.Context, tx ethtypes.EthCall, blkParam ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) EthSendRawTransaction(ctx context.Context, rawTx ethtypes.EthBytes) (ethtypes.EthHash, error) EthGetLogs(ctx context.Context, filter *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) EthGetFilterChanges(ctx context.Context, id ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) diff --git a/api/docgen/docgen.go b/api/docgen/docgen.go index 7a9993bb782..01862960076 100644 --- a/api/docgen/docgen.go +++ b/api/docgen/docgen.go @@ -86,6 +86,7 @@ func init() { } ExampleValues[reflect.TypeOf(addr)] = addr + ExampleValues[reflect.TypeOf(&addr)] = &addr pid, err := peer.Decode("12D3KooWGzxzKZYveHXtpG6AsrUJBcWxHBFS2HsEoGTxrMLvKXtf") if err != nil { @@ -479,6 +480,9 @@ func ExampleValue(method string, t, parent reflect.Type) interface{} { es := exampleStruct(method, t.Elem(), t) ExampleValues[t] = es return es + } else if t.Elem().Kind() == reflect.String { + str := "string value" + return &str } case reflect.Interface: return struct{}{} diff --git a/api/eth_aliases.go b/api/eth_aliases.go index ccf0317d951..ca0f861ac73 100644 --- a/api/eth_aliases.go +++ b/api/eth_aliases.go @@ -21,6 +21,7 @@ func CreateEthRPCAliases(as apitypes.Aliaser) { as.AliasMethod("eth_getStorageAt", "Filecoin.EthGetStorageAt") as.AliasMethod("eth_getBalance", "Filecoin.EthGetBalance") as.AliasMethod("eth_chainId", "Filecoin.EthChainId") + as.AliasMethod("eth_syncing", "Filecoin.EthSyncing") as.AliasMethod("eth_feeHistory", "Filecoin.EthFeeHistory") as.AliasMethod("eth_protocolVersion", "Filecoin.EthProtocolVersion") as.AliasMethod("eth_maxPriorityFeePerGas", "Filecoin.EthMaxPriorityFeePerGas") diff --git a/api/mocks/mock_full.go b/api/mocks/mock_full.go index 12632bc2dd8..a1e9c123015 100644 
--- a/api/mocks/mock_full.go +++ b/api/mocks/mock_full.go @@ -1012,7 +1012,7 @@ func (mr *MockFullNodeMockRecorder) EthBlockNumber(arg0 interface{}) *gomock.Cal } // EthCall mocks base method. -func (m *MockFullNode) EthCall(arg0 context.Context, arg1 ethtypes.EthCall, arg2 string) (ethtypes.EthBytes, error) { +func (m *MockFullNode) EthCall(arg0 context.Context, arg1 ethtypes.EthCall, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EthCall", arg0, arg1, arg2) ret0, _ := ret[0].(ethtypes.EthBytes) @@ -1087,7 +1087,7 @@ func (mr *MockFullNodeMockRecorder) EthGasPrice(arg0 interface{}) *gomock.Call { } // EthGetBalance mocks base method. -func (m *MockFullNode) EthGetBalance(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthBigInt, error) { +func (m *MockFullNode) EthGetBalance(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EthGetBalance", arg0, arg1, arg2) ret0, _ := ret[0].(ethtypes.EthBigInt) @@ -1162,7 +1162,7 @@ func (mr *MockFullNodeMockRecorder) EthGetBlockTransactionCountByNumber(arg0, ar } // EthGetCode mocks base method. -func (m *MockFullNode) EthGetCode(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthBytes, error) { +func (m *MockFullNode) EthGetCode(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EthGetCode", arg0, arg1, arg2) ret0, _ := ret[0].(ethtypes.EthBytes) @@ -1237,7 +1237,7 @@ func (mr *MockFullNodeMockRecorder) EthGetMessageCidByTransactionHash(arg0, arg1 } // EthGetStorageAt mocks base method. 
-func (m *MockFullNode) EthGetStorageAt(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBytes, arg3 string) (ethtypes.EthBytes, error) { +func (m *MockFullNode) EthGetStorageAt(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBytes, arg3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EthGetStorageAt", arg0, arg1, arg2, arg3) ret0, _ := ret[0].(ethtypes.EthBytes) @@ -1312,7 +1312,7 @@ func (mr *MockFullNodeMockRecorder) EthGetTransactionByHashLimited(arg0, arg1, a } // EthGetTransactionCount mocks base method. -func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 string) (ethtypes.EthUint64, error) { +func (m *MockFullNode) EthGetTransactionCount(arg0 context.Context, arg1 ethtypes.EthAddress, arg2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "EthGetTransactionCount", arg0, arg1, arg2) ret0, _ := ret[0].(ethtypes.EthUint64) @@ -1476,6 +1476,21 @@ func (mr *MockFullNodeMockRecorder) EthSubscribe(arg0, arg1 interface{}) *gomock return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSubscribe", reflect.TypeOf((*MockFullNode)(nil).EthSubscribe), arg0, arg1) } +// EthSyncing mocks base method. +func (m *MockFullNode) EthSyncing(arg0 context.Context) (ethtypes.EthSyncingResult, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "EthSyncing", arg0) + ret0, _ := ret[0].(ethtypes.EthSyncingResult) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// EthSyncing indicates an expected call of EthSyncing. +func (mr *MockFullNodeMockRecorder) EthSyncing(arg0 interface{}) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "EthSyncing", reflect.TypeOf((*MockFullNode)(nil).EthSyncing), arg0) +} + // EthUninstallFilter mocks base method. 
func (m *MockFullNode) EthUninstallFilter(arg0 context.Context, arg1 ethtypes.EthFilterID) (bool, error) { m.ctrl.T.Helper() diff --git a/api/proxy_gen.go b/api/proxy_gen.go index cbd2acacf1f..ce4ec3d1e34 100644 --- a/api/proxy_gen.go +++ b/api/proxy_gen.go @@ -252,7 +252,7 @@ type FullNodeMethods struct { EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"` - EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) `perm:"read"` + EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `perm:"read"` EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"` @@ -262,7 +262,7 @@ type FullNodeMethods struct { EthGasPrice func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"` - EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) `perm:"read"` + EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) `perm:"read"` EthGetBlockByHash func(p0 context.Context, p1 ethtypes.EthHash, p2 bool) (ethtypes.EthBlock, error) `perm:"read"` @@ -272,17 +272,17 @@ type FullNodeMethods struct { EthGetBlockTransactionCountByNumber func(p0 context.Context, p1 ethtypes.EthUint64) (ethtypes.EthUint64, error) `perm:"read"` - EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) `perm:"read"` + EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `perm:"read"` - EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"write"` + EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"read"` - EthGetFilterLogs func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"write"` 
+ EthGetFilterLogs func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `perm:"read"` EthGetLogs func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (*ethtypes.EthFilterResult, error) `perm:"read"` EthGetMessageCidByTransactionHash func(p0 context.Context, p1 *ethtypes.EthHash) (*cid.Cid, error) `perm:"read"` - EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) `perm:"read"` + EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `perm:"read"` EthGetTransactionByBlockHashAndIndex func(p0 context.Context, p1 ethtypes.EthHash, p2 ethtypes.EthUint64) (ethtypes.EthTx, error) `perm:"read"` @@ -292,7 +292,7 @@ type FullNodeMethods struct { EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `perm:"read"` - EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `perm:"read"` + EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) `perm:"read"` EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `perm:"read"` @@ -302,21 +302,23 @@ type FullNodeMethods struct { EthMaxPriorityFeePerGas func(p0 context.Context) (ethtypes.EthBigInt, error) `perm:"read"` - EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"write"` + EthNewBlockFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"read"` - EthNewFilter func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) `perm:"write"` + EthNewFilter func(p0 context.Context, p1 *ethtypes.EthFilterSpec) (ethtypes.EthFilterID, error) `perm:"read"` - EthNewPendingTransactionFilter func(p0 context.Context) (ethtypes.EthFilterID, error) 
`perm:"write"` + EthNewPendingTransactionFilter func(p0 context.Context) (ethtypes.EthFilterID, error) `perm:"read"` EthProtocolVersion func(p0 context.Context) (ethtypes.EthUint64, error) `perm:"read"` EthSendRawTransaction func(p0 context.Context, p1 ethtypes.EthBytes) (ethtypes.EthHash, error) `perm:"read"` - EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `perm:"write"` + EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `perm:"read"` - EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"write"` + EthSyncing func(p0 context.Context) (ethtypes.EthSyncingResult, error) `perm:"read"` - EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"write"` + EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `perm:"read"` + + EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `perm:"read"` FilecoinAddressToEthAddress func(p0 context.Context, p1 address.Address) (ethtypes.EthAddress, error) `perm:"read"` @@ -630,6 +632,8 @@ type GatewayStruct struct { } type GatewayMethods struct { + ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `` + ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) `` ChainGetGenesis func(p0 context.Context) (*types.TipSet, error) `` @@ -664,7 +668,7 @@ type GatewayMethods struct { EthBlockNumber func(p0 context.Context) (ethtypes.EthUint64, error) `` - EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) `` + EthCall func(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `` EthChainId func(p0 context.Context) (ethtypes.EthUint64, error) `` @@ -674,7 +678,7 @@ type GatewayMethods struct { EthGasPrice func(p0 context.Context) (ethtypes.EthBigInt, error) `` - EthGetBalance 
func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) `` + EthGetBalance func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) `` EthGetBlockByHash func(p0 context.Context, p1 ethtypes.EthHash, p2 bool) (ethtypes.EthBlock, error) `` @@ -684,7 +688,7 @@ type GatewayMethods struct { EthGetBlockTransactionCountByNumber func(p0 context.Context, p1 ethtypes.EthUint64) (ethtypes.EthUint64, error) `` - EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) `` + EthGetCode func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `` EthGetFilterChanges func(p0 context.Context, p1 ethtypes.EthFilterID) (*ethtypes.EthFilterResult, error) `` @@ -694,13 +698,13 @@ type GatewayMethods struct { EthGetMessageCidByTransactionHash func(p0 context.Context, p1 *ethtypes.EthHash) (*cid.Cid, error) `` - EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) `` + EthGetStorageAt func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) `` EthGetTransactionByHash func(p0 context.Context, p1 *ethtypes.EthHash) (*ethtypes.EthTx, error) `` EthGetTransactionByHashLimited func(p0 context.Context, p1 *ethtypes.EthHash, p2 abi.ChainEpoch) (*ethtypes.EthTx, error) `` - EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) `` + EthGetTransactionCount func(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) `` EthGetTransactionHashByCid func(p0 context.Context, p1 cid.Cid) (*ethtypes.EthHash, error) `` @@ -722,6 +726,8 @@ type GatewayMethods struct { EthSubscribe func(p0 context.Context, p1 jsonrpc.RawParams) (ethtypes.EthSubscriptionID, error) `` + EthSyncing 
func(p0 context.Context) (ethtypes.EthSyncingResult, error) `` + EthUninstallFilter func(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) `` EthUnsubscribe func(p0 context.Context, p1 ethtypes.EthSubscriptionID) (bool, error) `` @@ -730,8 +736,12 @@ type GatewayMethods struct { GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` + MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) `` + MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `` + MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `` + MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `` MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `` @@ -2081,14 +2091,14 @@ func (s *FullNodeStub) EthBlockNumber(p0 context.Context) (ethtypes.EthUint64, e return *new(ethtypes.EthUint64), ErrNotSupported } -func (s *FullNodeStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) { +func (s *FullNodeStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { if s.Internal.EthCall == nil { return *new(ethtypes.EthBytes), ErrNotSupported } return s.Internal.EthCall(p0, p1, p2) } -func (s *FullNodeStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) { +func (s *FullNodeStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { return *new(ethtypes.EthBytes), ErrNotSupported } @@ -2136,14 +2146,14 @@ func (s *FullNodeStub) EthGasPrice(p0 context.Context) (ethtypes.EthBigInt, erro return *new(ethtypes.EthBigInt), ErrNotSupported } -func (s *FullNodeStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 
string) (ethtypes.EthBigInt, error) { +func (s *FullNodeStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) { if s.Internal.EthGetBalance == nil { return *new(ethtypes.EthBigInt), ErrNotSupported } return s.Internal.EthGetBalance(p0, p1, p2) } -func (s *FullNodeStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) { +func (s *FullNodeStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) { return *new(ethtypes.EthBigInt), ErrNotSupported } @@ -2191,14 +2201,14 @@ func (s *FullNodeStub) EthGetBlockTransactionCountByNumber(p0 context.Context, p return *new(ethtypes.EthUint64), ErrNotSupported } -func (s *FullNodeStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) { +func (s *FullNodeStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { if s.Internal.EthGetCode == nil { return *new(ethtypes.EthBytes), ErrNotSupported } return s.Internal.EthGetCode(p0, p1, p2) } -func (s *FullNodeStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) { +func (s *FullNodeStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { return *new(ethtypes.EthBytes), ErrNotSupported } @@ -2246,14 +2256,14 @@ func (s *FullNodeStub) EthGetMessageCidByTransactionHash(p0 context.Context, p1 return nil, ErrNotSupported } -func (s *FullNodeStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) { +func (s *FullNodeStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { if s.Internal.EthGetStorageAt == nil { return 
*new(ethtypes.EthBytes), ErrNotSupported } return s.Internal.EthGetStorageAt(p0, p1, p2, p3) } -func (s *FullNodeStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) { +func (s *FullNodeStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { return *new(ethtypes.EthBytes), ErrNotSupported } @@ -2301,14 +2311,14 @@ func (s *FullNodeStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *et return nil, ErrNotSupported } -func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) { +func (s *FullNodeStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) { if s.Internal.EthGetTransactionCount == nil { return *new(ethtypes.EthUint64), ErrNotSupported } return s.Internal.EthGetTransactionCount(p0, p1, p2) } -func (s *FullNodeStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) { +func (s *FullNodeStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) { return *new(ethtypes.EthUint64), ErrNotSupported } @@ -2422,6 +2432,17 @@ func (s *FullNodeStub) EthSubscribe(p0 context.Context, p1 jsonrpc.RawParams) (e return *new(ethtypes.EthSubscriptionID), ErrNotSupported } +func (s *FullNodeStruct) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) { + if s.Internal.EthSyncing == nil { + return *new(ethtypes.EthSyncingResult), ErrNotSupported + } + return s.Internal.EthSyncing(p0) +} + +func (s *FullNodeStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) { + return *new(ethtypes.EthSyncingResult), ErrNotSupported +} + func (s *FullNodeStruct) EthUninstallFilter(p0 context.Context, p1 
ethtypes.EthFilterID) (bool, error) { if s.Internal.EthUninstallFilter == nil { return false, ErrNotSupported @@ -4094,6 +4115,17 @@ func (s *FullNodeStub) Web3ClientVersion(p0 context.Context) (string, error) { return "", ErrNotSupported } +func (s *GatewayStruct) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + if s.Internal.ChainGetBlock == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainGetBlock(p0, p1) +} + +func (s *GatewayStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*BlockMessages, error) { if s.Internal.ChainGetBlockMessages == nil { return nil, ErrNotSupported @@ -4281,14 +4313,14 @@ func (s *GatewayStub) EthBlockNumber(p0 context.Context) (ethtypes.EthUint64, er return *new(ethtypes.EthUint64), ErrNotSupported } -func (s *GatewayStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) { +func (s *GatewayStruct) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { if s.Internal.EthCall == nil { return *new(ethtypes.EthBytes), ErrNotSupported } return s.Internal.EthCall(p0, p1, p2) } -func (s *GatewayStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 string) (ethtypes.EthBytes, error) { +func (s *GatewayStub) EthCall(p0 context.Context, p1 ethtypes.EthCall, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { return *new(ethtypes.EthBytes), ErrNotSupported } @@ -4336,14 +4368,14 @@ func (s *GatewayStub) EthGasPrice(p0 context.Context) (ethtypes.EthBigInt, error return *new(ethtypes.EthBigInt), ErrNotSupported } -func (s *GatewayStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) { +func (s *GatewayStruct) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) 
(ethtypes.EthBigInt, error) { if s.Internal.EthGetBalance == nil { return *new(ethtypes.EthBigInt), ErrNotSupported } return s.Internal.EthGetBalance(p0, p1, p2) } -func (s *GatewayStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBigInt, error) { +func (s *GatewayStub) EthGetBalance(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBigInt, error) { return *new(ethtypes.EthBigInt), ErrNotSupported } @@ -4391,14 +4423,14 @@ func (s *GatewayStub) EthGetBlockTransactionCountByNumber(p0 context.Context, p1 return *new(ethtypes.EthUint64), ErrNotSupported } -func (s *GatewayStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) { +func (s *GatewayStruct) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { if s.Internal.EthGetCode == nil { return *new(ethtypes.EthBytes), ErrNotSupported } return s.Internal.EthGetCode(p0, p1, p2) } -func (s *GatewayStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthBytes, error) { +func (s *GatewayStub) EthGetCode(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { return *new(ethtypes.EthBytes), ErrNotSupported } @@ -4446,14 +4478,14 @@ func (s *GatewayStub) EthGetMessageCidByTransactionHash(p0 context.Context, p1 * return nil, ErrNotSupported } -func (s *GatewayStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) { +func (s *GatewayStruct) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { if s.Internal.EthGetStorageAt == nil { return *new(ethtypes.EthBytes), ErrNotSupported } return s.Internal.EthGetStorageAt(p0, p1, p2, p3) } -func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 
ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 string) (ethtypes.EthBytes, error) { +func (s *GatewayStub) EthGetStorageAt(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBytes, p3 ethtypes.EthBlockNumberOrHash) (ethtypes.EthBytes, error) { return *new(ethtypes.EthBytes), ErrNotSupported } @@ -4479,14 +4511,14 @@ func (s *GatewayStub) EthGetTransactionByHashLimited(p0 context.Context, p1 *eth return nil, ErrNotSupported } -func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) { +func (s *GatewayStruct) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) { if s.Internal.EthGetTransactionCount == nil { return *new(ethtypes.EthUint64), ErrNotSupported } return s.Internal.EthGetTransactionCount(p0, p1, p2) } -func (s *GatewayStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 string) (ethtypes.EthUint64, error) { +func (s *GatewayStub) EthGetTransactionCount(p0 context.Context, p1 ethtypes.EthAddress, p2 ethtypes.EthBlockNumberOrHash) (ethtypes.EthUint64, error) { return *new(ethtypes.EthUint64), ErrNotSupported } @@ -4600,6 +4632,17 @@ func (s *GatewayStub) EthSubscribe(p0 context.Context, p1 jsonrpc.RawParams) (et return *new(ethtypes.EthSubscriptionID), ErrNotSupported } +func (s *GatewayStruct) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) { + if s.Internal.EthSyncing == nil { + return *new(ethtypes.EthSyncingResult), ErrNotSupported + } + return s.Internal.EthSyncing(p0) +} + +func (s *GatewayStub) EthSyncing(p0 context.Context) (ethtypes.EthSyncingResult, error) { + return *new(ethtypes.EthSyncingResult), ErrNotSupported +} + func (s *GatewayStruct) EthUninstallFilter(p0 context.Context, p1 ethtypes.EthFilterID) (bool, error) { if s.Internal.EthUninstallFilter == nil { return false, ErrNotSupported @@ -4644,6 +4687,17 @@ func (s *GatewayStub) 
GasEstimateMessageGas(p0 context.Context, p1 *types.Messag return nil, ErrNotSupported } +func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { + if s.Internal.MinerGetBaseInfo == nil { + return nil, ErrNotSupported + } + return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) +} + +func (s *GatewayStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*MiningBaseInfo, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { if s.Internal.MpoolGetNonce == nil { return 0, ErrNotSupported @@ -4655,6 +4709,17 @@ func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uin return 0, ErrNotSupported } +func (s *GatewayStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + if s.Internal.MpoolPending == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } + return s.Internal.MpoolPending(p0, p1) +} + +func (s *GatewayStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), ErrNotSupported +} + func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { if s.Internal.MpoolPush == nil { return *new(cid.Cid), ErrNotSupported diff --git a/api/types.go b/api/types.go index 7ff4138678a..8db5120a85e 100644 --- a/api/types.go +++ b/api/types.go @@ -299,6 +299,7 @@ type MinerInfo struct { SectorSize abi.SectorSize WindowPoStPartitionSectors uint64 ConsensusFaultElapsed abi.ChainEpoch + PendingOwnerAddress *address.Address Beneficiary address.Address BeneficiaryTerm *miner.BeneficiaryTerm PendingBeneficiaryTerm *miner.PendingBeneficiaryChange @@ -314,31 +315,30 @@ type NetworkParams struct { } type ForkUpgradeParams struct { - UpgradeSmokeHeight abi.ChainEpoch - UpgradeBreezeHeight 
abi.ChainEpoch - UpgradeIgnitionHeight abi.ChainEpoch - UpgradeLiftoffHeight abi.ChainEpoch - UpgradeAssemblyHeight abi.ChainEpoch - UpgradeRefuelHeight abi.ChainEpoch - UpgradeTapeHeight abi.ChainEpoch - UpgradeKumquatHeight abi.ChainEpoch - UpgradePriceListOopsHeight abi.ChainEpoch - BreezeGasTampingDuration abi.ChainEpoch - UpgradeCalicoHeight abi.ChainEpoch - UpgradePersianHeight abi.ChainEpoch - UpgradeOrangeHeight abi.ChainEpoch - UpgradeClausHeight abi.ChainEpoch - UpgradeTrustHeight abi.ChainEpoch - UpgradeNorwegianHeight abi.ChainEpoch - UpgradeTurboHeight abi.ChainEpoch - UpgradeHyperdriveHeight abi.ChainEpoch - UpgradeChocolateHeight abi.ChainEpoch - UpgradeOhSnapHeight abi.ChainEpoch - UpgradeSkyrHeight abi.ChainEpoch - UpgradeSharkHeight abi.ChainEpoch - UpgradeHyggeHeight abi.ChainEpoch - UpgradeLightningHeight abi.ChainEpoch - UpgradeThunderHeight abi.ChainEpoch + UpgradeSmokeHeight abi.ChainEpoch + UpgradeBreezeHeight abi.ChainEpoch + UpgradeIgnitionHeight abi.ChainEpoch + UpgradeLiftoffHeight abi.ChainEpoch + UpgradeAssemblyHeight abi.ChainEpoch + UpgradeRefuelHeight abi.ChainEpoch + UpgradeTapeHeight abi.ChainEpoch + UpgradeKumquatHeight abi.ChainEpoch + BreezeGasTampingDuration abi.ChainEpoch + UpgradeCalicoHeight abi.ChainEpoch + UpgradePersianHeight abi.ChainEpoch + UpgradeOrangeHeight abi.ChainEpoch + UpgradeClausHeight abi.ChainEpoch + UpgradeTrustHeight abi.ChainEpoch + UpgradeNorwegianHeight abi.ChainEpoch + UpgradeTurboHeight abi.ChainEpoch + UpgradeHyperdriveHeight abi.ChainEpoch + UpgradeChocolateHeight abi.ChainEpoch + UpgradeOhSnapHeight abi.ChainEpoch + UpgradeSkyrHeight abi.ChainEpoch + UpgradeSharkHeight abi.ChainEpoch + UpgradeHyggeHeight abi.ChainEpoch + UpgradeLightningHeight abi.ChainEpoch + UpgradeThunderHeight abi.ChainEpoch } type NonceMapType map[address.Address]uint64 diff --git a/api/v0api/gateway.go b/api/v0api/gateway.go index 30eb0d1c478..9f6c54fa9fc 100644 --- a/api/v0api/gateway.go +++ b/api/v0api/gateway.go @@ -35,6 
+35,9 @@ import ( // * Generate openrpc blobs type Gateway interface { + MpoolPending(context.Context, types.TipSetKey) ([]*types.SignedMessage, error) + ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) + MinerGetBaseInfo(context.Context, address.Address, abi.ChainEpoch, types.TipSetKey) (*api.MiningBaseInfo, error) StateMinerSectorCount(context.Context, address.Address, types.TipSetKey) (api.MinerSectors, error) GasEstimateGasPremium(context.Context, uint64, address.Address, int64, types.TipSetKey) (types.BigInt, error) StateReplay(context.Context, types.TipSetKey, cid.Cid) (*api.InvocResult, error) diff --git a/api/v0api/proxy_gen.go b/api/v0api/proxy_gen.go index 069527ae8cb..29f6f67736c 100644 --- a/api/v0api/proxy_gen.go +++ b/api/v0api/proxy_gen.go @@ -431,6 +431,8 @@ type GatewayStruct struct { } type GatewayMethods struct { + ChainGetBlock func(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) `` + ChainGetBlockMessages func(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) `` ChainGetMessage func(p0 context.Context, p1 cid.Cid) (*types.Message, error) `` @@ -453,8 +455,12 @@ type GatewayMethods struct { GasEstimateMessageGas func(p0 context.Context, p1 *types.Message, p2 *api.MessageSendSpec, p3 types.TipSetKey) (*types.Message, error) `` + MinerGetBaseInfo func(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) `` + MpoolGetNonce func(p0 context.Context, p1 address.Address) (uint64, error) `` + MpoolPending func(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) `` + MpoolPush func(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) `` MsigGetAvailableBalance func(p0 context.Context, p1 address.Address, p2 types.TipSetKey) (types.BigInt, error) `` @@ -2581,6 +2587,17 @@ func (s *FullNodeStub) WalletVerify(p0 context.Context, p1 address.Address, p2 [ return false, ErrNotSupported } +func (s *GatewayStruct) ChainGetBlock(p0 
context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + if s.Internal.ChainGetBlock == nil { + return nil, ErrNotSupported + } + return s.Internal.ChainGetBlock(p0, p1) +} + +func (s *GatewayStub) ChainGetBlock(p0 context.Context, p1 cid.Cid) (*types.BlockHeader, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) ChainGetBlockMessages(p0 context.Context, p1 cid.Cid) (*api.BlockMessages, error) { if s.Internal.ChainGetBlockMessages == nil { return nil, ErrNotSupported @@ -2702,6 +2719,17 @@ func (s *GatewayStub) GasEstimateMessageGas(p0 context.Context, p1 *types.Messag return nil, ErrNotSupported } +func (s *GatewayStruct) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) { + if s.Internal.MinerGetBaseInfo == nil { + return nil, ErrNotSupported + } + return s.Internal.MinerGetBaseInfo(p0, p1, p2, p3) +} + +func (s *GatewayStub) MinerGetBaseInfo(p0 context.Context, p1 address.Address, p2 abi.ChainEpoch, p3 types.TipSetKey) (*api.MiningBaseInfo, error) { + return nil, ErrNotSupported +} + func (s *GatewayStruct) MpoolGetNonce(p0 context.Context, p1 address.Address) (uint64, error) { if s.Internal.MpoolGetNonce == nil { return 0, ErrNotSupported @@ -2713,6 +2741,17 @@ func (s *GatewayStub) MpoolGetNonce(p0 context.Context, p1 address.Address) (uin return 0, ErrNotSupported } +func (s *GatewayStruct) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + if s.Internal.MpoolPending == nil { + return *new([]*types.SignedMessage), ErrNotSupported + } + return s.Internal.MpoolPending(p0, p1) +} + +func (s *GatewayStub) MpoolPending(p0 context.Context, p1 types.TipSetKey) ([]*types.SignedMessage, error) { + return *new([]*types.SignedMessage), ErrNotSupported +} + func (s *GatewayStruct) MpoolPush(p0 context.Context, p1 *types.SignedMessage) (cid.Cid, error) { if s.Internal.MpoolPush == nil { return *new(cid.Cid), ErrNotSupported diff --git 
a/blockstore/badger/blockstore_test_suite.go b/blockstore/badger/blockstore_test_suite.go index 7db15590193..480f5d793f3 100644 --- a/blockstore/badger/blockstore_test_suite.go +++ b/blockstore/badger/blockstore_test_suite.go @@ -9,9 +9,9 @@ import ( "strings" "testing" + u "github.com/ipfs/boxo/util" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - u "github.com/ipfs/go-ipfs-util" ipld "github.com/ipfs/go-ipld-format" "github.com/stretchr/testify/require" diff --git a/blockstore/blockstore.go b/blockstore/blockstore.go index 195e991e1f4..70093177c4a 100644 --- a/blockstore/blockstore.go +++ b/blockstore/blockstore.go @@ -4,9 +4,9 @@ import ( "context" "time" + blockstore "github.com/ipfs/boxo/blockstore" "github.com/ipfs/go-cid" ds "github.com/ipfs/go-datastore" - blockstore "github.com/ipfs/go-ipfs-blockstore" logging "github.com/ipfs/go-log/v2" ) diff --git a/blockstore/ipfs.go b/blockstore/ipfs.go index 7b356cd0e97..8e4224535df 100644 --- a/blockstore/ipfs.go +++ b/blockstore/ipfs.go @@ -5,15 +5,16 @@ import ( "context" "io" + iface "github.com/ipfs/boxo/coreiface" + "github.com/ipfs/boxo/coreiface/options" + "github.com/ipfs/boxo/coreiface/path" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - httpapi "github.com/ipfs/go-ipfs-http-client" - iface "github.com/ipfs/interface-go-ipfs-core" - "github.com/ipfs/interface-go-ipfs-core/options" - "github.com/ipfs/interface-go-ipfs-core/path" "github.com/multiformats/go-multiaddr" "github.com/multiformats/go-multihash" "golang.org/x/xerrors" + + rpc "github.com/filecoin-project/kubo-api-client" ) type IPFSBlockstore struct { @@ -24,7 +25,7 @@ type IPFSBlockstore struct { var _ BasicBlockstore = (*IPFSBlockstore)(nil) func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, error) 
{ - localApi, err := httpapi.NewLocalApi() + localApi, err := rpc.NewLocalApi() if err != nil { return nil, xerrors.Errorf("getting local ipfs api: %w", err) } @@ -51,7 +52,7 @@ func NewLocalIPFSBlockstore(ctx context.Context, onlineMode bool) (Blockstore, e } func NewRemoteIPFSBlockstore(ctx context.Context, maddr multiaddr.Multiaddr, onlineMode bool) (Blockstore, error) { - httpApi, err := httpapi.NewApi(maddr) + httpApi, err := rpc.NewApi(maddr) if err != nil { return nil, xerrors.Errorf("setting remote ipfs api: %w", err) } diff --git a/blockstore/splitstore/splitstore.go b/blockstore/splitstore/splitstore.go index ba53feb7702..1f1ba0e992d 100644 --- a/blockstore/splitstore/splitstore.go +++ b/blockstore/splitstore/splitstore.go @@ -164,7 +164,7 @@ type SplitStore struct { path string mx sync.Mutex - warmupEpoch abi.ChainEpoch // protected by mx + warmupEpoch atomic.Int64 baseEpoch abi.ChainEpoch // protected by compaction lock pruneEpoch abi.ChainEpoch // protected by compaction lock @@ -684,9 +684,7 @@ func (s *SplitStore) View(ctx context.Context, cid cid.Cid, cb func([]byte) erro } func (s *SplitStore) isWarm() bool { - s.mx.Lock() - defer s.mx.Unlock() - return s.warmupEpoch > 0 + return s.warmupEpoch.Load() > 0 } // State tracking @@ -757,7 +755,7 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error bs, err = s.ds.Get(s.ctx, warmupEpochKey) switch err { case nil: - s.warmupEpoch = bytesToEpoch(bs) + s.warmupEpoch.Store(bytesToInt64(bs)) case dstore.ErrNotFound: warmup = true @@ -791,7 +789,7 @@ func (s *SplitStore) Start(chain ChainAccessor, us stmgr.UpgradeSchedule) error return xerrors.Errorf("error loading compaction index: %w", err) } - log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch) + log.Infow("starting splitstore", "baseEpoch", s.baseEpoch, "warmupEpoch", s.warmupEpoch.Load()) if warmup { err = s.warmup(curTs) diff --git a/blockstore/splitstore/splitstore_check.go 
b/blockstore/splitstore/splitstore_check.go index 2645c78c5b5..bdc70627116 100644 --- a/blockstore/splitstore/splitstore_check.go +++ b/blockstore/splitstore/splitstore_check.go @@ -145,7 +145,7 @@ func (s *SplitStore) doCheck(curTs *types.TipSet) error { func (s *SplitStore) Info() map[string]interface{} { info := make(map[string]interface{}) info["base epoch"] = s.baseEpoch - info["warmup epoch"] = s.warmupEpoch + info["warmup epoch"] = s.warmupEpoch.Load() info["compactions"] = s.compactionIndex info["prunes"] = s.pruneIndex info["compacting"] = s.compacting == 1 diff --git a/blockstore/splitstore/splitstore_compact.go b/blockstore/splitstore/splitstore_compact.go index f96f9d37033..534565bf388 100644 --- a/blockstore/splitstore/splitstore_compact.go +++ b/blockstore/splitstore/splitstore_compact.go @@ -1114,13 +1114,17 @@ func (s *SplitStore) walkChain(ts *types.TipSet, inclState, inclMsgs abi.ChainEp if err := walkBlock(c); err != nil { return xerrors.Errorf("error walking block (cid: %s): %w", c, err) } + + if err := s.checkYield(); err != nil { + return xerrors.Errorf("check yield: %w", err) + } } return nil }) } if err := g.Wait(); err != nil { - return err + return xerrors.Errorf("walkBlock workers errored: %w", err) } } @@ -1153,8 +1157,8 @@ func (s *SplitStore) walkObject(c cid.Cid, visitor ObjectVisitor, f func(cid.Cid } // check this before recursing - if err := s.checkYield(); err != nil { - return 0, err + if err := s.checkClosing(); err != nil { + return 0, xerrors.Errorf("check closing: %w", err) } var links []cid.Cid @@ -1222,8 +1226,8 @@ func (s *SplitStore) walkObjectIncomplete(c cid.Cid, visitor ObjectVisitor, f, m } // check this before recursing - if err := s.checkYield(); err != nil { - return sz, err + if err := s.checkClosing(); err != nil { + return sz, xerrors.Errorf("check closing: %w", err) } var links []cid.Cid diff --git a/blockstore/splitstore/splitstore_test.go b/blockstore/splitstore/splitstore_test.go index 
4e168fc543c..63e77b47eaa 100644 --- a/blockstore/splitstore/splitstore_test.go +++ b/blockstore/splitstore/splitstore_test.go @@ -429,7 +429,7 @@ func testSplitStoreReification(t *testing.T, f func(context.Context, blockstore. } defer ss.Close() //nolint - ss.warmupEpoch = 1 + ss.warmupEpoch.Store(1) go ss.reifyOrchestrator() waitForReification := func() { @@ -529,7 +529,7 @@ func testSplitStoreReificationLimit(t *testing.T, f func(context.Context, blocks } defer ss.Close() //nolint - ss.warmupEpoch = 1 + ss.warmupEpoch.Store(1) go ss.reifyOrchestrator() waitForReification := func() { diff --git a/blockstore/splitstore/splitstore_warmup.go b/blockstore/splitstore/splitstore_warmup.go index e387263dae7..7fb6f3b9d08 100644 --- a/blockstore/splitstore/splitstore_warmup.go +++ b/blockstore/splitstore/splitstore_warmup.go @@ -136,9 +136,8 @@ func (s *SplitStore) doWarmup(curTs *types.TipSet) error { if err != nil { return xerrors.Errorf("error saving warm up epoch: %w", err) } - s.mx.Lock() - s.warmupEpoch = epoch - s.mx.Unlock() + + s.warmupEpoch.Store(int64(epoch)) // also save the compactionIndex, as this is used as an indicator of warmup for upgraded nodes err = s.ds.Put(s.ctx, compactionIndexKey, int64ToBytes(s.compactionIndex)) diff --git a/build/openrpc/full.json.gz b/build/openrpc/full.json.gz index 3e3763bb58d..609047aae58 100644 Binary files a/build/openrpc/full.json.gz and b/build/openrpc/full.json.gz differ diff --git a/build/openrpc/gateway.json.gz b/build/openrpc/gateway.json.gz index 1bc77e5cea2..6ea54c394bd 100644 Binary files a/build/openrpc/gateway.json.gz and b/build/openrpc/gateway.json.gz differ diff --git a/build/openrpc/miner.json.gz b/build/openrpc/miner.json.gz index 4e7aff7cb40..8068f521a57 100644 Binary files a/build/openrpc/miner.json.gz and b/build/openrpc/miner.json.gz differ diff --git a/build/openrpc/worker.json.gz b/build/openrpc/worker.json.gz index b0cc6e01b50..5e3bac1886a 100644 Binary files a/build/openrpc/worker.json.gz and 
b/build/openrpc/worker.json.gz differ diff --git a/build/params_interop.go b/build/params_interop.go index 2d8d366e9ca..04fc777f50f 100644 --- a/build/params_interop.go +++ b/build/params_interop.go @@ -49,16 +49,11 @@ var UpgradeHyperdriveHeight = abi.ChainEpoch(-16) var UpgradeChocolateHeight = abi.ChainEpoch(-17) var UpgradeOhSnapHeight = abi.ChainEpoch(-18) var UpgradeSkyrHeight = abi.ChainEpoch(-19) +var UpgradeSharkHeight = abi.ChainEpoch(-20) +var UpgradeHyggeHeight = abi.ChainEpoch(-21) +var UpgradeLightningHeight = abi.ChainEpoch(-22) -const UpgradeSharkHeight = abi.ChainEpoch(-20) - -const UpgradeHyggeHeight = abi.ChainEpoch(100) - -// ?????????? -const UpgradeLightningHeight = 200 - -// ?????????????????? -const UpgradeThunderHeight = 300 +const UpgradeThunderHeight = 50 var DrandSchedule = map[abi.ChainEpoch]DrandEnum{ 0: DrandMainnet, diff --git a/build/version.go b/build/version.go index 5de0e62ae4b..ab97f7a89e6 100644 --- a/build/version.go +++ b/build/version.go @@ -37,7 +37,7 @@ func BuildTypeString() string { } // BuildVersion is the local build version -const BuildVersion = "1.23.2" +const BuildVersion = "1.23.3" func UserVersion() string { if os.Getenv("LOTUS_VERSION_IGNORE_COMMIT") == "1" { diff --git a/chain/actors/policy/policy.go b/chain/actors/policy/policy.go index 4b90c46a06d..bf982af8904 100644 --- a/chain/actors/policy/policy.go +++ b/chain/actors/policy/policy.go @@ -61,6 +61,10 @@ const ( MaxPreCommitRandomnessLookback = builtin11.EpochsInDay + SealRandomnessLookback ) +var ( + MarketDefaultAllocationTermBuffer = market11.MarketDefaultAllocationTermBuffer +) + // SetSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. 
func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { diff --git a/chain/actors/policy/policy.go.template b/chain/actors/policy/policy.go.template index f5178500a13..3eb39836ac4 100644 --- a/chain/actors/policy/policy.go.template +++ b/chain/actors/policy/policy.go.template @@ -39,6 +39,10 @@ const ( MaxPreCommitRandomnessLookback = builtin{{.latestVersion}}.EpochsInDay + SealRandomnessLookback ) +var ( + MarketDefaultAllocationTermBuffer = market{{.latestVersion}}.MarketDefaultAllocationTermBuffer +) + // SetSupportedProofTypes sets supported proof types, across all actor versions. // This should only be used for testing. func SetSupportedProofTypes(types ...abi.RegisteredSealProof) { diff --git a/chain/consensus/compute_state.go b/chain/consensus/compute_state.go index 6b08519af7d..64b9624ea2b 100644 --- a/chain/consensus/compute_state.go +++ b/chain/consensus/compute_state.go @@ -135,6 +135,10 @@ func (t *TipSetExecutor) ApplyBlocks(ctx context.Context, return xerrors.Errorf("running cron: %w", err) } + if !ret.ExitCode.IsSuccess() { + return xerrors.Errorf("cron failed with exit code %d: %w", ret.ExitCode, ret.ActorErr) + } + cronGas += ret.GasUsed if em != nil { diff --git a/chain/consensus/filcns/filecoin.go b/chain/consensus/filcns/filecoin.go index 509eb8a5e85..fd49f1c9a33 100644 --- a/chain/consensus/filcns/filecoin.go +++ b/chain/consensus/filcns/filecoin.go @@ -80,6 +80,11 @@ var RewardFunc = func(ctx context.Context, vmi vm.Interface, em stmgr.ExecMonito if actErr != nil { return xerrors.Errorf("failed to apply reward message: %w", actErr) } + + if !ret.ExitCode.IsSuccess() { + return xerrors.Errorf("reward actor failed with exit code %d: %w", ret.ExitCode, ret.ActorErr) + } + if em != nil { if err := em.MessageApplied(ctx, ts, rwMsg.Cid(), rwMsg, ret, true); err != nil { return xerrors.Errorf("callback failed on reward message: %w", err) diff --git a/chain/events/filter/index.go b/chain/events/filter/index.go index ab4e2449332..bacba60d7dd 
100644 --- a/chain/events/filter/index.go +++ b/chain/events/filter/index.go @@ -7,14 +7,17 @@ import ( "fmt" "sort" "strings" + "time" "github.com/ipfs/go-cid" + logging "github.com/ipfs/go-log/v2" _ "github.com/mattn/go-sqlite3" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" ) @@ -42,6 +45,8 @@ var ddls = []string{ reverted INTEGER NOT NULL )`, + `CREATE INDEX IF NOT EXISTS height_tipset_key ON event (height,tipset_key)`, + `CREATE TABLE IF NOT EXISTS event_entry ( event_id INTEGER, indexed INTEGER NOT NULL, @@ -56,27 +61,210 @@ var ddls = []string{ version UINT64 NOT NULL UNIQUE )`, - // version 1. `INSERT OR IGNORE INTO _meta (version) VALUES (1)`, + `INSERT OR IGNORE INTO _meta (version) VALUES (2)`, } -const schemaVersion = 1 +var ( + log = logging.Logger("filter") +) const ( - insertEvent = `INSERT OR IGNORE INTO event - (height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) - VALUES(?, ?, ?, ?, ?, ?, ?, ?)` + schemaVersion = 2 - insertEntry = `INSERT OR IGNORE INTO event_entry - (event_id, indexed, flags, key, codec, value) - VALUES(?, ?, ?, ?, ?, ?)` + eventExists = `SELECT MAX(id) FROM event WHERE height=? AND tipset_key=? AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` + insertEvent = `INSERT OR IGNORE INTO event(height, tipset_key, tipset_key_cid, emitter_addr, event_index, message_cid, message_index, reverted) VALUES(?, ?, ?, ?, ?, ?, ?, ?)` + insertEntry = `INSERT OR IGNORE INTO event_entry(event_id, indexed, flags, key, codec, value) VALUES(?, ?, ?, ?, ?, ?)` + revertEventsInTipset = `UPDATE event SET reverted=true WHERE height=? AND tipset_key=?` + restoreEvent = `UPDATE event SET reverted=false WHERE height=? AND tipset_key=? 
AND tipset_key_cid=? AND emitter_addr=? AND event_index=? AND message_cid=? AND message_index=?` ) type EventIndex struct { db *sql.DB + + stmtEventExists *sql.Stmt + stmtInsertEvent *sql.Stmt + stmtInsertEntry *sql.Stmt + stmtRevertEventsInTipset *sql.Stmt + stmtRestoreEvent *sql.Stmt +} + +func (ei *EventIndex) initStatements() (err error) { + ei.stmtEventExists, err = ei.db.Prepare(eventExists) + if err != nil { + return xerrors.Errorf("prepare stmtEventExists: %w", err) + } + + ei.stmtInsertEvent, err = ei.db.Prepare(insertEvent) + if err != nil { + return xerrors.Errorf("prepare stmtInsertEvent: %w", err) + } + + ei.stmtInsertEntry, err = ei.db.Prepare(insertEntry) + if err != nil { + return xerrors.Errorf("prepare stmtInsertEntry: %w", err) + } + + ei.stmtRevertEventsInTipset, err = ei.db.Prepare(revertEventsInTipset) + if err != nil { + return xerrors.Errorf("prepare stmtRevertEventsInTipset: %w", err) + } + + ei.stmtRestoreEvent, err = ei.db.Prepare(restoreEvent) + if err != nil { + return xerrors.Errorf("prepare stmtRestoreEvent: %w", err) + } + + return nil +} + +func (ei *EventIndex) migrateToVersion2(ctx context.Context, chainStore *store.ChainStore) error { + now := time.Now() + + tx, err := ei.db.Begin() + if err != nil { + return xerrors.Errorf("begin transaction: %w", err) + } + // rollback the transaction (a no-op if the transaction was already committed) + defer tx.Rollback() //nolint:errcheck + + // create some temporary indices to help speed up the migration + _, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_height_tipset_key_cid ON event (height,tipset_key_cid)") + if err != nil { + return xerrors.Errorf("create index tmp_height_tipset_key_cid: %w", err) + } + _, err = tx.Exec("CREATE INDEX IF NOT EXISTS tmp_tipset_key_cid ON event (tipset_key_cid)") + if err != nil { + return xerrors.Errorf("create index tmp_tipset_key_cid: %w", err) + } + + stmtDeleteOffChainEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid!=? 
and height=?") + if err != nil { + return xerrors.Errorf("prepare stmtDeleteOffChainEvent: %w", err) + } + + stmtSelectEvent, err := tx.Prepare("SELECT id FROM event WHERE tipset_key_cid=? ORDER BY message_index ASC, event_index ASC, id DESC LIMIT 1") + if err != nil { + return xerrors.Errorf("prepare stmtSelectEvent: %w", err) + } + + stmtDeleteEvent, err := tx.Prepare("DELETE FROM event WHERE tipset_key_cid=? AND id= minHeight.Int64 { + if currTs.Height()%1000 == 0 { + log.Infof("Migrating height %d (remaining %d)", currTs.Height(), int64(currTs.Height())-minHeight.Int64) + } + + tsKey := currTs.Parents() + currTs, err = chainStore.GetTipSetFromKey(ctx, tsKey) + if err != nil { + return xerrors.Errorf("get tipset from key: %w", err) + } + log.Debugf("Migrating height %d", currTs.Height()) + + tsKeyCid, err := currTs.Key().Cid() + if err != nil { + return fmt.Errorf("tipset key cid: %w", err) + } + + // delete all events that are not in the canonical chain + _, err = stmtDeleteOffChainEvent.Exec(tsKeyCid.Bytes(), currTs.Height()) + if err != nil { + return xerrors.Errorf("delete off chain event: %w", err) + } + + // find the first eventId from the last time the tipset was applied + var eventId sql.NullInt64 + err = stmtSelectEvent.QueryRow(tsKeyCid.Bytes()).Scan(&eventId) + if err != nil { + if err == sql.ErrNoRows { + continue + } + return xerrors.Errorf("select event: %w", err) + } + + // this tipset might not have any events which is ok + if !eventId.Valid { + continue + } + log.Debugf("Deleting all events with id < %d at height %d", eventId.Int64, currTs.Height()) + + res, err := stmtDeleteEvent.Exec(tsKeyCid.Bytes(), eventId.Int64) + if err != nil { + return xerrors.Errorf("delete event: %w", err) + } + + nrRowsAffected, err := res.RowsAffected() + if err != nil { + return xerrors.Errorf("rows affected: %w", err) + } + log.Debugf("deleted %d events from tipset %s", nrRowsAffected, tsKeyCid.String()) + } + + // delete all entries that have an event_id that 
doesn't exist (since we don't have a foreign + // key constraint that gives us cascading deletes) + res, err := tx.Exec("DELETE FROM event_entry WHERE event_id NOT IN (SELECT id FROM event)") + if err != nil { + return xerrors.Errorf("delete event_entry: %w", err) + } + + nrRowsAffected, err := res.RowsAffected() + if err != nil { + return xerrors.Errorf("rows affected: %w", err) + } + log.Infof("cleaned up %d entries that had deleted events\n", nrRowsAffected) + + // drop the temporary indices after the migration + _, err = tx.Exec("DROP INDEX IF EXISTS tmp_tipset_key_cid") + if err != nil { + return xerrors.Errorf("drop index tmp_tipset_key_cid: %w", err) + } + _, err = tx.Exec("DROP INDEX IF EXISTS tmp_height_tipset_key_cid") + if err != nil { + return xerrors.Errorf("drop index tmp_height_tipset_key_cid: %w", err) + } + + err = tx.Commit() + if err != nil { + return xerrors.Errorf("commit transaction: %w", err) + } + + // during the migration, we have likely increased the WAL size a lot, so let's do some + // simple DB administration to free up space (VACUUM followed by truncating the WAL file) + // as this would be a good time to do it when no other writes are happening + log.Infof("Performing DB vacuum and wal checkpointing to free up space after the migration") + _, err = ei.db.Exec("VACUUM") + if err != nil { + log.Warnf("error vacuuming database: %s", err) + } + _, err = ei.db.Exec("PRAGMA wal_checkpoint(TRUNCATE)") + if err != nil { + log.Warnf("error checkpointing wal: %s", err) + } + + log.Infof("Successfully migrated events to version 2 in %s", time.Since(now)) + + return nil } -func NewEventIndex(path string) (*EventIndex, error) { +func NewEventIndex(ctx context.Context, path string, chainStore *store.ChainStore) (*EventIndex, error) { db, err := sql.Open("sqlite3", path+"?mode=rwc") if err != nil { return nil, xerrors.Errorf("open sqlite3 database: %w", err) @@ -89,6 +277,8 @@ func NewEventIndex(path string) (*EventIndex, error) { } } + eventIndex 
:= EventIndex{db: db} + q, err := db.Query("SELECT name FROM sqlite_master WHERE type='table' AND name='_meta';") if err == sql.ErrNoRows || !q.Next() { // empty database, create the schema @@ -102,24 +292,48 @@ func NewEventIndex(path string) (*EventIndex, error) { _ = db.Close() return nil, xerrors.Errorf("looking for _meta table: %w", err) } else { - // Ensure we don't open a database from a different schema version - - row := db.QueryRow("SELECT max(version) FROM _meta") + // check the schema version to see if we need to upgrade the database schema var version int - err := row.Scan(&version) + err := db.QueryRow("SELECT max(version) FROM _meta").Scan(&version) if err != nil { _ = db.Close() return nil, xerrors.Errorf("invalid database version: no version found") } + + if version == 1 { + log.Infof("upgrading event index from version 1 to version 2") + + err = eventIndex.migrateToVersion2(ctx, chainStore) + if err != nil { + _ = db.Close() + return nil, xerrors.Errorf("could not migrate sql data to version 2: %w", err) + } + + // to upgrade to version 2 we only need to create an index on the event table + // which means we can just recreate the schema (it will not have any effect on existing data) + for _, ddl := range ddls { + if _, err := db.Exec(ddl); err != nil { + _ = db.Close() + return nil, xerrors.Errorf("could not upgrade index to version 2, exec ddl %q: %w", ddl, err) + } + } + + version = 2 + } + if version != schemaVersion { _ = db.Close() return nil, xerrors.Errorf("invalid database version: got %d, expected %d", version, schemaVersion) } } - return &EventIndex{ - db: db, - }, nil + err = eventIndex.initStatements() + if err != nil { + _ = db.Close() + return nil, xerrors.Errorf("error preparing eventIndex database statements: %w", err) + } + + return &eventIndex, nil } func (ei *EventIndex) Close() error { @@ -130,28 +344,38 @@ func (ei *EventIndex) Close() error { } func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, 
revert bool, resolver func(ctx context.Context, emitter abi.ActorID, ts *types.TipSet) (address.Address, bool)) error { - // cache of lookups between actor id and f4 address - - addressLookups := make(map[abi.ActorID]address.Address) - - ems, err := te.messages(ctx) - if err != nil { - return xerrors.Errorf("load executed messages: %w", err) - } - tx, err := ei.db.Begin() if err != nil { return xerrors.Errorf("begin transaction: %w", err) } - stmtEvent, err := tx.Prepare(insertEvent) - if err != nil { - return xerrors.Errorf("prepare insert event: %w", err) + // rollback the transaction (a no-op if the transaction was already committed) + defer tx.Rollback() //nolint:errcheck + + // let's handle the revert case first, since it's simpler and we can simply mark all events in this tipset as reverted and return + if revert { + _, err = tx.Stmt(ei.stmtRevertEventsInTipset).Exec(te.msgTs.Height(), te.msgTs.Key().Bytes()) + if err != nil { + return xerrors.Errorf("revert event: %w", err) + } + + err = tx.Commit() + if err != nil { + return xerrors.Errorf("commit transaction: %w", err) + } + + return nil } - stmtEntry, err := tx.Prepare(insertEntry) + + // cache of lookups between actor id and f4 address + addressLookups := make(map[abi.ActorID]address.Address) + + ems, err := te.messages(ctx) if err != nil { - return xerrors.Errorf("prepare insert entry: %w", err) + return xerrors.Errorf("load executed messages: %w", err) } + // iterate over all executed messages in this tipset and insert them into the database if they + // don't exist, otherwise mark them as not reverted for msgIdx, em := range ems { for evIdx, ev := range em.Events() { addr, found := addressLookups[ev.Emitter] @@ -170,7 +394,9 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever return xerrors.Errorf("tipset key cid: %w", err) } - res, err := stmtEvent.Exec( + // check if this event already exists in the database + var entryID sql.NullInt64 + err = 
tx.Stmt(ei.stmtEventExists).QueryRow( te.msgTs.Height(), // height te.msgTs.Key().Bytes(), // tipset_key tsKeyCid.Bytes(), // tipset_key_cid @@ -178,34 +404,76 @@ func (ei *EventIndex) CollectEvents(ctx context.Context, te *TipSetEvents, rever evIdx, // event_index em.Message().Cid().Bytes(), // message_cid msgIdx, // message_index - revert, // reverted - ) + ).Scan(&entryID) if err != nil { - return xerrors.Errorf("exec insert event: %w", err) + return xerrors.Errorf("error checking if event exists: %w", err) } - lastID, err := res.LastInsertId() - if err != nil { - return xerrors.Errorf("get last row id: %w", err) - } + if !entryID.Valid { + // event does not exist, lets insert it + res, err := tx.Stmt(ei.stmtInsertEvent).Exec( + te.msgTs.Height(), // height + te.msgTs.Key().Bytes(), // tipset_key + tsKeyCid.Bytes(), // tipset_key_cid + addr.Bytes(), // emitter_addr + evIdx, // event_index + em.Message().Cid().Bytes(), // message_cid + msgIdx, // message_index + false, // reverted + ) + if err != nil { + return xerrors.Errorf("exec insert event: %w", err) + } - for _, entry := range ev.Entries { - _, err := stmtEntry.Exec( - lastID, // event_id - isIndexedValue(entry.Flags), // indexed - []byte{entry.Flags}, // flags - entry.Key, // key - entry.Codec, // codec - entry.Value, // value + entryID.Int64, err = res.LastInsertId() + if err != nil { + return xerrors.Errorf("get last row id: %w", err) + } + + // insert all the entries for this event + for _, entry := range ev.Entries { + _, err = tx.Stmt(ei.stmtInsertEntry).Exec( + entryID.Int64, // event_id + isIndexedValue(entry.Flags), // indexed + []byte{entry.Flags}, // flags + entry.Key, // key + entry.Codec, // codec + entry.Value, // value + ) + if err != nil { + return xerrors.Errorf("exec insert entry: %w", err) + } + } + } else { + // event already exists, lets mark it as not reverted + res, err := tx.Stmt(ei.stmtRestoreEvent).Exec( + te.msgTs.Height(), // height + te.msgTs.Key().Bytes(), // tipset_key + 
tsKeyCid.Bytes(), // tipset_key_cid + addr.Bytes(), // emitter_addr + evIdx, // event_index + em.Message().Cid().Bytes(), // message_cid + msgIdx, // message_index ) if err != nil { - return xerrors.Errorf("exec insert entry: %w", err) + return xerrors.Errorf("exec restore event: %w", err) + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + return xerrors.Errorf("error getting rows affected: %s", err) + } + + // this is a sanity check as we should only ever be updating one event + if rowsAffected != 1 { + log.Warnf("restored %d events but expected only one to exist", rowsAffected) } } } } - if err := tx.Commit(); err != nil { + err = tx.Commit() + if err != nil { return xerrors.Errorf("commit transaction: %w", err) } diff --git a/chain/events/filter/index_test.go b/chain/events/filter/index_test.go index ee2ae8611b5..fcdb1ab0564 100644 --- a/chain/events/filter/index_test.go +++ b/chain/events/filter/index_test.go @@ -74,7 +74,7 @@ func TestEventIndexPrefillFilter(t *testing.T) { dbPath := filepath.Join(workDir, "actorevents.db") - ei, err := NewEventIndex(dbPath) + ei, err := NewEventIndex(context.Background(), dbPath, nil) require.NoError(t, err, "create event index") if err := ei.CollectEvents(context.Background(), events14000, false, addrMap.ResolveAddress); err != nil { require.NoError(t, err, "collect events") diff --git a/chain/events/observer.go b/chain/events/observer.go index 72b9deaee43..4462185858f 100644 --- a/chain/events/observer.go +++ b/chain/events/observer.go @@ -125,7 +125,7 @@ func (o *observer) listenHeadChangesOnce(ctx context.Context) error { for changes := range notifs { if err := o.applyChanges(ctx, changes); err != nil { - return err + return xerrors.Errorf("failed to apply a change notification: %w", err) } } diff --git a/chain/gen/gen.go b/chain/gen/gen.go index 98610cd6cb9..2e5f5e7f724 100644 --- a/chain/gen/gen.go +++ b/chain/gen/gen.go @@ -10,12 +10,12 @@ import ( "time" "github.com/google/uuid" - 
"github.com/ipfs/go-blockservice" + "github.com/ipfs/boxo/blockservice" + offline "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/go-cid" - offline "github.com/ipfs/go-ipfs-exchange-offline" format "github.com/ipfs/go-ipld-format" logging "github.com/ipfs/go-log/v2" - "github.com/ipfs/go-merkledag" "github.com/ipld/go-car" "golang.org/x/xerrors" diff --git a/chain/gen/slashfilter/slashfilter.go b/chain/gen/slashfilter/slashfilter.go index 9865862670a..71b5dad9ad9 100644 --- a/chain/gen/slashfilter/slashfilter.go +++ b/chain/gen/slashfilter/slashfilter.go @@ -11,7 +11,6 @@ import ( "github.com/filecoin-project/go-state-types/abi" - "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" ) @@ -27,24 +26,30 @@ func New(dstore ds.Batching) *SlashFilter { } } -func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) error { - if build.IsNearUpgrade(bh.Height, build.UpgradeOrangeHeight) { - return nil - } - +func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, parentEpoch abi.ChainEpoch) (cid.Cid, bool, error) { epochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, bh.Height)) { // double-fork mining (2 blocks at one epoch) - if err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults"); err != nil { - return err + doubleForkWitness, doubleForkFault, err := checkFault(ctx, f.byEpoch, epochKey, bh, "double-fork mining faults") + if err != nil { + return cid.Undef, false, xerrors.Errorf("check double-fork mining faults: %w", err) + } + + if doubleForkFault { + return doubleForkWitness, doubleForkFault, nil } } parentsKey := ds.NewKey(fmt.Sprintf("/%s/%x", bh.Miner, types.NewTipSetKey(bh.Parents...).Bytes())) { // time-offset mining faults (2 blocks with the same parents) - if err := checkFault(ctx, 
f.byParents, parentsKey, bh, "time-offset mining faults"); err != nil { - return err + timeOffsetWitness, timeOffsetFault, err := checkFault(ctx, f.byParents, parentsKey, bh, "time-offset mining faults") + if err != nil { + return cid.Undef, false, xerrors.Errorf("check time-offset mining faults: %w", err) + } + + if timeOffsetFault { + return timeOffsetWitness, timeOffsetFault, nil } } @@ -55,19 +60,19 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par parentEpochKey := ds.NewKey(fmt.Sprintf("/%s/%d", bh.Miner, parentEpoch)) have, err := f.byEpoch.Has(ctx, parentEpochKey) if err != nil { - return err + return cid.Undef, false, xerrors.Errorf("failed to read from db: %w", err) } if have { // If we had, make sure it's in our parent tipset cidb, err := f.byEpoch.Get(ctx, parentEpochKey) if err != nil { - return xerrors.Errorf("getting other block cid: %w", err) + return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err) } _, parent, err := cid.CidFromBytes(cidb) if err != nil { - return err + return cid.Undef, false, xerrors.Errorf("failed to read cid from bytes: %w", err) } var found bool @@ -78,45 +83,45 @@ func (f *SlashFilter) MinedBlock(ctx context.Context, bh *types.BlockHeader, par } if !found { - return xerrors.Errorf("produced block would trigger 'parent-grinding fault' consensus fault; miner: %s; bh: %s, expected parent: %s", bh.Miner, bh.Cid(), parent) + return parent, true, nil } } } if err := f.byParents.Put(ctx, parentsKey, bh.Cid().Bytes()); err != nil { - return xerrors.Errorf("putting byEpoch entry: %w", err) + return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err) } if err := f.byEpoch.Put(ctx, epochKey, bh.Cid().Bytes()); err != nil { - return xerrors.Errorf("putting byEpoch entry: %w", err) + return cid.Undef, false, xerrors.Errorf("putting byEpoch entry: %w", err) } - return nil + return cid.Undef, false, nil } -func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, 
bh *types.BlockHeader, faultType string) error { +func checkFault(ctx context.Context, t ds.Datastore, key ds.Key, bh *types.BlockHeader, faultType string) (cid.Cid, bool, error) { fault, err := t.Has(ctx, key) if err != nil { - return err + return cid.Undef, false, xerrors.Errorf("failed to read from datastore: %w", err) } if fault { cidb, err := t.Get(ctx, key) if err != nil { - return xerrors.Errorf("getting other block cid: %w", err) + return cid.Undef, false, xerrors.Errorf("getting other block cid: %w", err) } _, other, err := cid.CidFromBytes(cidb) if err != nil { - return err + return cid.Undef, false, xerrors.Errorf("failed to read cid of other block: %w", err) } if other == bh.Cid() { - return nil + return cid.Undef, false, nil } - return xerrors.Errorf("produced block would trigger '%s' consensus fault; miner: %s; bh: %s, other: %s", faultType, bh.Miner, bh.Cid(), other) + return other, true, nil } - return nil + return cid.Undef, false, nil } diff --git a/chain/gen/slashfilter/slashsvc/slashservice.go b/chain/gen/slashfilter/slashsvc/slashservice.go new file mode 100644 index 00000000000..7a662288098 --- /dev/null +++ b/chain/gen/slashfilter/slashsvc/slashservice.go @@ -0,0 +1,179 @@ +package slashsvc + +import ( + "context" + "time" + + "github.com/ipfs/go-cid" + levelds "github.com/ipfs/go-ds-leveldb" + logging "github.com/ipfs/go-log/v2" + ldbopts "github.com/syndtr/goleveldb/leveldb/opt" + "golang.org/x/xerrors" + + "github.com/filecoin-project/go-address" + cborutil "github.com/filecoin-project/go-cbor-util" + "github.com/filecoin-project/go-state-types/builtin" + "github.com/filecoin-project/specs-actors/actors/builtin/miner" + + lapi "github.com/filecoin-project/lotus/api" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/gen/slashfilter" + "github.com/filecoin-project/lotus/chain/types" +) + +var log = 
logging.Logger("slashsvc") + +type ConsensusSlasherApi interface { + ChainHead(context.Context) (*types.TipSet, error) + ChainGetBlock(context.Context, cid.Cid) (*types.BlockHeader, error) + MpoolPushMessage(ctx context.Context, msg *types.Message, spec *lapi.MessageSendSpec) (*types.SignedMessage, error) + SyncIncomingBlocks(context.Context) (<-chan *types.BlockHeader, error) + WalletDefaultAddress(context.Context) (address.Address, error) +} + +func SlashConsensus(ctx context.Context, a ConsensusSlasherApi, p string, from string) error { + var fromAddr address.Address + + ds, err := levelds.NewDatastore(p, &levelds.Options{ + Compression: ldbopts.NoCompression, + NoSync: false, + Strict: ldbopts.StrictAll, + ReadOnly: false, + }) + if err != nil { + return xerrors.Errorf("open leveldb: %w", err) + } + sf := slashfilter.New(ds) + if from == "" { + defaddr, err := a.WalletDefaultAddress(ctx) + if err != nil { + return err + } + fromAddr = defaddr + } else { + addr, err := address.NewFromString(from) + if err != nil { + return err + } + + fromAddr = addr + } + + blocks, err := a.SyncIncomingBlocks(ctx) + if err != nil { + return xerrors.Errorf("sync incoming blocks failed: %w", err) + } + + log.Infow("consensus fault reporter", "from", fromAddr) + go func() { + for block := range blocks { + otherBlock, extraBlock, fault, err := slashFilterMinedBlock(ctx, sf, a, block) + if err != nil { + log.Errorf("slash detector errored: %s", err) + continue + } + if fault { + log.Errorf(" SLASH FILTER DETECTED FAULT DUE TO BLOCKS %s and %s", otherBlock.Cid(), block.Cid()) + bh1, err := cborutil.Dump(otherBlock) + if err != nil { + log.Errorf("could not dump otherblock:%s, err:%s", otherBlock.Cid(), err) + continue + } + + bh2, err := cborutil.Dump(block) + if err != nil { + log.Errorf("could not dump block:%s, err:%s", block.Cid(), err) + continue + } + + params := miner.ReportConsensusFaultParams{ + BlockHeader1: bh1, + BlockHeader2: bh2, + } + if extraBlock != nil { + be, err 
:= cborutil.Dump(extraBlock) + if err != nil { + log.Errorf("could not dump block:%s, err:%s", block.Cid(), err) + continue + } + params.BlockHeaderExtra = be + } + + enc, err := actors.SerializeParams(¶ms) + if err != nil { + log.Errorf("could not serialize declare faults parameters: %s", err) + continue + } + for { + head, err := a.ChainHead(ctx) + if err != nil || head.Height() > block.Height { + break + } + time.Sleep(time.Second * 10) + } + message, err := a.MpoolPushMessage(ctx, &types.Message{ + To: block.Miner, + From: fromAddr, + Value: types.NewInt(0), + Method: builtin.MethodsMiner.ReportConsensusFault, + Params: enc, + }, nil) + if err != nil { + log.Errorf("ReportConsensusFault to messagepool error:%s", err) + continue + } + log.Infof("ReportConsensusFault message CID:%s", message.Cid()) + + } + } + }() + + return nil +} + +func slashFilterMinedBlock(ctx context.Context, sf *slashfilter.SlashFilter, a ConsensusSlasherApi, blockB *types.BlockHeader) (*types.BlockHeader, *types.BlockHeader, bool, error) { + blockC, err := a.ChainGetBlock(ctx, blockB.Parents[0]) + if err != nil { + return nil, nil, false, xerrors.Errorf("chain get block error:%s", err) + } + + blockACid, fault, err := sf.MinedBlock(ctx, blockB, blockC.Height) + if err != nil { + return nil, nil, false, xerrors.Errorf("slash filter check block error:%s", err) + } + + if !fault { + return nil, nil, false, nil + } + + blockA, err := a.ChainGetBlock(ctx, blockACid) + if err != nil { + return nil, nil, false, xerrors.Errorf("failed to get blockA: %w", err) + } + + // (a) double-fork mining (2 blocks at one epoch) + if blockA.Height == blockB.Height { + return blockA, nil, true, nil + } + + // (b) time-offset mining faults (2 blocks with the same parents) + if types.CidArrsEqual(blockB.Parents, blockA.Parents) { + return blockA, nil, true, nil + } + + // (c) parent-grinding fault + // Here extra is the "witness", a third block that shows the connection between A and B as + // A's sibling and 
B's parent. + // Specifically, since A is of lower height, it must be that B was mined omitting A from its tipset + // + // B + // | + // [A, C] + if types.CidArrsEqual(blockA.Parents, blockC.Parents) && blockA.Height == blockC.Height && + types.CidArrsContains(blockB.Parents, blockC.Cid()) && !types.CidArrsContains(blockB.Parents, blockA.Cid()) { + return blockA, blockC, true, nil + } + + log.Error("unexpectedly reached end of slashFilterMinedBlock despite fault being reported!") + return nil, nil, false, nil +} diff --git a/chain/index/msgindex.go b/chain/index/msgindex.go index 39ba487f2ef..27eeea73e1d 100644 --- a/chain/index/msgindex.go +++ b/chain/index/msgindex.go @@ -37,7 +37,17 @@ var dbDefs = []string{ )`, `INSERT OR IGNORE INTO _meta (version) VALUES (1)`, } -var dbPragmas = []string{} + +var dbPragmas = []string{ + "PRAGMA synchronous = normal", + "PRAGMA temp_store = memory", + "PRAGMA mmap_size = 30000000000", + "PRAGMA page_size = 32768", + "PRAGMA auto_vacuum = NONE", + "PRAGMA automatic_index = OFF", + "PRAGMA journal_mode = WAL", + "PRAGMA read_uncommitted = ON", +} const ( // prepared stmts diff --git a/chain/index/msgindex_test.go b/chain/index/msgindex_test.go index 4ebdcfd35ba..bf4bc6190e8 100644 --- a/chain/index/msgindex_test.go +++ b/chain/index/msgindex_test.go @@ -39,10 +39,10 @@ func TestBasicMsgIndex(t *testing.T) { t.Logf("advance to epoch %d", i+1) err := cs.advance() require.NoError(t, err) - // wait for the coalescer to notify - time.Sleep(CoalesceMinDelay + 10*time.Millisecond) } + waitForCoalescerAfterLastEvent() + t.Log("verifying index") verifyIndex(t, cs, msgIndex) } @@ -51,7 +51,7 @@ func TestReorgMsgIndex(t *testing.T) { // slightly more nuanced test that includes reorgs // 1. Create an index with mock chain store // 2. Advance/Reorg the chain for a few tipsets - // 3. Verify that the index contains all messages with the correct tipst/epoch + // 3. 
Verify that the index contains all messages with the correct tipset/epoch cs := newMockChainStore() cs.genesis() @@ -67,10 +67,10 @@ func TestReorgMsgIndex(t *testing.T) { t.Logf("advance to epoch %d", i+1) err := cs.advance() require.NoError(t, err) - // wait for the coalescer to notify - time.Sleep(CoalesceMinDelay + 10*time.Millisecond) } + waitForCoalescerAfterLastEvent() + // a simple reorg t.Log("doing reorg") reorgme := cs.curTs @@ -80,7 +80,8 @@ func TestReorgMsgIndex(t *testing.T) { reorgmeChild := cs.makeBlk() err = cs.reorg([]*types.TipSet{reorgme}, []*types.TipSet{reorgmeChild}) require.NoError(t, err) - time.Sleep(CoalesceMinDelay + 10*time.Millisecond) + + waitForCoalescerAfterLastEvent() t.Log("verifying index") verifyIndex(t, cs, msgIndex) @@ -109,10 +110,10 @@ func TestReconcileMsgIndex(t *testing.T) { t.Logf("advance to epoch %d", i+1) err := cs.advance() require.NoError(t, err) - // wait for the coalescer to notify - time.Sleep(CoalesceMinDelay + 10*time.Millisecond) } + waitForCoalescerAfterLastEvent() + // Close it and reorg err = msgIndex.Close() require.NoError(t, err) @@ -296,3 +297,11 @@ func (cs *mockChainStore) GetTipSetFromKey(ctx context.Context, tsk types.TipSet } return ts, nil } + +func waitForCoalescerAfterLastEvent() { + // It can take up to CoalesceMinDelay for the coalescer timer to fire after the last event. + // When the timer fires, it can wait up to CoalesceMinDelay again for more events. + // Therefore the total wait is 2 * CoalesceMinDelay. + // Then we wait another second for the listener (the index) to actually process events. 
+ time.Sleep(2*CoalesceMinDelay + time.Second) +} diff --git a/chain/messagepool/check.go b/chain/messagepool/check.go index 07a278f6dbb..fdec910c4ea 100644 --- a/chain/messagepool/check.go +++ b/chain/messagepool/check.go @@ -35,8 +35,8 @@ func (mp *MessagePool) CheckPendingMessages(ctx context.Context, from address.Ad mp.lk.RLock() mset, ok, err := mp.getPendingMset(ctx, from) if err != nil { - log.Warnf("errored while getting pending mset: %w", err) - return nil, err + mp.lk.RUnlock() + return nil, xerrors.Errorf("errored while getting pending mset: %w", err) } if ok { msgs = make([]*types.Message, 0, len(mset.msgs)) @@ -71,8 +71,8 @@ func (mp *MessagePool) CheckReplaceMessages(ctx context.Context, replace []*type msgMap[m.From] = mmap mset, ok, err := mp.getPendingMset(ctx, m.From) if err != nil { - log.Warnf("errored while getting pending mset: %w", err) - return nil, err + mp.lk.RUnlock() + return nil, xerrors.Errorf("errored while getting pending mset: %w", err) } if ok { count += len(mset.msgs) @@ -155,8 +155,8 @@ func (mp *MessagePool) checkMessages(ctx context.Context, msgs []*types.Message, mp.lk.RLock() mset, ok, err := mp.getPendingMset(ctx, m.From) if err != nil { - log.Warnf("errored while getting pending mset: %w", err) - return nil, err + mp.lk.RUnlock() + return nil, xerrors.Errorf("errored while getting pending mset: %w", err) } if ok && !interned { st = &actorState{nextNonce: mset.nextNonce, requiredFunds: mset.requiredFunds} diff --git a/chain/messagepool/messagepool.go b/chain/messagepool/messagepool.go index 4dcb6eb9b75..50f64f903ab 100644 --- a/chain/messagepool/messagepool.go +++ b/chain/messagepool/messagepool.go @@ -448,12 +448,8 @@ func New(ctx context.Context, api Provider, ds dtypes.MetadataDS, us stmgr.Upgra return mp, nil } -func (mp *MessagePool) TryForEachPendingMessage(f func(cid.Cid) error) error { - // avoid deadlocks in splitstore compaction when something else needs to access the blockstore - // while holding the mpool lock - 
if !mp.lk.TryLock() { - return xerrors.Errorf("mpool TryForEachPendingMessage: could not acquire lock") - } +func (mp *MessagePool) ForEachPendingMessage(f func(cid.Cid) error) error { + mp.lk.Lock() defer mp.lk.Unlock() for _, mset := range mp.pending { @@ -749,8 +745,7 @@ func (mp *MessagePool) checkMessage(ctx context.Context, m *types.SignedMessage) } if err := mp.VerifyMsgSig(m); err != nil { - log.Warnf("signature verification failed: %s", err) - return err + return xerrors.Errorf("signature verification failed: %s", err) } return nil @@ -969,13 +964,11 @@ func (mp *MessagePool) addLocked(ctx context.Context, m *types.SignedMessage, st } if _, err := mp.api.PutMessage(ctx, m); err != nil { - log.Warnf("mpooladd cs.PutMessage failed: %s", err) - return err + return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err) } if _, err := mp.api.PutMessage(ctx, &m.Message); err != nil { - log.Warnf("mpooladd cs.PutMessage failed: %s", err) - return err + return xerrors.Errorf("mpooladd cs.PutMessage failed: %s", err) } // Note: If performance becomes an issue, making this getOrCreatePendingMset will save some work diff --git a/chain/messagepool/messagepool_test.go b/chain/messagepool/messagepool_test.go index a781b50748c..3c6800d7b2b 100644 --- a/chain/messagepool/messagepool_test.go +++ b/chain/messagepool/messagepool_test.go @@ -11,9 +11,11 @@ import ( "github.com/ipfs/go-datastore" logging "github.com/ipfs/go-log/v2" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" + big2 "github.com/filecoin-project/go-state-types/big" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/go-state-types/network" builtin2 "github.com/filecoin-project/specs-actors/v2/actors/builtin" @@ -524,6 +526,36 @@ func TestPruningSimple(t *testing.T) { } } +func 
TestGasRewardNegative(t *testing.T) { + var mp MessagePool + + msg := types.SignedMessage{ + Message: types.Message{ + GasLimit: 1000, + GasFeeCap: big2.NewInt(20000), + GasPremium: big2.NewInt(15000), + }, + } + baseFee := big2.NewInt(30000) + // Over the GasPremium, but under the BaseFee + gr1 := mp.getGasReward(&msg, baseFee) + + msg.Message.GasFeeCap = big2.NewInt(15000) + // Equal to GasPremium, under the BaseFee + gr2 := mp.getGasReward(&msg, baseFee) + + msg.Message.GasFeeCap = big2.NewInt(10000) + // Under both GasPremium and BaseFee + gr3 := mp.getGasReward(&msg, baseFee) + + require.True(t, gr1.Sign() < 0) + require.True(t, gr2.Sign() < 0) + require.True(t, gr3.Sign() < 0) + + require.True(t, gr1.Cmp(gr2) > 0) + require.True(t, gr2.Cmp(gr3) > 0) +} + func TestLoadLocal(t *testing.T) { tma := newTestMpoolAPI() ds := datastore.NewMapDatastore() diff --git a/chain/messagepool/selection_test.go b/chain/messagepool/selection_test.go index c3a5c6d6f3a..17e0f34f4e0 100644 --- a/chain/messagepool/selection_test.go +++ b/chain/messagepool/selection_test.go @@ -13,6 +13,7 @@ import ( "os" "sort" "testing" + "time" "github.com/ipfs/go-cid" "github.com/ipfs/go-datastore" @@ -1690,3 +1691,188 @@ readLoop: } } + +func TestRealWorldSelectionTiming(t *testing.T) { + //stm: @TOKEN_WALLET_NEW_001, @TOKEN_WALLET_SIGN_001, @CHAIN_MEMPOOL_SELECT_001 + + // load test-messages.json.gz and rewrite the messages so that + // 1) we map each real actor to a test actor so that we can sign the messages + // 2) adjust the nonces so that they start from 0 + file, err := os.Open("test-messages2.json.gz") + if err != nil { + t.Fatal(err) + } + + gzr, err := gzip.NewReader(file) + if err != nil { + t.Fatal(err) + } + + dec := json.NewDecoder(gzr) + + var msgs []*types.SignedMessage + baseNonces := make(map[address.Address]uint64) + +readLoop: + for { + m := new(types.SignedMessage) + err := dec.Decode(m) + switch err { + case nil: + msgs = append(msgs, m) + nonce, ok := 
baseNonces[m.Message.From] + if !ok || m.Message.Nonce < nonce { + baseNonces[m.Message.From] = m.Message.Nonce + } + + case io.EOF: + break readLoop + + default: + t.Fatal(err) + } + } + + actorMap := make(map[address.Address]address.Address) + actorWallets := make(map[address.Address]api.Wallet) + + for _, m := range msgs { + baseNonce := baseNonces[m.Message.From] + + localActor, ok := actorMap[m.Message.From] + if !ok { + w, err := wallet.NewWallet(wallet.NewMemKeyStore()) + if err != nil { + t.Fatal(err) + } + + a, err := w.WalletNew(context.Background(), types.KTSecp256k1) + if err != nil { + t.Fatal(err) + } + + actorMap[m.Message.From] = a + actorWallets[a] = w + localActor = a + } + + w, ok := actorWallets[localActor] + if !ok { + t.Fatalf("failed to lookup wallet for actor %s", localActor) + } + + m.Message.From = localActor + m.Message.Nonce -= baseNonce + + sig, err := w.WalletSign(context.TODO(), localActor, m.Message.Cid().Bytes(), api.MsgMeta{}) + if err != nil { + t.Fatal(err) + } + + m.Signature = *sig + } + + mp, tma := makeTestMpool() + + block := tma.nextBlockWithHeight(uint64(build.UpgradeHyggeHeight) + 10) + ts := mock.TipSet(block) + tma.applyBlock(t, block) + + for _, a := range actorMap { + tma.setBalance(a, 1000000) + } + + tma.baseFee = types.NewInt(800_000_000) + + sort.Slice(msgs, func(i, j int) bool { + return msgs[i].Message.Nonce < msgs[j].Message.Nonce + }) + + // add the messages + for _, m := range msgs { + mustAdd(t, mp, m) + } + + // do message selection and check block packing + minGasLimit := int64(0.9 * float64(build.BlockGasLimit)) + + // greedy first + start := time.Now() + selected, err := mp.SelectMessages(context.Background(), ts, 1.0) + if err != nil { + t.Fatal(err) + } + t.Logf("selected %d messages in %s", len(selected), time.Since(start)) + + gasLimit := int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=1.0; packed %d, 
minimum packing: %d", gasLimit, minGasLimit) + } + + // high quality ticket + start = time.Now() + selected, err = mp.SelectMessages(context.Background(), ts, .8) + if err != nil { + t.Fatal(err) + } + t.Logf("selected %d messages in %s", len(selected), time.Since(start)) + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.8; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // mid quality ticket + start = time.Now() + selected, err = mp.SelectMessages(context.Background(), ts, .4) + if err != nil { + t.Fatal(err) + } + t.Logf("selected %d messages in %s", len(selected), time.Since(start)) + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.4; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // low quality ticket + start = time.Now() + selected, err = mp.SelectMessages(context.Background(), ts, .1) + if err != nil { + t.Fatal(err) + } + t.Logf("selected %d messages in %s", len(selected), time.Since(start)) + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.1; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } + + // very low quality ticket + start = time.Now() + selected, err = mp.SelectMessages(context.Background(), ts, .01) + if err != nil { + t.Fatal(err) + } + t.Logf("selected %d messages in %s", len(selected), time.Since(start)) + + gasLimit = int64(0) + for _, m := range selected { + gasLimit += m.Message.GasLimit + } + if gasLimit < minGasLimit { + t.Fatalf("failed to pack with tq=0.01; packed %d, minimum packing: %d", gasLimit, minGasLimit) + } +} diff --git a/chain/messagepool/test-messages2.json.gz b/chain/messagepool/test-messages2.json.gz new file mode 100644 index 00000000000..9d2cfbfe4fe Binary 
files /dev/null and b/chain/messagepool/test-messages2.json.gz differ diff --git a/chain/stmgr/execute.go b/chain/stmgr/execute.go index 8c85a1031bb..bed8578338b 100644 --- a/chain/stmgr/execute.go +++ b/chain/stmgr/execute.go @@ -131,15 +131,17 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c tsKey := ts.Key() // check if we have the trace for this tipset in the cache - sm.execTraceCacheLock.Lock() - if entry, ok := sm.execTraceCache.Get(tsKey); ok { - // we have to make a deep copy since caller can modify the invocTrace - // and we don't want that to change what we store in cache - invocTraceCopy := makeDeepCopy(entry.invocTrace) + if execTraceCacheSize > 0 { + sm.execTraceCacheLock.Lock() + if entry, ok := sm.execTraceCache.Get(tsKey); ok { + // we have to make a deep copy since caller can modify the invocTrace + // and we don't want that to change what we store in cache + invocTraceCopy := makeDeepCopy(entry.invocTrace) + sm.execTraceCacheLock.Unlock() + return entry.postStateRoot, invocTraceCopy, nil + } sm.execTraceCacheLock.Unlock() - return entry.postStateRoot, invocTraceCopy, nil } - sm.execTraceCacheLock.Unlock() var invocTrace []*api.InvocResult st, err := sm.ExecutionTraceWithMonitor(ctx, ts, &InvocationTracer{trace: &invocTrace}) @@ -147,11 +149,13 @@ func (sm *StateManager) ExecutionTrace(ctx context.Context, ts *types.TipSet) (c return cid.Undef, nil, err } - invocTraceCopy := makeDeepCopy(invocTrace) + if execTraceCacheSize > 0 { + invocTraceCopy := makeDeepCopy(invocTrace) - sm.execTraceCacheLock.Lock() - sm.execTraceCache.Add(tsKey, tipSetCacheEntry{st, invocTraceCopy}) - sm.execTraceCacheLock.Unlock() + sm.execTraceCacheLock.Lock() + sm.execTraceCache.Add(tsKey, tipSetCacheEntry{st, invocTraceCopy}) + sm.execTraceCacheLock.Unlock() + } return st, invocTrace, nil } diff --git a/chain/stmgr/stmgr.go b/chain/stmgr/stmgr.go index bf10665e7d4..12b991e577f 100644 --- a/chain/stmgr/stmgr.go +++ b/chain/stmgr/stmgr.go 
@@ -3,6 +3,8 @@ package stmgr import ( "context" "fmt" + "os" + "strconv" "sync" lru "github.com/hashicorp/golang-lru/v2" @@ -40,8 +42,7 @@ import ( const LookbackNoLimit = api.LookbackNoLimit const ReceiptAmtBitwidth = 3 -const execTraceCacheSize = 16 - +var execTraceCacheSize = 16 var log = logging.Logger("statemgr") type StateManagerAPI interface { @@ -74,6 +75,17 @@ func (m *migrationResultCache) keyForMigration(root cid.Cid) dstore.Key { return dstore.NewKey(kStr) } +func init() { + if s := os.Getenv("LOTUS_EXEC_TRACE_CACHE_SIZE"); s != "" { + letc, err := strconv.Atoi(s) + if err != nil { + log.Errorf("failed to parse 'LOTUS_EXEC_TRACE_CACHE_SIZE' env var: %s", err) + } else { + execTraceCacheSize = letc + } + } +} + func (m *migrationResultCache) Get(ctx context.Context, root cid.Cid) (cid.Cid, bool, error) { k := m.keyForMigration(root) @@ -200,9 +212,14 @@ func NewStateManager(cs *store.ChainStore, exec Executor, sys vm.SyscallBuilder, } } - execTraceCache, err := lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize) - if err != nil { - return nil, err + log.Debugf("execTraceCache size: %d", execTraceCacheSize) + var execTraceCache *lru.ARCCache[types.TipSetKey, tipSetCacheEntry] + var err error + if execTraceCacheSize > 0 { + execTraceCache, err = lru.NewARC[types.TipSetKey, tipSetCacheEntry](execTraceCacheSize) + if err != nil { + return nil, err + } } return &StateManager{ diff --git a/chain/stmgr/utils.go b/chain/stmgr/utils.go index c93267d50f8..5e3bbd2788b 100644 --- a/chain/stmgr/utils.go +++ b/chain/stmgr/utils.go @@ -72,7 +72,7 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, base, trace, err := sm.ExecutionTrace(ctx, ts) if err != nil { - return cid.Undef, nil, err + return cid.Undef, nil, xerrors.Errorf("failed to compute base state: %w", err) } for i := ts.Height(); i < height; i++ { @@ -116,6 +116,21 @@ func ComputeState(ctx context.Context, sm *StateManager, height abi.ChainEpoch, if 
ret.ExitCode != 0 { log.Infof("compute state apply message %d failed (exit: %d): %s", i, ret.ExitCode, ret.ActorErr) } + + ir := &api.InvocResult{ + MsgCid: msg.Cid(), + Msg: msg, + MsgRct: &ret.MessageReceipt, + ExecutionTrace: ret.ExecutionTrace, + Duration: ret.Duration, + } + if ret.ActorErr != nil { + ir.Error = ret.ActorErr.Error() + } + if ret.GasCosts != nil { + ir.GasCost = MakeMsgGasCost(msg, ret) + } + trace = append(trace, ir) } root, err := vmi.Flush(ctx) diff --git a/chain/store/store.go b/chain/store/store.go index 3a7b5ba345c..88103ac48be 100644 --- a/chain/store/store.go +++ b/chain/store/store.go @@ -425,6 +425,11 @@ func (cs *ChainStore) MaybeTakeHeavierTipSet(ctx context.Context, ts *types.TipS } defer cs.heaviestLk.Unlock() + + if ts.Equals(cs.heaviest) { + return nil + } + w, err := cs.weight(ctx, cs.StateBlockstore(), ts) if err != nil { return err diff --git a/chain/sub/incoming.go b/chain/sub/incoming.go index 9b66ad97135..a7c0bee57d9 100644 --- a/chain/sub/incoming.go +++ b/chain/sub/incoming.go @@ -8,11 +8,11 @@ import ( "time" lru "github.com/hashicorp/golang-lru/v2" + bserv "github.com/ipfs/boxo/blockservice" blocks "github.com/ipfs/go-block-format" - bserv "github.com/ipfs/go-blockservice" "github.com/ipfs/go-cid" logging "github.com/ipfs/go-log/v2" - "github.com/ipni/storetheindex/announce/message" + "github.com/ipni/go-libipni/announce/message" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/connmgr" "github.com/libp2p/go-libp2p/core/peer" @@ -358,6 +358,8 @@ func (mv *MessageValidator) Validate(ctx context.Context, pid peer.ID, msg *pubs fallthrough case xerrors.Is(err, messagepool.ErrNonceGap): fallthrough + case xerrors.Is(err, messagepool.ErrGasFeeCapTooLow): + fallthrough case xerrors.Is(err, messagepool.ErrNonceTooLow): fallthrough case xerrors.Is(err, messagepool.ErrExistingNonce): diff --git 
a/chain/sub/incoming_test.go b/chain/sub/incoming_test.go index 0a9504a884f..f54e090495c 100644 --- a/chain/sub/incoming_test.go +++ b/chain/sub/incoming_test.go @@ -9,7 +9,7 @@ import ( "github.com/golang/mock/gomock" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" - "github.com/ipni/storetheindex/announce/message" + "github.com/ipni/go-libipni/announce/message" pubsub "github.com/libp2p/go-libp2p-pubsub" pb "github.com/libp2p/go-libp2p-pubsub/pb" "github.com/libp2p/go-libp2p/core/peer" diff --git a/chain/types/blockmsg.go b/chain/types/blockmsg.go index f3114499d67..f8f0a08dbad 100644 --- a/chain/types/blockmsg.go +++ b/chain/types/blockmsg.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "fmt" "github.com/ipfs/go-cid" ) @@ -14,10 +15,13 @@ type BlockMsg struct { func DecodeBlockMsg(b []byte) (*BlockMsg, error) { var bm BlockMsg - if err := bm.UnmarshalCBOR(bytes.NewReader(b)); err != nil { + data := bytes.NewReader(b) + if err := bm.UnmarshalCBOR(data); err != nil { return nil, err } - + if l := data.Len(); l != 0 { + return nil, fmt.Errorf("extraneous data in BlockMsg CBOR encoding: got %d unexpected bytes", l) + } return &bm, nil } diff --git a/chain/types/blockmsg_test.go b/chain/types/blockmsg_test.go new file mode 100644 index 00000000000..02a6227680a --- /dev/null +++ b/chain/types/blockmsg_test.go @@ -0,0 +1,40 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDecodeBlockMsg(t *testing.T) { + type args struct { + b []byte + } + tests := []struct { + name string + data []byte + want *BlockMsg + wantErr bool + }{ + {"decode empty BlockMsg with extra data at the end", []byte{0x83, 0xf6, 0x80, 0x80, 0x20}, nil, true}, + {"decode valid empty BlockMsg", []byte{0x83, 0xf6, 0x80, 0x80}, new(BlockMsg), false}, + {"decode invalid cbor", []byte{0x83, 0xf6, 0x80}, nil, true}, + } + for _, tt := range tests { + data := 
tt.data + want := tt.want + wantErr := tt.wantErr + t.Run(tt.name, func(t *testing.T) { + got, err := DecodeBlockMsg(data) + if wantErr { + assert.Errorf(t, err, "DecodeBlockMsg(%x)", data) + return + } + assert.NoErrorf(t, err, "DecodeBlockMsg(%x)", data) + assert.Equalf(t, want, got, "DecodeBlockMsg(%x)", data) + serialized, err := got.Serialize() + assert.NoErrorf(t, err, "DecodeBlockMsg(%x)", data) + assert.Equalf(t, serialized, data, "DecodeBlockMsg(%x)", data) + }) + } +} diff --git a/chain/types/ethtypes/eth_types.go b/chain/types/ethtypes/eth_types.go index f157c7f94c5..3e0dd872467 100644 --- a/chain/types/ethtypes/eth_types.go +++ b/chain/types/ethtypes/eth_types.go @@ -238,6 +238,30 @@ func (c *EthCall) UnmarshalJSON(b []byte) error { return nil } +type EthSyncingResult struct { + DoneSync bool + StartingBlock EthUint64 + CurrentBlock EthUint64 + HighestBlock EthUint64 +} + +func (sr EthSyncingResult) MarshalJSON() ([]byte, error) { + if sr.DoneSync { + // when done syncing, the json response should be '"result": false' + return []byte("false"), nil + } + + // need to do an anonymous struct to avoid infinite recursion + return json.Marshal(&struct { + StartingBlock EthUint64 `json:"startingblock"` + CurrentBlock EthUint64 `json:"currentblock"` + HighestBlock EthUint64 `json:"highestblock"` + }{ + StartingBlock: sr.StartingBlock, + CurrentBlock: sr.CurrentBlock, + HighestBlock: sr.HighestBlock}) +} + const ( EthAddressLength = 20 EthHashLength = 32 @@ -548,12 +572,12 @@ func (h EthSubscriptionID) String() string { } type EthFilterSpec struct { - // Interpreted as an epoch or one of "latest" for last mined block, "earliest" for first, + // Interpreted as an epoch (in hex) or one of "latest" for last mined block, "earliest" for first, // "pending" for not yet committed messages. // Optional, default: "latest". 
FromBlock *string `json:"fromBlock,omitempty"` - // Interpreted as an epoch or one of "latest" for last mined block, "earliest" for first, + // Interpreted as an epoch (in hex) or one of "latest" for last mined block, "earliest" for first, // "pending" for not yet committed messages. // Optional, default: "latest". ToBlock *string `json:"toBlock,omitempty"` @@ -815,3 +839,93 @@ func (e EthFeeHistoryParams) MarshalJSON() ([]byte, error) { } return json.Marshal([]interface{}{e.BlkCount, e.NewestBlkNum}) } + +type EthBlockNumberOrHash struct { + // PredefinedBlock can be one of "earliest", "pending" or "latest". We could merge this + // field with BlockNumber if the latter could store negative numbers representing + // each predefined value (e.g. -1 for "earliest", -2 for "pending" and -3 for "latest") + PredefinedBlock *string `json:"-"` + + BlockNumber *EthUint64 `json:"blockNumber,omitempty"` + BlockHash *EthHash `json:"blockHash,omitempty"` + RequireCanonical bool `json:"requireCanonical,omitempty"` +} + +func NewEthBlockNumberOrHashFromPredefined(predefined string) EthBlockNumberOrHash { + return EthBlockNumberOrHash{ + PredefinedBlock: &predefined, + BlockNumber: nil, + BlockHash: nil, + RequireCanonical: false, + } +} + +func NewEthBlockNumberOrHashFromNumber(number EthUint64) EthBlockNumberOrHash { + return EthBlockNumberOrHash{ + PredefinedBlock: nil, + BlockNumber: &number, + BlockHash: nil, + RequireCanonical: false, + } +} + +func NewEthBlockNumberOrHashFromHexString(str string) (EthBlockNumberOrHash, error) { + // check if block param is a number (decimal or hex) + var num EthUint64 = 0 + err := num.UnmarshalJSON([]byte(str)) + if err != nil { + return NewEthBlockNumberOrHashFromNumber(0), err + } + + return EthBlockNumberOrHash{ + PredefinedBlock: nil, + BlockNumber: &num, + BlockHash: nil, + RequireCanonical: false, + }, nil +} + +func (e EthBlockNumberOrHash) MarshalJSON() ([]byte, error) { + if e.PredefinedBlock != nil { + return 
json.Marshal(*e.PredefinedBlock) + } + + type tmpStruct EthBlockNumberOrHash + return json.Marshal(tmpStruct(e)) +} + +func (e *EthBlockNumberOrHash) UnmarshalJSON(b []byte) error { + // we first try to unmarshal into a EthBlockNumberOrHash struct to check + // if the block param is a block hash or block number (see EIP-1898). We use + // a temporary struct to avoid infinite recursion. + type tmpStruct EthBlockNumberOrHash + var tmp tmpStruct + if err := json.Unmarshal(b, &tmp); err == nil { + if tmp.BlockNumber != nil && tmp.BlockHash != nil { + return errors.New("cannot specify both blockNumber and blockHash") + } + + *e = EthBlockNumberOrHash(tmp) + return nil + } + + // check if block param is once of the special strings + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + if str == "earliest" || str == "pending" || str == "latest" { + e.PredefinedBlock = &str + return nil + } + + // check if block param is a number (decimal or hex) + var num EthUint64 + if err := num.UnmarshalJSON(b); err == nil { + e.BlockNumber = &num + return nil + } + + return errors.New("invalid block param") +} diff --git a/chain/vm/fvm.go b/chain/vm/fvm.go index 7c79972c7ee..08df7b2e0d2 100644 --- a/chain/vm/fvm.go +++ b/chain/vm/fvm.go @@ -520,10 +520,6 @@ func (vm *FVM) ApplyImplicitMessage(ctx context.Context, cmsg *types.Message) (* } } - if ret.ExitCode != 0 { - return applyRet, fmt.Errorf("implicit message failed with exit code: %d and error: %w", ret.ExitCode, applyRet.ActorErr) - } - return applyRet, nil } diff --git a/cli/client_retr.go b/cli/client_retr.go index b619a28719f..fa8164ab5ef 100644 --- a/cli/client_retr.go +++ b/cli/client_retr.go @@ -10,10 +10,10 @@ import ( "strings" "time" - "github.com/ipfs/go-blockservice" + "github.com/ipfs/boxo/blockservice" + offline "github.com/ipfs/boxo/exchange/offline" + "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/go-cid" - offline 
"github.com/ipfs/go-ipfs-exchange-offline" - "github.com/ipfs/go-merkledag" carv2 "github.com/ipld/go-car/v2" "github.com/ipld/go-car/v2/blockstore" "github.com/ipld/go-ipld-prime" diff --git a/cli/evm.go b/cli/evm.go index 84cbf8c61f6..7eb36f8953b 100644 --- a/cli/evm.go +++ b/cli/evm.go @@ -130,7 +130,7 @@ var EvmCallSimulateCmd = &cli.Command{ From: &fromEthAddr, To: &toEthAddr, Data: params, - }, "") + }, ethtypes.NewEthBlockNumberOrHashFromPredefined("latest")) if err != nil { fmt.Println("Eth call fails, return val: ", res) return err @@ -518,7 +518,7 @@ var EvmGetBytecode = &cli.Command{ defer closer() ctx := ReqContext(cctx) - code, err := api.EthGetCode(ctx, contractAddr, "latest") + code, err := api.EthGetCode(ctx, contractAddr, ethtypes.NewEthBlockNumberOrHashFromPredefined("latest")) if err != nil { return err } diff --git a/cli/info.go b/cli/info.go index 007e3655fda..8b36be4889b 100644 --- a/cli/info.go +++ b/cli/info.go @@ -20,6 +20,7 @@ import ( "github.com/filecoin-project/lotus/api/v1api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/journal/alerting" ) var infoCmd = &cli.Command{ @@ -62,6 +63,21 @@ func infoCmdAct(cctx *cli.Context) error { fmt.Printf(" [epoch %s]\n", color.MagentaString(("%d"), status.SyncStatus.Epoch)) fmt.Printf("Peers to: [publish messages %d] [publish blocks %d]\n", status.PeerStatus.PeersToPublishMsgs, status.PeerStatus.PeersToPublishBlocks) + alerts, err := fullapi.LogAlerts(ctx) + if err != nil { + fmt.Printf("ERROR: getting alerts: %s\n", err) + } + + activeAlerts := make([]alerting.Alert, 0) + for _, alert := range alerts { + if alert.Active { + activeAlerts = append(activeAlerts, alert) + } + } + if len(activeAlerts) > 0 { + fmt.Printf("%s (check %s)\n", color.RedString("âš  %d Active alerts", len(activeAlerts)), color.YellowString("lotus log alerts")) + } + //Chain 
health calculated as percentage: amount of blocks in last finality / very healthy amount of blocks in a finality (900 epochs * 5 blocks per tipset) health := (100 * (900 * status.ChainStatus.BlocksPerTipsetLastFinality) / (900 * 5)) switch { diff --git a/cli/state.go b/cli/state.go index 9031ba870c5..667f6fb19b6 100644 --- a/cli/state.go +++ b/cli/state.go @@ -1065,12 +1065,19 @@ var StateComputeStateCmd = &cli.Command{ ctx := ReqContext(cctx) - ts, err := LoadTipSet(ctx, cctx, api) + h := abi.ChainEpoch(cctx.Uint64("vm-height")) + var ts *types.TipSet + if tss := cctx.String("tipset"); tss != "" { + ts, err = ParseTipSetRef(ctx, api, tss) + } else if h > 0 { + ts, err = api.ChainGetTipSetByHeight(ctx, h, types.EmptyTSK) + } else { + ts, err = api.ChainHead(ctx) + } if err != nil { return err } - h := abi.ChainEpoch(cctx.Uint64("vm-height")) if h == 0 { h = ts.Height() } @@ -1528,6 +1535,9 @@ func printMsg(ctx context.Context, api v0api.FullNode, msg cid.Cid, mw *lapi.Msg if err := printReceiptReturn(ctx, api, m, mw.Receipt); err != nil { return err } + if mw.Receipt.EventsRoot != nil { + fmt.Printf("Events Root: %s\n", mw.Receipt.EventsRoot) + } return nil } diff --git a/cli/wallet.go b/cli/wallet.go index c66275cdd50..2afe8617b95 100644 --- a/cli/wallet.go +++ b/cli/wallet.go @@ -10,6 +10,7 @@ import ( "strings" "github.com/urfave/cli/v2" + "golang.org/x/term" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" @@ -327,13 +328,21 @@ var walletImport = &cli.Command{ var inpdata []byte if !cctx.Args().Present() || cctx.Args().First() == "-" { - reader := bufio.NewReader(os.Stdin) - fmt.Print("Enter private key: ") - indata, err := reader.ReadBytes('\n') - if err != nil { - return err + if term.IsTerminal(int(os.Stdin.Fd())) { + fmt.Print("Enter private key(not display in the terminal): ") + inpdata, err = term.ReadPassword(int(os.Stdin.Fd())) + if err != nil { + return err + } + fmt.Println() + } else { + reader := 
bufio.NewReader(os.Stdin) + indata, err := reader.ReadBytes('\n') + if err != nil { + return err + } + inpdata = indata } - inpdata = indata } else { fdata, err := os.ReadFile(cctx.Args().First()) diff --git a/cmd/lotus-bench/main.go b/cmd/lotus-bench/main.go index 12d310b6573..6e7e274f218 100644 --- a/cmd/lotus-bench/main.go +++ b/cmd/lotus-bench/main.go @@ -98,14 +98,16 @@ func main() { log.Info("Starting lotus-bench") app := &cli.App{ - Name: "lotus-bench", - Usage: "Benchmark performance of lotus on your hardware", - Version: build.UserVersion(), + Name: "lotus-bench", + Usage: "Benchmark performance of lotus on your hardware", + Version: build.UserVersion(), + DisableSliceFlagSeparator: true, Commands: []*cli.Command{ proveCmd, sealBenchCmd, simpleCmd, importBenchCmd, + rpcCmd, }, } diff --git a/cmd/lotus-bench/rpc.go b/cmd/lotus-bench/rpc.go new file mode 100644 index 00000000000..5da784c6ef3 --- /dev/null +++ b/cmd/lotus-bench/rpc.go @@ -0,0 +1,576 @@ +package main + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "os" + "os/signal" + "sort" + "strconv" + "strings" + "sync" + "text/tabwriter" + "time" + + "github.com/urfave/cli/v2" +) + +var rpcCmd = &cli.Command{ + Name: "rpc", + Usage: "Runs a concurrent stress test on one or more rpc methods and prints the performance metrics including latency distribution and histogram", + Description: `This benchmark is designed to stress test the rpc methods of a lotus node so that we can simulate real world usage and measure the performance of rpc methods on the node. 
+ +This benchmark has the following features: +* Can query each method both sequentially and concurrently +* Supports rate limiting +* Can query multiple different endpoints at once (supporting different concurrency level and rate limiting for each method) +* Gives a nice reporting summary of the stress testing of each method (including latency distribution, histogram and more) +* Easy to use + +To use this benchmark you must specify the rpc methods you want to test using the --method options, the format of it is: + + --method=NAME[:CONCURRENCY][:QPS][:PARAMS] where only NAME is required. + +Here are some real examples: + lotus-bench rpc --method='eth_chainId' // run eth_chainId with default concurrency and qps + lotus-bench rpc --method='eth_chainId:3' // override concurrency to 3 + lotus-bench rpc --method='eth_chainId::100' // override to 100 qps while using default concurrency + lotus-bench rpc --method='eth_chainId:3:100' // run using 3 workers but limit to 100 qps + lotus-bench rpc --method='eth_getTransactionCount:::["0xd4c70007F3F502f212c7e6794b94C06F36173B36", "latest"]' // run using optional params while using default concurrency and qps + lotus-bench rpc --method='eth_chainId' --method='eth_getTransactionCount:10:0:["0xd4c70007F3F502f212c7e6794b94C06F36173B36", "latest"]' // run multiple methods at once + +NOTE: The last two examples will not work until we upgrade urfave dependency (tracked in https://github.com/urfave/cli/issues/1734)`, + Flags: []cli.Flag{ + &cli.StringFlag{ + Name: "endpoint", + Value: "http://127.0.0.1:1234/rpc/v1", + Usage: "The rpc endpoint to benchmark", + }, + &cli.DurationFlag{ + Name: "duration", + Value: 60 * time.Second, + Usage: "Duration of benchmark in seconds", + }, + &cli.IntFlag{ + Name: "concurrency", + Value: 10, + Usage: "How many workers should be used per rpc method (can be overridden per method)", + }, + &cli.IntFlag{ + Name: "qps", + Value: 0, + Usage: "How many requests per second should be sent per rpc 
method (can be overridden per method), a value of 0 means no limit", + }, + &cli.StringSliceFlag{ + Name: "method", + Usage: `Method to benchmark, you can specify multiple methods by repeating this flag. You can also specify method specific options to set the concurrency and qps for each method (see usage). +`, + }, + &cli.DurationFlag{ + Name: "watch", + Value: 0 * time.Second, + Usage: "If >0 then generates reports every N seconds (only supports linux/unix)", + }, + &cli.BoolFlag{ + Name: "print-response", + Value: false, + Usage: "print the response of each request", + }, + }, + Action: func(cctx *cli.Context) error { + if len(cctx.StringSlice("method")) == 0 { + return errors.New("you must specify and least one method to benchmark") + } + + var rpcMethods []*RPCMethod + for _, str := range cctx.StringSlice("method") { + entries := strings.SplitN(str, ":", 4) + if len(entries) == 0 { + return errors.New("invalid method format") + } + + // check if concurrency was specified + concurrency := cctx.Int("concurrency") + if len(entries) > 1 { + if len(entries[1]) > 0 { + var err error + concurrency, err = strconv.Atoi(entries[1]) + if err != nil { + return fmt.Errorf("could not parse concurrency value from method %s: %v", entries[0], err) + } + } + } + + // check if qps was specified + qps := cctx.Int("qps") + if len(entries) > 2 { + if len(entries[2]) > 0 { + var err error + qps, err = strconv.Atoi(entries[2]) + if err != nil { + return fmt.Errorf("could not parse qps value from method %s: %v", entries[0], err) + } + } + } + + // check if params was specified + params := "[]" + if len(entries) > 3 { + params = entries[3] + } + + rpcMethods = append(rpcMethods, &RPCMethod{ + w: os.Stdout, + uri: cctx.String("endpoint"), + method: entries[0], + concurrency: concurrency, + qps: qps, + params: params, + printResp: cctx.Bool("print-response"), + }) + } + + // terminate early on ctrl+c + c := make(chan os.Signal, 1) + signal.Notify(c, os.Interrupt) + go func() { + <-c + 
fmt.Println("Received interrupt, stopping...") + for _, method := range rpcMethods { + method.Stop() + } + }() + + // stop all threads after duration + go func() { + time.Sleep(cctx.Duration("duration")) + for _, e := range rpcMethods { + e.Stop() + } + }() + + // start all threads + var wg sync.WaitGroup + wg.Add(len(rpcMethods)) + + for _, e := range rpcMethods { + go func(e *RPCMethod) { + defer wg.Done() + err := e.Run() + if err != nil { + fmt.Printf("error running rpc method: %v\n", err) + } + }(e) + } + + // if watch is set then print a report every N seconds + var progressCh chan struct{} + if cctx.Duration("watch") > 0 { + progressCh = make(chan struct{}, 1) + go func(progressCh chan struct{}) { + ticker := time.NewTicker(cctx.Duration("watch")) + for { + clearAndPrintReport := func() { + // clear the screen move the curser to the top left + fmt.Print("\033[2J") + fmt.Printf("\033[%d;%dH", 1, 1) + for i, e := range rpcMethods { + e.Report() + if i < len(rpcMethods)-1 { + fmt.Println() + } + } + } + select { + case <-ticker.C: + clearAndPrintReport() + case <-progressCh: + clearAndPrintReport() + return + } + } + }(progressCh) + } + + wg.Wait() + + if progressCh != nil { + // wait for the watch go routine to return + progressCh <- struct{}{} + + // no need to print the report again + return nil + } + + // print the report for each endpoint + for i, e := range rpcMethods { + e.Report() + if i < len(rpcMethods)-1 { + fmt.Println() + } + } + + return nil + }, +} + +// RPCMethod handles the benchmarking of a single endpoint method. 
+type RPCMethod struct { + w io.Writer + // the endpoint uri + uri string + // the rpc method we want to benchmark + method string + // the number of concurrent requests to make to this endpoint + concurrency int + // if >0 then limit to qps is the max number of requests per second to make to this endpoint (0 = no limit) + qps int + // many endpoints require specific parameters to be passed + params string + // whether or not to print the response of each request (useful for debugging) + printResp bool + // instruct the worker go routines to stop + stopCh chan struct{} + // when the endpoint bencharking started + start time.Time + // results channel is used by the workers to send results to the reporter + results chan *result + // reporter handles reading the results from workers and printing the report statistics + reporter *Reporter +} + +// result is the result of a single rpc method request. +type result struct { + err error + statusCode *int + duration time.Duration +} + +func (rpc *RPCMethod) Run() error { + client := &http.Client{ + Timeout: 0, + } + + var wg sync.WaitGroup + wg.Add(rpc.concurrency) + + rpc.results = make(chan *result, rpc.concurrency*1_000) + rpc.stopCh = make(chan struct{}, rpc.concurrency) + + go func() { + rpc.reporter = NewReporter(rpc.results, rpc.w) + rpc.reporter.Run() + }() + + rpc.start = time.Now() + + // throttle the number of requests per second + var qpsTicker *time.Ticker + if rpc.qps > 0 { + qpsTicker = time.NewTicker(time.Second / time.Duration(rpc.qps)) + } + + for i := 0; i < rpc.concurrency; i++ { + go func() { + rpc.startWorker(client, qpsTicker) + wg.Done() + }() + } + wg.Wait() + + // close the results channel so reporter will stop + close(rpc.results) + + // wait until the reporter is done + <-rpc.reporter.doneCh + + return nil +} + +func (rpc *RPCMethod) startWorker(client *http.Client, qpsTicker *time.Ticker) { + for { + // check if we should stop + select { + case <-rpc.stopCh: + return + default: + } + + // wait 
for the next tick if we are rate limiting this endpoint + if qpsTicker != nil { + <-qpsTicker.C + } + + req, err := rpc.buildRequest() + if err != nil { + log.Fatalln(err) + } + + start := time.Now() + + var statusCode *int + + // send request the endpoint + resp, err := client.Do(req) + if err != nil { + err = fmt.Errorf("HTTP error: %s", err.Error()) + } else { + statusCode = &resp.StatusCode + + // there was not a HTTP error but we need to still check the json response for errrors + var data []byte + data, err = io.ReadAll(resp.Body) + if err != nil { + log.Fatalln(err) + } + + // we are only interested if it has the error field in the response + type respData struct { + Error struct { + Code int `json:"code"` + Message string `json:"message"` + } `json:"error"` + } + + // unmarshal the response into a struct so we can check for errors + var d respData + err = json.Unmarshal(data, &d) + if err != nil { + log.Fatalln(err) + } + + // if the response has an error json message then it should be considered an error just like any http error + if len(d.Error.Message) > 0 { + // truncate the error message if it is too long + if len(d.Error.Message) > 1000 { + d.Error.Message = d.Error.Message[:1000] + "..." 
+ } + // remove newlines from the error message so we don't screw up the report + d.Error.Message = strings.ReplaceAll(d.Error.Message, "\n", "") + + err = fmt.Errorf("JSON error: code:%d, message:%s", d.Error.Code, d.Error.Message) + } + + if rpc.printResp { + fmt.Printf("[%s] %s", rpc.method, string(data)) + } + + resp.Body.Close() //nolint:errcheck + } + + rpc.results <- &result{ + statusCode: statusCode, + err: err, + duration: time.Since(start), + } + } +} + +func (rpc *RPCMethod) buildRequest() (*http.Request, error) { + jreq, err := json.Marshal(struct { + Jsonrpc string `json:"jsonrpc"` + ID int `json:"id"` + Method string `json:"method"` + Params json.RawMessage `json:"params"` + }{ + Jsonrpc: "2.0", + Method: rpc.method, + Params: json.RawMessage(rpc.params), + ID: 0, + }) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("POST", rpc.uri, bytes.NewReader(jreq)) + if err != nil { + return nil, err + } + + req.Header.Set("Accept", "application/json") + + return req, nil +} + +func (rpc *RPCMethod) Stop() { + for i := 0; i < rpc.concurrency; i++ { + rpc.stopCh <- struct{}{} + } +} + +func (rpc *RPCMethod) Report() { + total := time.Since(rpc.start) + fmt.Fprintf(rpc.w, "[%s]:\n", rpc.method) + fmt.Fprintf(rpc.w, "- Options:\n") + fmt.Fprintf(rpc.w, " - concurrency: %d\n", rpc.concurrency) + fmt.Fprintf(rpc.w, " - params: %s\n", rpc.params) + fmt.Fprintf(rpc.w, " - qps: %d\n", rpc.qps) + rpc.reporter.Print(total, rpc.w) +} + +// Reporter reads the results from the workers through the results channel and aggregates the results. 
+type Reporter struct { + // write the report to this writer + w io.Writer + // the reporter read the results from this channel + results chan *result + // doneCh is used to signal that the reporter has finished reading the results (channel has closed) + doneCh chan bool + + // lock protect the following fields during critical sections (if --watch was specified) + lock sync.Mutex + // the latencies of all requests + latencies []int64 + // the number of requests that returned each status code + statusCodes map[int]int + // the number of errors that occurred + errors map[string]int +} + +func NewReporter(results chan *result, w io.Writer) *Reporter { + return &Reporter{ + w: w, + results: results, + doneCh: make(chan bool, 1), + statusCodes: make(map[int]int), + errors: make(map[string]int), + } +} + +func (r *Reporter) Run() { + for res := range r.results { + r.lock.Lock() + + r.latencies = append(r.latencies, res.duration.Milliseconds()) + + if res.statusCode != nil { + r.statusCodes[*res.statusCode]++ + } + + if res.err != nil { + if len(r.errors) < 1_000_000 { + r.errors[res.err.Error()]++ + } else { + // we don't want to store too many errors in memory + r.errors["hidden"]++ + } + } else { + r.errors["nil"]++ + } + + r.lock.Unlock() + } + + r.doneCh <- true +} + +func (r *Reporter) Print(elapsed time.Duration, w io.Writer) { + r.lock.Lock() + defer r.lock.Unlock() + + nrReq := int64(len(r.latencies)) + if nrReq == 0 { + fmt.Println("No requests were made") + return + } + + // we need to sort the latencies slice to calculate the percentiles + sort.Slice(r.latencies, func(i, j int) bool { + return r.latencies[i] < r.latencies[j] + }) + + var totalLatency int64 = 0 + for _, latency := range r.latencies { + totalLatency += latency + } + + fmt.Fprintf(w, "- Total Requests: %d\n", nrReq) + fmt.Fprintf(w, "- Total Duration: %dms\n", elapsed.Milliseconds()) + fmt.Fprintf(w, "- Requests/sec: %f\n", float64(nrReq)/elapsed.Seconds()) + fmt.Fprintf(w, "- Avg latency: 
%dms\n", totalLatency/nrReq) + fmt.Fprintf(w, "- Median latency: %dms\n", r.latencies[nrReq/2]) + fmt.Fprintf(w, "- Latency distribution:\n") + percentiles := []float64{0.1, 0.5, 0.9, 0.95, 0.99, 0.999} + for _, p := range percentiles { + idx := int64(p * float64(nrReq)) + fmt.Fprintf(w, " %s%% in %dms\n", fmt.Sprintf("%.2f", p*100.0), r.latencies[idx]) + } + + // create a simple histogram with 10 buckets spanning the range of latency + // into equal ranges + // + nrBucket := 10 + buckets := make([]Bucket, nrBucket) + latencyRange := r.latencies[len(r.latencies)-1] + bucketRange := latencyRange / int64(nrBucket) + + // mark the end of each bucket + for i := 0; i < nrBucket; i++ { + buckets[i].start = int64(i) * bucketRange + buckets[i].end = buckets[i].start + bucketRange + // extend the last bucked by any remaning range caused by the integer division + if i == nrBucket-1 { + buckets[i].end = latencyRange + } + } + + // count the number of requests in each bucket + currBucket := 0 + for i := 0; i < len(r.latencies); { + if r.latencies[i] <= buckets[currBucket].end { + buckets[currBucket].cnt++ + i++ + } else { + currBucket++ + } + } + + // print the histogram using a tabwriter which will align the columns nicely + fmt.Fprintf(w, "- Histogram:\n") + const padding = 2 + tabWriter := tabwriter.NewWriter(w, 0, 0, padding, ' ', tabwriter.AlignRight|tabwriter.Debug) + for i := 0; i < nrBucket; i++ { + ratio := float64(buckets[i].cnt) / float64(nrReq) + bars := strings.Repeat("#", int(ratio*100)) + fmt.Fprintf(tabWriter, " %d-%dms\t%d\t%s (%s%%)\n", buckets[i].start, buckets[i].end, buckets[i].cnt, bars, fmt.Sprintf("%.2f", ratio*100)) + } + tabWriter.Flush() //nolint:errcheck + + fmt.Fprintf(w, "- Status codes:\n") + for code, cnt := range r.statusCodes { + fmt.Fprintf(w, " [%d]: %d\n", code, cnt) + } + + // print the 10 most occurring errors (in case error values are not unique) + // + type kv struct { + err string + cnt int + } + var sortedErrors []kv + for err, cnt := 
range r.errors { + sortedErrors = append(sortedErrors, kv{err, cnt}) + } + sort.Slice(sortedErrors, func(i, j int) bool { + return sortedErrors[i].cnt > sortedErrors[j].cnt + }) + fmt.Fprintf(w, "- Errors (top 10):\n") + for i, se := range sortedErrors { + if i > 10 { + break + } + fmt.Fprintf(w, " [%s]: %d\n", se.err, se.cnt) + } +} + +type Bucket struct { + start int64 + // the end value of the bucket + end int64 + // how many entries are in the bucket + cnt int +} diff --git a/cmd/lotus-fountain/main.go b/cmd/lotus-fountain/main.go index 780aef91669..f6d503c2f3d 100644 --- a/cmd/lotus-fountain/main.go +++ b/cmd/lotus-fountain/main.go @@ -7,6 +7,7 @@ import ( "net" "net/http" "os" + "strings" "time" rice "github.com/GeertJohan/go.rice" @@ -15,10 +16,14 @@ import ( "golang.org/x/xerrors" "github.com/filecoin-project/go-address" + verifregtypes9 "github.com/filecoin-project/go-state-types/builtin/v9/verifreg" "github.com/filecoin-project/lotus/api/v0api" "github.com/filecoin-project/lotus/build" + "github.com/filecoin-project/lotus/chain/actors" + "github.com/filecoin-project/lotus/chain/actors/builtin/verifreg" "github.com/filecoin-project/lotus/chain/types" + "github.com/filecoin-project/lotus/chain/types/ethtypes" lcli "github.com/filecoin-project/lotus/cli" ) @@ -70,6 +75,11 @@ var runCmd = &cli.Command{ EnvVars: []string{"LOTUS_FOUNTAIN_AMOUNT"}, Value: "50", }, + &cli.Uint64Flag{ + Name: "data-cap", + EnvVars: []string{"LOTUS_DATACAP_AMOUNT"}, + Value: verifregtypes9.MinVerifiedDealSize.Uint64(), + }, &cli.Float64Flag{ Name: "captcha-threshold", Value: 0.5, @@ -108,6 +118,7 @@ var runCmd = &cli.Command{ ctx: ctx, api: nodeApi, from: from, + allowance: types.NewInt(cctx.Uint64("data-cap")), sendPerRequest: sendPerRequest, limiter: NewLimiter(LimiterConfig{ TotalRate: 500 * time.Millisecond, @@ -124,6 +135,8 @@ var runCmd = &cli.Command{ http.Handle("/", 
http.FileServer(box.HTTPBox())) http.HandleFunc("/funds.html", prepFundsHtml(box)) http.Handle("/send", h) + http.HandleFunc("/datacap.html", prepDataCapHtml(box)) + http.Handle("/datacap", h) fmt.Printf("Open http://%s\n", cctx.String("front")) go func() { @@ -156,12 +169,24 @@ func prepFundsHtml(box *rice.Box) http.HandlerFunc { } } +func prepDataCapHtml(box *rice.Box) http.HandlerFunc { + tmpl := template.Must(template.New("datacaps").Parse(box.MustString("datacap.html"))) + return func(w http.ResponseWriter, r *http.Request) { + err := tmpl.Execute(w, os.Getenv("RECAPTCHA_SITE_KEY")) + if err != nil { + http.Error(w, err.Error(), http.StatusBadGateway) + return + } + } +} + type handler struct { ctx context.Context api v0api.FullNode from address.Address sendPerRequest types.FIL + allowance types.BigInt limiter *Limiter recapThreshold float64 @@ -187,24 +212,41 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusBadGateway) return } + if !capResp.Success || capResp.Score < h.recapThreshold { log.Infow("spam", "capResp", capResp) http.Error(w, "spam protection", http.StatusUnprocessableEntity) return } - to, err := address.NewFromString(r.FormValue("address")) - if err != nil { + addressInput := r.FormValue("address") + + var filecoinAddress address.Address + var decodeError error + + if strings.HasPrefix(addressInput, "0x") { + ethAddress, err := ethtypes.ParseEthAddress(addressInput) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + filecoinAddress, decodeError = ethAddress.ToFilecoinAddress() + } else { + filecoinAddress, decodeError = address.NewFromString(addressInput) + } + + if decodeError != nil { - http.Error(w, err.Error(), http.StatusBadRequest) + http.Error(w, decodeError.Error(), http.StatusBadRequest) return } - if to == address.Undef { + if filecoinAddress == address.Undef { http.Error(w, "empty address", http.StatusBadRequest) return } // Limit based on wallet address - limiter = 
h.limiter.GetWalletLimiter(to.String()) + limiter := h.limiter.GetWalletLimiter(filecoinAddress.String()) if !limiter.Allow() { http.Error(w, http.StatusText(http.StatusTooManyRequests)+": wallet limit", http.StatusTooManyRequests) return @@ -227,11 +269,37 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - smsg, err := h.api.MpoolPushMessage(h.ctx, &types.Message{ - Value: types.BigInt(h.sendPerRequest), - From: h.from, - To: to, - }, nil) + var smsg *types.SignedMessage + if r.RequestURI == "/send" { + smsg, err = h.api.MpoolPushMessage( + h.ctx, &types.Message{ + Value: types.BigInt(h.sendPerRequest), + From: h.from, + To: filecoinAddress, + }, nil) + } else if r.RequestURI == "/datacap" { + var params []byte + params, err = actors.SerializeParams( + &verifregtypes9.AddVerifiedClientParams{ + Address: filecoinAddress, + Allowance: h.allowance, + }) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + smsg, err = h.api.MpoolPushMessage( + h.ctx, &types.Message{ + Params: params, + From: h.from, + To: verifreg.Address, + Method: verifreg.Methods.AddVerifiedClient, + }, nil) + } else { + http.Error(w, "unsupported endpoint", http.StatusBadRequest) + return + } + if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return diff --git a/cmd/lotus-fountain/site/datacap.html b/cmd/lotus-fountain/site/datacap.html new file mode 100644 index 00000000000..7434b8e48e4 --- /dev/null +++ b/cmd/lotus-fountain/site/datacap.html @@ -0,0 +1,41 @@ + + + + Grant DataCap - Lotus Fountain + + + + + + +
+
+
+

Grant datacap

+

Please input your address to receive a data cap on the Calibration Testnet.

+
+
+
+ Enter destination address: + + +
+
+
+ + +
+ + diff --git a/cmd/lotus-fountain/site/funds.html b/cmd/lotus-fountain/site/funds.html index c6916239fcd..a09d73964f3 100644 --- a/cmd/lotus-fountain/site/funds.html +++ b/cmd/lotus-fountain/site/funds.html @@ -15,12 +15,13 @@
- [SENDING FUNDS] +

Send funds

+

Please input your address to receive test FIL (tFIL) on the Calibration Testnet. This faucet dispenses 100 tFIL.

Enter destination address: - +