diff --git a/Makefile b/Makefile index f25df5611..fbb82b44e 100755 --- a/Makefile +++ b/Makefile @@ -21,6 +21,7 @@ deps: compile: REBAR_CONFIG="config/grpc_client_gen_local.config" $(REBAR) grpc gen + REBAR_CONFIG="config/grpc_client_gen.config" $(REBAR) grpc gen $(MAKE) external_svcs $(REBAR) compile @@ -72,6 +73,7 @@ devrelease: grpc: @echo "generating miner grpc services" REBAR_CONFIG="config/grpc_client_gen_local.config" $(REBAR) grpc gen + REBAR_CONFIG="config/grpc_client_gen.config" $(REBAR) grpc gen $(GRPC_SERVICE_DIR): @echo "miner grpc service directory $(directory) does not exist" diff --git a/config/docker-testnet.config b/config/docker-testnet.config index b197e6d23..36209b297 100644 --- a/config/docker-testnet.config +++ b/config/docker-testnet.config @@ -21,6 +21,14 @@ {miner, [ {network, testnet}, + {mode, gateway}, + {gateways_run_chain, false}, + {seed_validators, [ + {"1ZPNnNd9k5qiQXXigKifQpCPiy5HTbszQDSyLM56ywk7ihNRvt6", "18.223.171.149", 8080}, %% test-val2 + {"1ZYe21WzqJGkWjXvyEt2c8ALSrufPfjzqfQP2SGy61UJd2h9EbL", "3.17.164.253", 8080}, %% test-val3 + {"1ZAxCrEsigGVbLUM37Jki6p88kyZ5NVqjVC6oHSbqu49t7bQDym", "18.191.60.231", 8080} %% test-val6 + ] + }, {api_base_url, "https://testnet-api.helium.wtf/v1"}, {jsonrpc_ip, {0,0,0,0}}, %% bind jsonrpc to host when in docker container {gateway_and_mux_enable, false}, diff --git a/config/grpc_client_gen.config b/config/grpc_client_gen.config new file mode 100644 index 000000000..7a97923a2 --- /dev/null +++ b/config/grpc_client_gen.config @@ -0,0 +1,31 @@ +{plugins, [ + {grpcbox_plugin, + {git, "https://github.com/andymck/grpcbox_plugin.git", + {branch, "andymck/ts-master/combined-opts-and-template-changes"}}} +]}. 
+ +{grpc, [ + {proto_files, [ + "_build/default/lib/helium_proto/src/service/gateway.proto" + ]}, + {beam_out_dir, "src/grpc/autogen/client"}, + {out_dir, "src/grpc/autogen/client"}, + {keep_beams, false}, + {create_services, false}, + {override_gpb_defaults, true}, + {gpb_opts, [ + {rename,{msg_fqname,base_name}}, + use_packages, + {report_errors, false}, + {descriptor, false}, + {recursive, false}, + {i, "_build/default/lib/helium_proto/src"}, + {o, "src/grpc/autogen/client"}, + {module_name_prefix, ""}, + {module_name_suffix, "_miner_client_pb"}, + {rename, {msg_name, {suffix, "_pb"}}}, + {strings_as_binaries, false}, + type_specs, + {defs_as_proplists, true} + ]} +]}. diff --git a/config/sys.config b/config/sys.config index 159cf7e64..8495ef46a 100644 --- a/config/sys.config +++ b/config/sys.config @@ -93,6 +93,10 @@ {log_file_time_to_roll, 86400} %% rotate logs once a day ]} ]}, + {sibyl, + [ + {validator_ignore_list, []} + ]}, {miner, [ {denylist_keys, ["1SbEYKju337P6aYsRd9DT2k4qgK5ZK62kXbSvnJgqeaxK3hqQrYURZjL"]}, @@ -101,6 +105,7 @@ {jsonrpc_ip, {127,0,0,1}}, %% bind JSONRPC to localhost only {jsonrpc_port, 4467}, {mode, gateway}, + {gateways_run_chain, true}, %% if false, gateways will no longer follow the chain {use_ebus, true}, {batch_size, 2500}, {curve, 'SS512'}, @@ -115,6 +120,13 @@ {default_routers, ["/p2p/11w77YQLhgUt8HUJrMtntGGr97RyXmot1ofs5Ct2ELTmbFoYsQa","/p2p/11afuQSrmk52mgxLu91AdtDXbJ9wmqWBUxC3hvjejoXkxEZfPvY"]}, {mark_mods, [miner_hbbft_handler]}, {stabilization_period, 50000}, + {seed_validators, [ + {"11tk4zzbyfMPYYHYda255ACoqfYFVdrUSoCWrCYfn8BoyuYrERK", "52.49.199.40", 8080}, %% ireland + {"115PmCR6fpFihdjw626JXYdUEdzwjh66yoWzWkMvB9CRGEx1U6G", "3.132.190.192", 8080}, %% ohio + {"11pUovhssQdXzrfcYMTUrNNTQossgny8WqhfdbprrAVFyHcmvAN", "35.84.173.125", 8080}, %% oregon + {"11yJXQPG9deHqvw2ac6VWtNP7gZj8X3t3Qb3Gqm9j729p4AsdaA", "3.38.70.101", 8080}, %% seoul + {"11Gx2yPEmBGUrbHUiUWQs9vV7JDHQLZSddQs6e3WB2uvqSMUDBW", "54.251.77.229", 8080} %% 
singapore + ]}, {reg_domains_file, "countries_reg_domains.csv"}, {frequency_data, #{'US915' => [903.9, 904.1, 904.3, 904.5, 904.7, 904.9, 905.1, 905.3], 'EU868' => [867.1, 867.3, 867.5, 867.7, 867.9, 868.1, 868.3, 868.5], diff --git a/config/test.config b/config/test.config index f7c4b0ac8..a178b1f23 100644 --- a/config/test.config +++ b/config/test.config @@ -8,6 +8,11 @@ {libp2p, [ {use_dns_for_seeds, false} ]}, +{sibyl, + [ + {poc_mgr_mod, miner_poc_mgr}, + {poc_report_handler, miner_poc_report_handler} + ]}, {blockchain, [ {seed_dns_cname, ""}, diff --git a/config/test_val.config.src b/config/test_val.config.src index 0c9e3aeec..b2945a8e3 100644 --- a/config/test_val.config.src +++ b/config/test_val.config.src @@ -52,6 +52,12 @@ %% as without one miner_lora is not started %% including the params anyway in case someone needs it in this env {region_override, 'US915'}, - {gateway_and_mux_enable, false} + {gateway_and_mux_enable, false}, + {seed_validators, [ + {"1ZPNnNd9k5qiQXXigKifQpCPiy5HTbszQDSyLM56ywk7ihNRvt6", "18.223.171.149", 8080}, %% test-val2 + {"1ZYe21WzqJGkWjXvyEt2c8ALSrufPfjzqfQP2SGy61UJd2h9EbL", "3.17.164.253", 8080}, %% test-val3 + {"1ZAxCrEsigGVbLUM37Jki6p88kyZ5NVqjVC6oHSbqu49t7bQDym", "18.191.60.231", 8080} %% test-val6 + ] + } ]} ]. 
diff --git a/rebar.config b/rebar.config index d222e70a7..62483d276 100644 --- a/rebar.config +++ b/rebar.config @@ -10,9 +10,9 @@ {deps, [ {blockchain, {git, "https://github.com/helium/blockchain-core.git", - {branch, "master"}}}, + {branch, "andymck/poc-grpc-v2"}}}, {sibyl, {git, "https://github.com/helium/sibyl.git", - {branch, "master"}}}, + {branch, "andymck/poc-grpc"}}}, {hbbft, {git, "https://github.com/helium/erlang-hbbft.git", {branch, "master"}}}, {dkg, {git, "https://github.com/helium/erlang-dkg.git", {branch, "master"}}}, @@ -21,8 +21,10 @@ {jsx, "3.1.0"}, {kvc, {git, "https://github.com/etrepum/kvc", {tag, "v1.7.0"}}}, {longfi, {git, "https://github.com/helium/longfi-erlang", {tag, "0.2.2"}}}, + {grpc_lib, {git, "https://github.com/Bluehouse-Technology/grpc_lib", {branch, "master"}}}, {grpc_client, {git, "https://github.com/Bluehouse-Technology/grpc_client.git", {branch, "master"}}}, - {http2_client, {git, "https://github.com/Bluehouse-Technology/http2_client", {branch, "master"}}}, + {http2_client, {git, "https://github.com/Bluehouse-Technology/http2_client", + {branch, "master"}}}, recon, {elli, "3.3.0"}, {jsonrpc2, {git, "https://github.com/zuiderkwast/jsonrpc2-erlang", @@ -33,14 +35,14 @@ {xref_checks, [ undefined_function_calls, - undefined_functions, + undefined_functions %locals_not_used, - deprecated_function_calls, - deprecated_functions + %deprecated_function_calls, + %deprecated_functions ]}. {plugins, [ - {grpcbox_plugin, {git, "https://github.com/andymck/grpcbox_plugin.git", {branch, "andymck/ts-master"}}}, + {grpcbox_plugin, {git, "https://github.com/andymck/grpcbox_plugin.git", {branch, "andymck/ts-master/combined-opts-and-template-changes"}}}, {rebar3_gpb_plugin, "2.15.0"}, {rebar3_eqc, "1.3.0"} ]}. 
diff --git a/rebar.lock b/rebar.lock index d3713ebfe..f83e38b39 100644 --- a/rebar.lock +++ b/rebar.lock @@ -5,7 +5,7 @@ {<<"base64url">>,{pkg,<<"base64url">>,<<"1.0.1">>},1}, {<<"blockchain">>, {git,"https://github.com/helium/blockchain-core.git", - {ref,"943339691c1505c8a40ebaefd2046b29902c160f"}}, + {ref,"5881ad571903c1908fbbaed396a59bc7efa505ce"}}, 0}, {<<"certifi">>,{pkg,<<"certifi">>,<<"2.8.0">>},2}, {<<"chatterbox">>, @@ -82,7 +82,7 @@ 1}, {<<"getopt">>,{pkg,<<"getopt">>,<<"1.0.1">>},3}, {<<"goldrush">>,{pkg,<<"goldrush">>,<<"0.1.9">>},2}, - {<<"gpb">>,{pkg,<<"gpb">>,<<"4.19.2">>},2}, + {<<"gpb">>,{pkg,<<"gpb">>,<<"4.19.2">>},1}, {<<"gproc">>,{pkg,<<"gproc">>,<<"0.8.0">>},2}, {<<"grpc_client">>, {git,"https://github.com/Bluehouse-Technology/grpc_client.git", @@ -91,7 +91,7 @@ {<<"grpc_lib">>, {git,"https://github.com/Bluehouse-Technology/grpc_lib", {ref,"a77686b55b60b052d6c7cd927d04dde429bbdabf"}}, - 1}, + 0}, {<<"grpcbox">>, {git,"https://github.com/andymck/grpcbox.git", {ref,"fbf689bb9c25fc2943155c891974e1f745ce5ac7"}}, @@ -107,7 +107,7 @@ 0}, {<<"helium_proto">>, {git,"https://github.com/helium/proto.git", - {ref,"30f17c5d1a7942297923f4e743c681c46f917fc3"}}, + {ref,"f743a80e534bdc78805e3c5438cb466bec3c0b6f"}}, 1}, {<<"hpack">>,{pkg,<<"hpack_erl">>,<<"0.2.3">>},3}, {<<"http2_client">>, @@ -176,7 +176,7 @@ 3}, {<<"sibyl">>, {git,"https://github.com/helium/sibyl.git", - {ref,"4d16f60fba28eafd70e256976cbe3644911ac16b"}}, + {ref,"a5a86c9441a0db5e0db136af47fa09fb0ae50c02"}}, 0}, {<<"sidejob">>,{pkg,<<"sidejob">>,<<"2.1.0">>},2}, {<<"small_ints">>,{pkg,<<"small_ints">>,<<"0.1.0">>},4}, diff --git a/src/cli/miner_cli_info.erl b/src/cli/miner_cli_info.erl index 7bbd78936..d024702b6 100644 --- a/src/cli/miner_cli_info.erl +++ b/src/cli/miner_cli_info.erl @@ -223,7 +223,8 @@ info_region_usage() -> ]. 
info_region(["info", "region"], [], []) -> - case miner_lora:region() of + LoraMod = application:get_env(miner, lora_mod, miner_lora), + case LoraMod:region() of {ok, undefined} -> {exit_status, 1, [clique_status:text("undefined")]}; {ok, Region} -> @@ -271,7 +272,7 @@ info_onboarding(["info", "onboarding"], [], Flags) -> ProvidedKey end, PayerOutputOnly = proplists:is_defined(just_payer, Flags), - + case OnboardingKey of undefined -> error_message("This miner has no onboarding key, no onboarding info available."); @@ -293,7 +294,7 @@ info_onboarding(["info", "onboarding"], [], Flags) -> %% -spec onboarding_info_for_key(string()) -> {ok, {map(), map()}} | notfound | {error, non_neg_integer()}. onboarding_info_for_key(OnboardingKey) -> - Url = ?ONBOARDING_API_URL_BASE ++ "/hotspots/" ++ OnboardingKey, + Url = ?ONBOARDING_API_URL_BASE ++ "/hotspots/" ++ OnboardingKey, case get_api_json_as_map(Url) of {ok, OnboardingResult} -> OnboardingData = maps:get(<<"data">>, OnboardingResult), @@ -308,7 +309,7 @@ onboarding_info_for_key(OnboardingKey) -> -spec clique_status_for_onboarding_info({map(), map()}, boolean()) -> list(). 
clique_status_for_onboarding_info({MinerData, MakerData}, PayerOutputOnly) -> case PayerOutputOnly of - true -> + true -> PayerAddress = maps:get(<<"address">>, MakerData), PayerAddressString = binary_to_list(PayerAddress), [ clique_status:text(PayerAddressString) ]; diff --git a/src/handlers/miner_discovery_handler.erl b/src/handlers/miner_discovery_handler.erl index ca59efa4d..c1e7e0575 100644 --- a/src/handlers/miner_discovery_handler.erl +++ b/src/handlers/miner_discovery_handler.erl @@ -70,7 +70,8 @@ handle_data(server, Data, State) -> Sig ]); true -> - case miner_lora:location_ok() of + LoraMod = application:get_env(miner, lora_mod, miner_lora), + case LoraMod:location_ok() of true -> miner_discovery_worker:start(Packets); false -> diff --git a/src/handlers/miner_hbbft_handler.erl b/src/handlers/miner_hbbft_handler.erl index 4cb573b58..1d04fc7cb 100644 --- a/src/handlers/miner_hbbft_handler.erl +++ b/src/handlers/miner_hbbft_handler.erl @@ -61,6 +61,8 @@ %% TODO No need to pass Meta when tuple. Use sum type: {map, Meta} | tuple metadata(Version, Meta, Chain) -> {ok, HeadHash} = blockchain:head_hash(Chain), + Ledger = blockchain:ledger(Chain), + {ok, N} = blockchain:config(?num_consensus_members, Ledger), %% construct a 2-tuple of the system time and the current head block hash as our stamp data case Version of tuple -> @@ -93,7 +95,37 @@ metadata(Version, Meta, Chain) -> lager:info("no snapshot interval configured"), ChainMeta0 end, - t2b(maps:merge(Meta, ChainMeta)) + ChainMeta1 = + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, validator} -> + %% generate a set of ephemeral keys for POC usage + %% the hashes of the public keys are added to metadata + ChallengeRate = + case blockchain:config(?poc_challenge_rate, Ledger) of + {ok, CR} -> CR; + _ -> 1 + end, + lager:debug("poc challenge rate ~p", [ChallengeRate] ), + %% if a val is in the ignore list then dont generate poc keys for it + %% TODO: this is a temp hack. 
remove when testing finished + IgnoreVals = application:get_env(sibyl, validator_ignore_list, []), + SelfPubKeyBin = blockchain_swarm:pubkey_bin(), + case not lists:member(SelfPubKeyBin, IgnoreVals) of + true -> + {EmpKeys, EmpKeyHashes} = generate_ephemeral_keys(N, ChallengeRate), + lager:debug("poc ephemeral keys ~p", [EmpKeys]), + lager:debug("node ~p generating poc ephemeral key hashes ~p", [SelfPubKeyBin, EmpKeyHashes]), + ok = miner_poc_mgr:save_poc_keys(Height, EmpKeys), + maps:put(poc_keys, {SelfPubKeyBin, EmpKeyHashes}, ChainMeta); + false -> + ChainMeta + end; + _ -> + ChainMeta + + end, + lager:info("ChainMeta1 ~p", [ChainMeta1]), + t2b(maps:merge(Meta, ChainMeta1)) end. init([Members, Id, N, F, BatchSize, SK, Chain]) -> @@ -119,7 +151,8 @@ init([Members, Id, N, F, BatchSize, SK, Chain, Round, Buf]) -> signatures_required = N - F, hbbft = HBBFT, swarm_keys = {MyPubKey, SignFun}, % For re-signing on var-autoskip - chain = Chain1}}. + chain = Chain1 + }}. handle_command(start_acs, State) -> case hbbft:start_on_demand(State#state.hbbft) of @@ -821,6 +854,18 @@ bin_to_msg(<>) -> {error, truncated} end. +-spec generate_ephemeral_keys(pos_integer(), pos_integer()) ->{[#{secret => libp2p_crypto:privkey(), public => libp2p_crypto:pubkey()}], [binary()]}. +generate_ephemeral_keys(N, ChallengeRate) -> + NumKeys = max(1, trunc(ChallengeRate / (((N-1)/3) * 2 ))), + lists:foldl( + fun(_N, {AccKeys, AccHashes})-> + Keys = libp2p_crypto:generate_keys(ecc_compact), + #{public := OnionCompactKey} = Keys, + OnionHash = crypto:hash(sha256, libp2p_crypto:pubkey_to_bin(OnionCompactKey)), + {[Keys | AccKeys], [OnionHash | AccHashes]} + end, + {[], []}, lists:seq(1, NumKeys)). + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). 
diff --git a/src/jsonrpc/miner_jsonrpc_info.erl b/src/jsonrpc/miner_jsonrpc_info.erl index fbedad78f..0b6b3cc43 100644 --- a/src/jsonrpc/miner_jsonrpc_info.erl +++ b/src/jsonrpc/miner_jsonrpc_info.erl @@ -45,8 +45,9 @@ handle_rpc(<<"info_p2p_status">>, []) -> height => ?TO_VALUE(list_to_integer(Height)) }; handle_rpc(<<"info_region">>, []) -> + LoraMod = application:get_env(miner, lora_mod, miner_lora), R = - case miner_lora:region() of + case LoraMod:region() of {ok, undefined} -> null; {ok, Region} -> atom_to_binary(Region, utf8) end, diff --git a/src/miner.erl b/src/miner.erl index fc21290bf..ebe6b2dba 100644 --- a/src/miner.erl +++ b/src/miner.erl @@ -52,7 +52,8 @@ seen => binary(), bba_completion => binary(), head_hash => blockchain_block:hash(), - snapshot_hash => binary() + snapshot_hash => binary(), + poc_onion_keys => list() }. -type metadata() :: @@ -329,7 +330,8 @@ version() -> %% ------------------------------------------------------------------ init(_Args) -> - lager:info("STARTING UP MINER"), + Mode = application:get_env(miner, mode), + lager:info("STARTING UP MINER with mode ~p", [Mode]), ok = blockchain_event:add_handler(self()), %% TODO: Maybe put this somewhere else? 
ok = miner_discovery_handler:add_stream_handler(blockchain_swarm:tid()), @@ -574,6 +576,13 @@ create_block(Metadata, Txns, HBBFTRound, Chain, VotesNeeded, {MyPubKey, SignFun} HeightNext = HeightCurr + 1, Ledger = blockchain:ledger(Chain), SnapshotHash = snapshot_hash(Ledger, HeightNext, Metadata, VotesNeeded), + POCKeys = + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, validator} -> + poc_keys(Ledger, Metadata, CurrentBlockHash); + _ -> + [] + end, SeenBBAs = [{{J, S}, B} || {J, #{seen := S, bba_completion := B}} <- metadata_only_v2(Metadata)], {SeenVectors, BBAs} = lists:unzip(SeenBBAs), @@ -602,7 +611,8 @@ create_block(Metadata, Txns, HBBFTRound, Chain, VotesNeeded, {MyPubKey, SignFun} epoch_start => EpochStart, seen_votes => SeenVectors, bba_completion => BBA, - snapshot_hash => SnapshotHash + snapshot_hash => SnapshotHash, + poc_keys => POCKeys }), BinNewBlock = blockchain_block:serialize(NewBlock), Signature = SignFun(BinNewBlock), @@ -739,6 +749,34 @@ snapshot_hash(Ledger, BlockHeightNext, Metadata, VotesNeeded) -> <<>> end. +-spec poc_keys(L, M, B) -> [] + when L :: blockchain_ledger_v1:ledger(), + M :: metadata(), + B :: blockchain_block:hash(). +poc_keys(Ledger, Metadata, BlockHash) -> + %% Construct a set of poc keys. 
Each node will define its own set within the metadata + %% We want to take a deterministic random subset of these up to a max of poc challenge rate + %% Use the blockhash as the seed + RandState = blockchain_utils:rand_state(BlockHash), + ChallengeRate = + case blockchain:config(?poc_challenge_rate, Ledger) of + {ok, V} -> V; + _ -> 1 + end, + PocKeys0 = [{MinerAddr, Keys} || {_, #{poc_keys := {MinerAddr, Keys}}} <- metadata_only_v2(Metadata)], + {ok, CGMembers} = blockchain_ledger_v1:consensus_members(Ledger), + PocKeys1 = lists:foldl( + fun({MinerAddr, PocKeys}, Acc)-> + Pos = miner_util:index_of(MinerAddr, CGMembers), + NormalisedKeys = lists:map(fun(PocKey) -> {Pos, PocKey} end, PocKeys), + [NormalisedKeys | Acc] + end, [], PocKeys0), + sort_and_truncate_poc_keys(lists:flatten(PocKeys1), ChallengeRate, RandState). + +sort_and_truncate_poc_keys(L, MaxKeys, RandState) -> + {_, TruncList} = blockchain_utils:deterministic_subset(MaxKeys, RandState, L), + TruncList. + -spec common_enough_or_default(non_neg_integer(), [X], X) -> X. 
common_enough_or_default(_, [], Default) -> Default; diff --git a/src/miner_consensus_mgr.erl b/src/miner_consensus_mgr.erl index 58e1a08d8..c3a6906d4 100644 --- a/src/miner_consensus_mgr.erl +++ b/src/miner_consensus_mgr.erl @@ -177,6 +177,7 @@ einfo() -> %%%=================================================================== init(_Args) -> + lager:info("Starting consensus_mgr", []), ok = blockchain_event:add_handler(self()), erlang:send_after(timer:seconds(1), self(), monitor_miner), case blockchain_worker:blockchain() of diff --git a/src/miner_critical_sup.erl b/src/miner_critical_sup.erl index 66a48cc2d..30e7879d7 100644 --- a/src/miner_critical_sup.erl +++ b/src/miner_critical_sup.erl @@ -74,6 +74,17 @@ init(_Opts) -> %% downlink packets from state channels go here application:set_env(blockchain, sc_client_handler, miner_lora), + %% if POCs are over grpc and we are a gateway then dont start the chain + GatewaysRunChain = application:get_env(miner, gateways_run_chain, true), + MinerMode = application:get_env(miner, mode, gateway), + case {MinerMode, GatewaysRunChain} of + {gateway, false} -> + lager:info("grpc gateway, not loading chain"), + application:set_env(blockchain, autoload, false); + _ -> + ok + end, + BlockchainOpts = [ {key, {PublicKey, SigFun, ECDHFun}}, {seed_nodes, SeedNodes ++ SeedAddresses}, diff --git a/src/miner_discovery_worker.erl b/src/miner_discovery_worker.erl index 7b32b7c65..1f09cbcf3 100644 --- a/src/miner_discovery_worker.erl +++ b/src/miner_discovery_worker.erl @@ -46,7 +46,8 @@ init([Packets]) -> length(Packets), ?DEFAULT_TRANSMIT_DELAY_MS ]), - {ok, Region} = miner_lora:region(), + LoraMod = application:get_env(miner, lora_mod, miner_lora), + {ok, Region} = LoraMod:region(), TxPower = tx_power(Region), timer:send_after(?DEFAULT_TRANSMIT_DELAY_MS, self(), tick), {ok, #state{ @@ -75,7 +76,8 @@ handle_info( lists:nth(rand:uniform(length(FreqList)), FreqList) end, Spreading = spreading(Region, byte_size(Packet)), - case 
miner_lora:send_poc( + LoraMod = application:get_env(miner, lora_mod, miner_lora), + case LoraMod:send_poc( Packet, immediate, ChannelSelectorFun, diff --git a/src/miner_hbbft_sidecar.erl b/src/miner_hbbft_sidecar.erl index 0496d3bfb..51be803ef 100644 --- a/src/miner_hbbft_sidecar.erl +++ b/src/miner_hbbft_sidecar.erl @@ -20,8 +20,15 @@ -define(SERVER, ?MODULE). +-ifdef(TEST). +-define(SlowTxns, #{blockchain_txn_poc_receipts_v1 => 10000, + blockchain_txn_poc_receipts_v2 => 10000, + blockchain_txn_consensus_group_v1 => 30000}). +-else. -define(SlowTxns, #{blockchain_txn_poc_receipts_v1 => 75, + blockchain_txn_poc_receipts_v2 => 75, blockchain_txn_consensus_group_v1 => 30000}). +-endif. %% txns that do not appear naturally -define(InvalidTxns, [blockchain_txn_reward_v1, blockchain_txn_reward_v2]). diff --git a/src/miner_jsonrpc_handler.erl b/src/miner_jsonrpc_handler.erl index 7d47486ca..f5f4972e0 100644 --- a/src/miner_jsonrpc_handler.erl +++ b/src/miner_jsonrpc_handler.erl @@ -214,9 +214,9 @@ to_value(X) -> iolist_to_binary(io_lib:format("~p", [X])). ensure_binary_map(M) -> maps:fold(fun(K, V, Acc) -> - BinK = to_key(K), - BinV = to_value(V), - Acc#{BinK => BinV} + BinK = to_key(K), + BinV = to_value(V), + Acc#{BinK => BinV} end, #{}, M). jsonrpc_maybe(undefined) -> <<"undefined">>; diff --git a/src/miner_lora.erl b/src/miner_lora.erl index 46ff2f5af..659630c74 100644 --- a/src/miner_lora.erl +++ b/src/miner_lora.erl @@ -41,7 +41,7 @@ }). 
-record(state, { - socket, + socket = undefined :: undefined | port(), gateways = #{}, %% keyed by MAC packet_timers = #{}, %% keyed by token sig_fun, @@ -54,7 +54,11 @@ reg_throttle = undefined :: undefined | miner_lora_throttle:handle(), last_tmst_us = undefined :: undefined | integer(), % last concentrator tmst reported by the packet forwarder last_mono_us = undefined :: undefined | integer(), % last local monotonic timestamp taken when packet forwarder reported last tmst - chain = undefined :: undefined | blockchain:blockchain() + chain = undefined :: undefined | blockchain:blockchain(), + radio_udp_bind_ip, + radio_udp_bind_port, + cur_poc_challenger_type = undefined :: undefined | validator, + following_chain = true :: boolean() | undefined }). -record(country, { @@ -236,24 +240,15 @@ init(Args) -> lager:info("init with args ~p", [Args]), UDPIP = maps:get(radio_udp_bind_ip, Args), UDPPort = maps:get(radio_udp_bind_port, Args), - {ok, Socket} = gen_udp:open(UDPPort, [binary, {reuseaddr, true}, {active, 100}, {ip, UDPIP}]), - MirrorSocket = case application:get_env(miner, radio_mirror_port, undefined) of - undefined -> - undefined; - P -> - {ok, S} = gen_udp:open(P, [binary, {active, true}]), - S - end, %% cloud/miner pro will never assert location and so we dont use regulatory domain checks for these miners %% instead they will supply a region value, use this if it exists {RegDomainConfirmed, DefaultRegRegion, DefaultRegFreqList} = case maps:get(region_override, Args, undefined) of undefined -> - %% not overriding domain checks, so initialize with source data and defaults ets:new(?COUNTRY_FREQ_DATA, [named_table, public]), ok = init_ets(), - erlang:send_after(5000, self(), reg_domain_timeout), {false, undefined, undefined}; Region -> lager:info("using region specifed in config: ~p", [Region]), @@ -269,24 +264,17 @@ init(Args) -> end end, - S0 = 
#state{socket=Socket, - sig_fun = maps:get(sig_fun, Args), - mirror_socket = {MirrorSocket, undefined}, + S0 = #state{sig_fun = maps:get(sig_fun, Args), pubkey_bin = blockchain_swarm:pubkey_bin(), reg_domain_confirmed = RegDomainConfirmed, reg_region = DefaultRegRegion, reg_freq_list = DefaultRegFreqList, - reg_throttle=miner_lora_throttle:new(DefaultRegRegion) + reg_throttle=miner_lora_throttle:new(DefaultRegRegion), + radio_udp_bind_ip = UDPIP, + radio_udp_bind_port = UDPPort }, - - case blockchain_worker:blockchain() of - undefined -> - erlang:send_after(500, self(), chain_check), - {ok, S0}; - Chain -> - ok = blockchain_event:add_handler(self()), - {ok, update_state_using_chain(Chain, S0)} - end. + erlang:send_after(500, self(), init), + {ok, S0}. -spec update_state_using_chain(Chain :: blockchain_worker:blockchain(), InputState :: state()) -> state(). @@ -304,6 +292,8 @@ handle_call({send, Payload, When, ChannelSelectorFun, DataRate, Power, IPol, Hlm {error, _}=Error -> {reply, Error, State}; {ok, State1} -> {noreply, State1} end; +handle_call(port, _From, State = #state{socket = undefined}) -> + {reply, {error, no_socket}, State}; handle_call(port, _From, State) -> {reply, inet:port(State#state.socket), State}; handle_call(position, _From, #state{latlong = undefined} = State) -> @@ -347,33 +337,67 @@ handle_cast(_Msg, State) -> lager:warning("rcvd unknown cast msg: ~p", [_Msg]), {noreply, State}. 
-handle_info(chain_check, State) -> +handle_info(init, State = #state{following_chain = false}) -> + %% if we are not following chain then assume validators running POC challenges and thus + %% the alternative module 'miner_lora_light" will handle lora packets + %% just need to set required env vars here + application:set_env(miner, lora_mod, miner_lora_light), + application:set_env(miner, onion_server_mod, miner_onion_server_light), + {noreply, State}; +handle_info(init, State = #state{radio_udp_bind_ip = UDPIP, radio_udp_bind_port = UDPPort}) -> case blockchain_worker:blockchain() of undefined -> - erlang:send_after(500, self(), chain_check), + erlang:send_after(500, self(), init), {noreply, State}; Chain -> ok = blockchain_event:add_handler(self()), - {noreply, update_state_using_chain(Chain, State)} + erlang:send_after(500, self(), reg_domain_timeout), + Ledger = blockchain:ledger(Chain), + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, validator} -> + %% we are in validator POC mode, dont open a socket + %% instead let the alternative module 'miner_lora_light' take it + %% and have it handle lora packets + application:set_env(miner, lora_mod, miner_lora_light), + application:set_env(miner, onion_server_mod, miner_onion_server_light), + {noreply, State#state{cur_poc_challenger_type = validator}}; + NonValidatorChallenger -> + %% we are not in validator POC mode, so open a socket as normal + %% this module will handle lora packets + application:set_env(miner, lora_mod, miner_lora), + application:set_env(miner, onion_server_mod, miner_onion_server), + {ok, Socket, MirrorSocket} = open_socket(UDPIP, UDPPort), + {noreply, update_state_using_chain(Chain, State#state{cur_poc_challenger_type = NonValidatorChallenger, socket=Socket, mirror_socket = {MirrorSocket, undefined}})} + end end; handle_info({blockchain_event, {new_chain, NC}}, State) -> {noreply, update_state_using_chain(NC, State)}; -handle_info({blockchain_event, {add_block, Hash, _Sync, 
_Ledger}}, - #state{chain=Chain}=State) when Chain /= undefined -> +handle_info({blockchain_event, {add_block, Hash, _Sync, Ledger}}, + #state{chain=Chain, cur_poc_challenger_type = CurPoCChallengerType}=State) when Chain /= undefined -> {ok, Block} = blockchain:get_block(Hash, Chain), Predicate = fun(T) -> blockchain_txn:type(T) == blockchain_txn_vars_v1 end, case blockchain_utils:find_txn(Block, Predicate) of Txs when length(Txs) > 0 -> - %% Resend the timeout for regulatory domain - self() ! reg_domain_timeout; + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, V} when V /= CurPoCChallengerType -> + %% the poc challenger chain var has been modified, force this server + %% to restart and recheck if it can still bind to the lora port + %% in addition restart the grpc client so that we start afresh + _ = miner_poc_grpc_client_statem:stop(), + {stop, force_restart, State}; + _ -> + {noreply, State} + end; _ -> - ok - end, - {noreply, State}; + {noreply, State} + end; handle_info(reg_domain_timeout, #state{chain=undefined} = State) -> %% There is no chain, we cannot lookup regulatory domain data yet %% Keep waiting for chain - erlang:send_after(500, self(), chain_check), + erlang:send_after(500, self(), init), + {noreply, State}; +handle_info(reg_domain_timeout, #state{cur_poc_challenger_type=validator} = State) -> + %% validators are issuing POCs, so miner_lora_light will be in use...do nothing {noreply, State}; handle_info(reg_domain_timeout, #state{reg_domain_confirmed=false, pubkey_bin=Addr, chain=Chain} = State) -> lager:info("checking regulatory domain for address ~p", [Addr]), @@ -409,14 +433,14 @@ handle_info({udp, Socket, IP, Port, _Packet}, #state{mirror_socket={Socket, _}}= lager:info("received mirror port connection from ~p ~p", [IP, Port]), {noreply, State#state{mirror_socket={Socket, {IP, Port}}}}; handle_info(_Msg, State) -> - lager:warning("rcvd unknown info msg: ~p", [_Msg]), + lager:debug("rcvd unknown info msg: ~p", [_Msg]), {noreply, 
State}. code_change(_OldVsn, State, _Extra) -> {ok, State}. terminate(_Reason, #state{socket=Socket}) -> - gen_udp:close(Socket), + catch gen_udp:close(Socket), ok. %% ------------------------------------------------------------------ @@ -830,7 +854,7 @@ packet_rssi(Packet, UseRSSIS) -> BestRSSISelector = fun (Obj, Best) -> erlang:max(Best, FetchRSSI(Obj)) - end, + end, [H|T] = maps:get(<<"rsig">>, Packet), lists:foldl(BestRSSISelector, FetchRSSI(H), T); %% GWMP V1 @@ -1008,17 +1032,29 @@ maybe_update_reg_data(#state{pubkey_bin=Addr} = State) -> } end. - -spec reg_region(State :: state()) -> atom(). reg_region(State) -> State#state.reg_region. +-spec open_socket(string(), pos_integer()) -> {ok, port(), port()}. +open_socket(IP, Port) -> + {ok, Socket} = gen_udp:open(Port, [binary, {reuseaddr, true}, {active, 100}, {ip, IP}]), + MirrorSocket = + case application:get_env(miner, radio_mirror_port, undefined) of + undefined -> + undefined; + P -> + {ok, MS} = gen_udp:open(P, [binary, {active, true}]), + MS + end, + {ok, Socket, MirrorSocket}. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). rssi_fetch_test() -> PacketWithRSSIS = #{ - <<"rssis">> => 1, + <<"rssis">> => 1, <<"rssi">> => 2 }, PacketWithoutRSSIS = #{ @@ -1046,5 +1082,5 @@ rssi_fetch_test() -> ?assertEqual(packet_rssi(RSIGPacketWithRSSIS, false), 4), ?assertEqual(packet_rssi(RSIGPacketWithoutRSSIS, true), 4), ?assertEqual(packet_rssi(RSIGPacketWithoutRSSIS, false), 4). - --endif. + +-endif. \ No newline at end of file diff --git a/src/miner_lora_light.erl b/src/miner_lora_light.erl new file mode 100644 index 000000000..eb5a2e005 --- /dev/null +++ b/src/miner_lora_light.erl @@ -0,0 +1,796 @@ +%%%------------------------------------------------------------------- +%% @doc +%% == Miner lora for light gateways == +%% no use of chain or ledger +%% @end +%%%------------------------------------------------------------------- +-module(miner_lora_light). 
+ +-include("src/grpc/autogen/client/gateway_miner_client_pb.hrl"). +-behaviour(gen_server). + +-export([ + start_link/1, + handle_response/1, + send/1, + send_poc/5, + port/0, + location_ok/0, + region/0, + region_params_update/2 +]). + +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). + +-include_lib("helium_proto/include/blockchain_state_channel_v1_pb.hrl"). +-include("lora.hrl"). +-include_lib("blockchain/include/blockchain_utils.hrl"). +-include_lib("blockchain/include/blockchain_vars.hrl"). + +-record(gateway, { + mac, + ip, + port, + sent =0, + received = 0, + dropped = 0, + status, + rtt_samples = [], + rtt=5000000 %% in microseconds +}). + +-record(state, { + socket = undefined :: undefined | port(), + gateways = #{}, %% keyed by MAC + packet_timers = #{}, %% keyed by token + sig_fun, + pubkey_bin, + mirror_socket, + latlong, + reg_domain_confirmed = false :: boolean(), + reg_region :: atom(), + reg_region_params :: blockchain_region_param_v1:region_param_v1(), + reg_freq_list :: [float()] | undefined, + reg_throttle = undefined :: undefined | miner_lora_throttle:handle(), + last_tmst_us = undefined :: undefined | integer(), % last concentrator tmst reported by the packet forwarder + last_mono_us = undefined :: undefined | integer(), % last local monotonic timestamp taken when packet forwarder reported last tmst + chain = undefined :: undefined | blockchain:blockchain(), + radio_udp_bind_ip, + radio_udp_bind_port, + cur_poc_challenger_type = undefined :: undefined | validator, + following_chain = true :: undefined | boolean() +}). + +-type state() :: #state{}. +-type gateway() :: #gateway{}. +-type helium_packet() :: #packet_pb{}. + +-define(COUNTRY_FREQ_DATA, country_freq_data). + +%% in meters +-define(MAX_WANDER_DIST, 200). + +%% Maximum `tmst` counter value reported by an SX130x concentrator +%% IC. 
This is a raw [1] counter value with the following +%% characteristics: +%% +%% - unsigned +%% - counts upwards +%% - 32 bits +%% - increments at 1 MHz +%% +%% [1]: On SX1301 it is a raw value. On SX1302 it is a 32 bit value +%% counting at 32 MHz, but the SX1302 HAL throws away 5 bits to match +%% SX1301's behavior. +%% +%% Equivalent `(2^32)-1` +-define(MAX_TMST_VAL, 4294967295). + +-ifdef(TEST). +-define(REG_DOMAIN_TIMEOUT, 1000). +-else. +-define(REG_DOMAIN_TIMEOUT, 30000). +-endif. + + +%% ------------------------------------------------------------------ +%% API Function Definitions +%% ------------------------------------------------------------------ + +start_link(Args) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, Args, []). + +%% @doc used to handle state channel responses +-spec handle_response(blockchain_state_channel_response_v1:response()) -> ok | {error, any()}. +handle_response(Resp) -> + case blockchain_state_channel_response_v1:downlink(Resp) of + undefined -> + ok; + Packet -> + send(Packet) + end. + +-spec send(helium_packet()) -> ok | {error, any()}. +send(#packet_pb{payload=Payload, frequency=Freq, timestamp=When, signal_strength=Power, datarate=DataRate}=Packet) -> + lager:debug("got download packet ~p via freq ~p", [Packet, Freq]), + %% this is used for downlink packets that have been assigned a downlink frequency by the router, so just use the supplied frequency + ChannelSelectorFun = fun(_FreqList) -> Freq end, + gen_server:call(?MODULE, {send, Payload, When, ChannelSelectorFun, DataRate, Power, true, Packet}, 11000). + +-spec send_poc(binary(), any(), function(), iolist(), any()) -> ok | {error, any()} | {warning, any()}. +send_poc(Payload, When, ChannelSelectorFun, DataRate, Power) -> + gen_server:call(?MODULE, {send, Payload, When, ChannelSelectorFun, DataRate, Power, false, undefined}, 11000). + +-spec port() -> {ok, inet:port_number()} | {error, any()}. +port() -> + gen_server:call(?MODULE, port, 11000). 
+ +-spec location_ok() -> true | false. +location_ok() -> + %% this terrible thing is to fake out dialyzer + application:get_env(miner, loc_ok_default, true). + +-spec region_params_update(atom(), [blockchain_region_param_v1:region_param_v1()]) -> ok. +region_params_update(Region, RegionParams) -> + gen_server:cast(?MODULE, {region_params_update, Region, RegionParams}). + +-spec region() -> {ok, atom()}. +region()-> + %% TODO: recalc region if hotspot re-asserts + gen_server:call(?MODULE, region, 5000). + +%% ------------------------------------------------------------------ +%% gen_server Function Definitions +%% ------------------------------------------------------------------ +init(Args) -> + lager:info("init with args ~p", [Args]), + UDPIP = maps:get(radio_udp_bind_ip, Args), + UDPPort = maps:get(radio_udp_bind_port, Args), + GatewaysRunChain = application:get_env(miner, gateways_run_chain, true), + lager:info("gateways_run_chain: ~p", [GatewaysRunChain]), + S0 = #state{pubkey_bin = blockchain_swarm:pubkey_bin(), + reg_domain_confirmed = false, + radio_udp_bind_ip = UDPIP, + radio_udp_bind_port = UDPPort, + following_chain = GatewaysRunChain + }, + erlang:send_after(500, self(), init), + {ok, S0}. 
+ +handle_call({send, _Payload, _When, _ChannelSelectorFun, _DataRate, _Power, _IPol, _HlmPacket}, _From, + #state{reg_domain_confirmed = false}=State) -> + lager:debug("ignoring send request as regulatory domain not yet confirmed", []), + {reply, {error, reg_domain_unconfirmed}, State}; +handle_call({send, Payload, When, ChannelSelectorFun, DataRate, Power, IPol, HlmPacket}, From, State) -> + case send_packet(Payload, When, ChannelSelectorFun, DataRate, Power, IPol, HlmPacket, From, State) of + {error, _}=Error -> {reply, Error, State}; + {ok, State1} -> {noreply, State1} + end; +handle_call(port, _From, State = #state{socket = undefined}) -> + {reply, {error, no_socket}, State}; +handle_call(port, _From, State) -> + {reply, inet:port(State#state.socket), State}; +handle_call(region, _From, #state{reg_region = Region} = State) -> + {reply, {ok, Region}, State}; +handle_call(_Msg, _From, State) -> + lager:warning("rcvd unknown call msg: ~p", [_Msg]), + {reply, ok, State}. + +handle_cast({region_params_update, Region, RegionParams}, State) -> + lager:info("updating region params. Region: ~p, Params: ~p", [Region, RegionParams]), + Throttle = miner_lora_throttle:new(Region), + FreqList = [(blockchain_region_param_v1:channel_frequency(RP) / ?MHzToHzMultiplier) || RP <- RegionParams], + {noreply, State#state{ + reg_region = Region, + reg_region_params = RegionParams, + reg_domain_confirmed = true, + reg_throttle=Throttle, + reg_freq_list = FreqList}}; +handle_cast(_Msg, State) -> + lager:warning("rcvd unknown cast msg: ~p", [_Msg]), + {noreply, State}. 
+ +handle_info(init, State = #state{radio_udp_bind_ip = UDPIP, radio_udp_bind_port = UDPPort, following_chain = false}) -> + %% if we are not following chain then assume validators are running POC challenges and thus + %% this module will handle lora packets and will need to open the port + application:set_env(miner, lora_mod, miner_lora_light), + application:set_env(miner, onion_server_mod, miner_onion_server_light), + {ok, Socket, MirrorSocket} = open_socket(UDPIP, UDPPort), + erlang:send_after(500, self(), reg_domain_timeout), + {noreply, State#state{socket=Socket, mirror_socket = {MirrorSocket, undefined}}}; +handle_info(init, State = #state{radio_udp_bind_ip = UDPIP, radio_udp_bind_port = UDPPort}) -> + case blockchain_worker:blockchain() of + undefined -> + lager:info("failed to find chain, will retry in a bit",[]), + erlang:send_after(500, self(), init), + {noreply, State}; + Chain -> + ok = blockchain_event:add_handler(self()), + Ledger = blockchain:ledger(Chain), + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, validator} -> + lager:debug("poc_challenger_type: ~p", [validator]), + %% we are in validator POC mode, open a socket + %% this module will handle lora packets + application:set_env(miner, lora_mod, miner_lora_light), + application:set_env(miner, onion_server_mod, miner_onion_server_light), + {ok, Socket, MirrorSocket} = open_socket(UDPIP, UDPPort), + {noreply, State#state{chain = Chain, cur_poc_challenger_type = validator, socket=Socket, mirror_socket = {MirrorSocket, undefined}}}; + NonValidatorChallenger -> + lager:debug("poc_challenger_type: ~p", [NonValidatorChallenger]), + %% we are NOT in validator POC mode, dont open a socket + %% instead let the alternative module 'miner_lora' take it + %% and handle lora packets + application:set_env(miner, lora_mod, miner_lora), + application:set_env(miner, onion_server_mod, miner_onion_server), + {noreply, State#state{cur_poc_challenger_type = NonValidatorChallenger}} + end + end; 
+handle_info({blockchain_event, {new_chain, NC}}, State) -> + {noreply, State#state{chain = NC}}; +handle_info( + {blockchain_event, {add_block, _BlockHash, _Sync, Ledger} = _Event}, + #state{cur_poc_challenger_type = CurPoCChallengerType} = State +)-> + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, V} when V /= CurPoCChallengerType -> + %% the poc challenger chain var has been modified, force this server + %% to restart. It will recheck if it can still bind to the lora port + %% in addition restart the grpc client so that we start afresh + _ = miner_poc_grpc_client_statem:stop(), + {stop, force_restart, State}; + _ -> + {noreply, State} + end; +handle_info({blockchain_event, _}, State) -> + {noreply, State}; + +handle_info({tx_timeout, Token}, #state{packet_timers=Timers}=State) -> + case maps:find(Token, Timers) of + {ok, {send, _Ref, From, _SentAt, _LocalFreq, _TimeOnAir, _HlmPacket}} -> + gen_server:reply(From, {error, timeout}); + error -> + ok + end, + {noreply, State#state{packet_timers=maps:remove(Token, Timers)}}; +handle_info({udp, Socket, IP, Port, Packet}, #state{socket=Socket}=State) -> + RxInstantLocal_us = erlang:monotonic_time(microsecond), + maybe_mirror(State#state.mirror_socket, Packet), + State2 = handle_udp_packet(Packet, IP, Port, RxInstantLocal_us, State), + {noreply, State2}; +handle_info({udp_passive, Socket}, #state{socket=Socket}=State) -> + inet:setopts(Socket, [{active, 100}]), + {noreply, State}; +handle_info({udp, Socket, IP, Port, _Packet}, #state{mirror_socket={Socket, _}}=State) -> + lager:info("received mirror port connection from ~p ~p", [IP, Port]), + {noreply, State#state{mirror_socket={Socket, {IP, Port}}}}; +handle_info(_Msg, State) -> + lager:warning("rcvd unknown info msg: ~p", [_Msg]), + {noreply, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +terminate(_Reason, #state{socket=Socket}) -> + catch gen_udp:close(Socket), + ok. 
+ +%% ------------------------------------------------------------------ +%% Internal Function Definitions +%% ------------------------------------------------------------------ + +-spec mk_token(map()) -> binary(). +mk_token(Timers) -> + Token = <<(rand:uniform(65535)):16/integer-unsigned-little>>, + case maps:is_key(Token, Timers) of + true -> mk_token(Timers); + false -> Token + end. + +-spec select_gateway(map()) -> {ok, gateway()} | {error, no_gateways}. +select_gateway(Gateways) -> + %% TODO for a multi-tenant miner we'd have a mapping of swarm keys to + %% 64-bit packet forwarder IDs and, depending on what swarm key this send + %% was directed to, we'd select the appropriate gateway from the map. + case maps:size(Gateways) of + 0 -> + {error, no_gateways}; + _ -> + {ok, erlang:element(2, erlang:hd(maps:to_list(Gateways)))} + end. + +-spec handle_udp_packet(binary(), inet:ip_address(), inet:port_number(), integer(), state()) -> state(). +handle_udp_packet(<>, IP, Port, RxInstantLocal_us, + #state{socket=Socket, gateways=Gateways, + reg_domain_confirmed = RegDomainConfirmed}=State) -> + lager:info("PUSH_DATA ~p from ~p on ~p", [jsx:decode(JSON), MAC, Port]), + Gateway = + case maps:find(MAC, Gateways) of + {ok, #gateway{received=Received}=G} -> + %% We purposely do not update gateway's addr/port + %% here. They should only be updated when handling + %% PULL_DATA, otherwise we may send downlink packets + %% to the wrong place. 
+ G#gateway{received=Received+1}; + error -> + #gateway{mac=MAC, ip=IP, port=Port, received=1} + end, + Packet = <>, + maybe_mirror(State#state.mirror_socket, Packet), + maybe_send_udp_ack(Socket, IP, Port, Packet, RegDomainConfirmed), + handle_json_data(jsx:decode(JSON, [return_maps]), Gateway, RxInstantLocal_us, State); +handle_udp_packet(<>, IP, Port, _RxInstantLocal_us, #state{socket=Socket, gateways=Gateways, + reg_domain_confirmed = RegDomainConfirmed}=State) -> + Packet = <>, + maybe_mirror(State#state.mirror_socket, Packet), + maybe_send_udp_ack(Socket, IP, Port, Packet, RegDomainConfirmed), + lager:info("PULL_DATA from ~p on ~p", [MAC, Port]), + Gateway = + case maps:find(MAC, Gateways) of + {ok, #gateway{received=Received}=G} -> + G#gateway{ip=IP, port=Port, received=Received+1}; + error -> + #gateway{mac=MAC, ip=IP, port=Port, received=1} + end, + State#state{gateways=maps:put(MAC, Gateway, Gateways)}; +handle_udp_packet(<>, _IP, _Port, _RxInstantLocal_us, #state{packet_timers=Timers, reg_throttle=Throttle}=State0) -> + lager:info("TX ack for token ~p ~p", [Token, MaybeJSON]), + case maps:find(Token, Timers) of + {ok, {send, Ref, From, SentAt, LocalFreq, TimeOnAir, _HlmPacket}} when MaybeJSON == <<>> -> %% empty string means success, at least with the semtech reference implementation + _ = erlang:cancel_timer(Ref), + _ = gen_server:reply(From, ok), + State0#state{packet_timers=maps:remove(Token, Timers), + reg_throttle=miner_lora_throttle:track_sent(Throttle, SentAt, LocalFreq, TimeOnAir)}; + {ok, {send, Ref, From, SentAt, LocalFreq, TimeOnAir, HlmPacket}} -> + %% likely some kind of error here + _ = erlang:cancel_timer(Ref), + State1 = State0#state{packet_timers=maps:remove(Token, Timers)}, + {Reply, NewState} = case kvc:path([<<"txpk_ack">>, <<"error">>], jsx:decode(MaybeJSON)) of + <<"NONE">> -> + lager:info("packet sent ok"), + Throttle1 = miner_lora_throttle:track_sent(Throttle, SentAt, LocalFreq, TimeOnAir), + {ok, 
State1#state{reg_throttle=Throttle1}}; + <<"COLLISION_", _/binary>> -> + %% colliding with a beacon or another packet, check if join2/rx2 is OK + lager:info("collision"), + {{error, collision}, State1}; + <<"TOO_LATE">> -> + lager:info("too late"), + case blockchain_helium_packet_v1:rx2_window(HlmPacket) of + undefined -> lager:warning("No RX2 available"), + {{error, too_late}, State1}; + _ -> retry_with_rx2(HlmPacket, From, State1) + end; + <<"TOO_EARLY">> -> + lager:info("too early"), + case blockchain_helium_packet_v1:rx2_window(HlmPacket) of + undefined -> lager:warning("No RX2 available"), + {{error, too_early}, State1}; + _ -> retry_with_rx2(HlmPacket, From, State1) + end; + <<"TX_FREQ">> -> + %% unmodified 1301 will send this + lager:info("tx frequency not supported"), + {{error, bad_tx_frequency}, State1}; + <<"TX_POWER">> -> + lager:info("tx power not supported"), + {{error, bad_tx_power}, State1}; + <<"GPS_UNLOCKED">> -> + lager:info("transmitting on GPS time not supported because no GPS lock"), + {{error, no_gps_lock}, State1}; + [] -> + %% there was no error, see if there was a warning, which implies we sent the packet + %% but some correction had to be done. 
+ Throttle1 = miner_lora_throttle:track_sent(Throttle, SentAt, LocalFreq, TimeOnAir), + case kvc:path([<<"txpk_ack">>, <<"warn">>], jsx:decode(MaybeJSON)) of + <<"TX_POWER">> -> + %% modified 1301 and unmodified 1302 will send this + {{warning, {tx_power_corrected, kvc:path([<<"txpk_ack">>, <<"value">>], jsx:decode(MaybeJSON))}}, State1#state{reg_throttle=Throttle1}}; + Other -> + {{warning, {unknown, Other}}, State1#state{reg_throttle=Throttle1}} + end; + Error -> + %% any other errors are pretty severe + lager:error("Failure enqueing packet for gateway ~p", [Error]), + {{error, {unknown, Error}}, State1} + end, + gen_server:reply(From, Reply), + NewState; + error -> + State0 + end; +handle_udp_packet(Packet, _IP, _Port, _RxInstantLocal_us, State) -> + lager:info("unhandled udp packet ~p", [Packet]), + State. + +-spec handle_json_data(map(), gateway(), integer(), state()) -> state(). +handle_json_data(#{<<"rxpk">> := Packets} = Map, Gateway, RxInstantLocal_us, State0) -> + State1 = handle_packets(sort_packets(Packets), Gateway, RxInstantLocal_us, State0), + handle_json_data(maps:remove(<<"rxpk">>, Map), Gateway, RxInstantLocal_us, State1); +handle_json_data(#{<<"stat">> := Status} = Map, Gateway0, RxInstantLocal_us, #state{gateways=Gateways}=State) -> + Gateway1 = Gateway0#gateway{status=Status}, + lager:info("got status ~p", [Status]), + lager:info("Gateway ~p", [lager:pr(Gateway1, ?MODULE)]), + Mac = Gateway1#gateway.mac, + State1 = maybe_update_gps(Status, State), + handle_json_data(maps:remove(<<"stat">>, Map), Gateway1, RxInstantLocal_us, + State1#state{gateways=maps:put(Mac, Gateway1, Gateways)}); +handle_json_data(_, _Gateway, _RxInstantLocal_us, State) -> + State. + +%% cache GPS the state with each update. I'm not sure if this will +%% lead to a lot of wander, but I do want to be able to refine if we +%% have a poor quality initial lock. we might want to keep track of +%% server boot time and lock it down after some period of time. 
+-spec maybe_update_gps(#{}, state()) -> state(). +maybe_update_gps(#{<<"lati">> := Lat, <<"long">> := Long}, State) -> + State#state{latlong = {Lat, Long}}; +maybe_update_gps(_Status, State) -> + State. +maybe_send_udp_ack(_Socket, _IP, _Port, _Packet, false = _RegDomainConfirmed)-> + ok; +maybe_send_udp_ack(Socket, IP, Port, Packet, _RegDomainConfirmed)-> + ok = gen_udp:send(Socket, IP, Port, Packet). + +-spec sort_packets(list()) -> list(). +sort_packets(Packets) -> + lists:sort( + fun(A, B) -> + packet_snr(A) >= packet_snr(B) + end, + Packets + ). + +-spec handle_packets(list(), gateway(), integer(), state()) -> state(). +handle_packets([], _Gateway, _RxInstantLocal_us, State) -> + State; +handle_packets(_Packets, _Gateway, _RxInstantLocal_us, #state{reg_domain_confirmed = false} = State) -> + State; +handle_packets([Packet|Tail], Gateway, RxInstantLocal_us, #state{reg_region = _Region} = State) -> + POCVersion = application:get_env(miner, poc_version, 11), + Data = base64:decode(maps:get(<<"data">>, Packet)), + case route(Data) of + error -> + ok; + {onion, Payload} -> + Freq = maps:get(<<"freq">>, Packet), + %% onion server + UseRSSIS = case POCVersion of + X when X > 10 -> true; + _ -> false + end, + miner_onion_server_light:decrypt_radio( + Payload, + erlang:trunc(packet_rssi(Packet, UseRSSIS)), + packet_snr(Packet), + %% TODO we might want to send GPS time here, if available + maps:get(<<"tmst">>, Packet), + Freq, + channel(Freq, State#state.reg_freq_list), + maps:get(<<"datr">>, Packet) + ); + {_Type, _RoutingInfo} -> + %% normally packets here would be send to the router + %% but in light mode we can just discard non poc packets + noop + end, + handle_packets(Tail, Gateway, RxInstantLocal_us, State#state{last_mono_us = RxInstantLocal_us, last_tmst_us = maps:get(<<"tmst">>, Packet)}). + +-spec route(binary()) -> any(). 
+ route(Pkt) -> + case longfi:deserialize(Pkt) of + error -> + route_non_longfi(Pkt); + {ok, LongFiPkt} -> + %% hello longfi, my old friend + try longfi:type(LongFiPkt) == monolithic andalso longfi:oui(LongFiPkt) == 0 andalso longfi:device_id(LongFiPkt) == 1 of + true -> + {onion, longfi:payload(LongFiPkt)}; + false -> + %% we currently don't expect non-onion packets, + %% this is probably a false positive on a LoRaWAN packet + route_non_longfi(Pkt) + catch _:_ -> + route_non_longfi(Pkt) + end + end. + +% Some binary madness going on here +-spec route_non_longfi(binary()) -> any(). +route_non_longfi(<>) -> + {lorawan, {eui, DevEUI, AppEUI}}; +route_non_longfi(<>) when MType == ?UNCONFIRMED_UP; MType == ?CONFIRMED_UP -> + Body = binary:part(PayloadAndMIC, {0, byte_size(PayloadAndMIC) -4}), + {FPort, _FRMPayload} = + case Body of + <<>> -> {undefined, <<>>}; + <> -> {Port, Payload} + end, + case FPort of + 0 when FOptsLen /= 0 -> + error; + _ -> + {lorawan, {devaddr, DevAddr}} + end; +route_non_longfi(_) -> + error. + +maybe_mirror({undefined, undefined}, _) -> + ok; +maybe_mirror({_, undefined}, _) -> + ok; +maybe_mirror({Sock, Destination}, Packet) -> + gen_udp:send(Sock, Destination, Packet). + +channel(Freq, Frequencies) -> + channel(Freq, Frequencies, 0). + +channel(Freq, [H|T], Acc) -> + case abs(H - Freq) =< 0.001 of + true -> + Acc; + false -> + channel(Freq, T, Acc+1) + end. + +%% @doc returns a tuple of {SpreadingFactor, Bandwidth} from strings like "SFdBWddd" +%% +%% Example: `{7, 125} = scratch:parse_datarate("SF7BW125")' +-spec parse_datarate(string()) -> {integer(), integer()}. +parse_datarate(Datarate) -> + case Datarate of + [$S, $F, SF1, SF2, $B, $W, BW1, BW2, BW3] -> + {erlang:list_to_integer([SF1, SF2]), erlang:list_to_integer([BW1, BW2, BW3])}; + [$S, $F, SF1, $B, $W, BW1, BW2, BW3] -> + {erlang:list_to_integer([SF1]), erlang:list_to_integer([BW1, BW2, BW3])} + end. + +%% @doc adjusts concentrator timestamp (`tmst`) to a monotonic value. 
+%% +%% The returned value is a best-effort estimate of what +%% `erlang:monotonic_time(microsecond)` would return if it was called +%% at `Tmst_us`. +-spec tmst_to_local_monotonic_time(immediate | integer(), undefined | integer(), undefined | integer()) -> integer(). +tmst_to_local_monotonic_time(immediate, _PrevTmst_us, _PrevMonoTime_us) -> + erlang:monotonic_time(microsecond); +tmst_to_local_monotonic_time(_When, undefined, undefined) -> + %% We haven't yet received a `tmst` from the packet forwarder, so + %% we don't have anything to track. Let's just use the current + %% time and hope for the best. + erlang:monotonic_time(microsecond); +tmst_to_local_monotonic_time(Tmst_us, PrevTmst_us, PrevMonoTime_us) when Tmst_us >= PrevTmst_us -> + Tmst_us - PrevTmst_us + PrevMonoTime_us; +tmst_to_local_monotonic_time(Tmst_us, PrevTmst_us, PrevMonoTime_us) -> + %% Because `Tmst_us` is less than the last `tmst` we received from + %% the packet forwarder, we allow for the possibility one single + %% roll over of the clock has occurred, and that `Tmst_us` might + %% represent a time in the future. + Tmst_us + ?MAX_TMST_VAL - PrevTmst_us + PrevMonoTime_us. + +%% Extracts a packet's RSSI, abstracting away the differences between +%% GWMP JSON V1/V2. +-spec packet_rssi(map(), boolean()) -> number(). +packet_rssi(Packet, UseRSSIS) -> + RSSIS = maps:get(<<"rssis">>, Packet, undefined), + SingleRSSI = case UseRSSIS andalso RSSIS =/= undefined of + true -> RSSIS; + false -> maps:get(<<"rssi">>, Packet, undefined) + end, + case SingleRSSI of + %% No RSSI, perhaps this is a GWMP V2 + undefined -> + %% `rsig` is a list. It can contain more than one signal + %% quality object if the packet was received on multiple + %% antennas/receivers. So let's pick the one with the + %% highest RSSI. + FetchRSSI = case UseRSSIS of + true -> + %% Use RSSIS if available, fall back to RSSIC. 
+ fun (Obj) -> + maps:get(<<"rssis">>, Obj, + maps:get(<<"rssic">>, Obj, undefined)) + end; + false -> + %% Just use RSSIC. + fun (Obj) -> + maps:get(<<"rssic">>, Obj, undefined) + end + end, + BestRSSISelector = + fun (Obj, Best) -> + erlang:max(Best, FetchRSSI(Obj)) + end, + [H|T] = maps:get(<<"rsig">>, Packet), + lists:foldl(BestRSSISelector, FetchRSSI(H), T); + %% GWMP V1 + RSSI -> + RSSI + end. + +%% Extracts a packet's SNR, abstracting away the differences between +%% GWMP JSON V1/V2. +-spec packet_snr(map()) -> number(). +packet_snr(Packet) -> + case maps:get(<<"lsnr">>, Packet, undefined) of + %% GWMP V2 + undefined -> + %% `rsig` is a list. It can contain more than one signal + %% quality object if the packet was received on multiple + %% antennas/receivers. So let's pick the one with the + %% highest SNR + [H|T] = maps:get(<<"rsig">>, Packet), + Selector = fun(Obj, Best) -> + erlang:max(Best, maps:get(<<"lsnr">>, Obj)) + end, + lists:foldl(Selector, maps:get(<<"lsnr">>, H), T); + %% GWMP V1 + LSNR -> + LSNR + end. + +-spec send_packet( + Payload :: binary(), + When :: integer(), + ChannelSelectorFun :: fun(), + DataRate :: string(), + Power :: float(), + IPol :: boolean(), + HlmPacket :: helium_packet(), + From :: {pid(), reference()}, + State :: state() +) -> {error, any()} | {ok, state()}. +send_packet(Payload, When, ChannelSelectorFun, DataRate, Power, IPol, HlmPacket, From, + #state{socket=Socket, + gateways=Gateways, + packet_timers=Timers, + reg_freq_list=Freqs, + reg_throttle=Throttle, + last_tmst_us=PrevTmst_us, + last_mono_us=PrevMono_us}=State) -> + case select_gateway(Gateways) of + {error, _}=Error -> + Error; + {ok, #gateway{ip=IP, port=Port}} -> + lager:info("PULL_RESP to ~p:~p", [IP, Port]), + %% the fun is set by the sender and is used to deterministically route data via channels + LocalFreq = ChannelSelectorFun(Freqs), + + %% Check this transmission for regulatory compliance. 
+ {SpreadingFactor, Bandwidth} = parse_datarate(DataRate), + TimeOnAir = miner_lora_throttle:time_on_air(Bandwidth, SpreadingFactor, 5, 8, true, byte_size(Payload)), + AdjustedTmst_us = tmst_to_local_monotonic_time(When, PrevTmst_us, PrevMono_us), + SentAt = AdjustedTmst_us / 1000, + case miner_lora_throttle:can_send(Throttle, SentAt, LocalFreq, TimeOnAir) of + false -> lager:warning("This transmission should have been rejected"); + true -> ok + end, + + Token = mk_token(Timers), + Packet = create_packet(Payload, When, LocalFreq, DataRate, Power, IPol, Token), + maybe_mirror(State#state.mirror_socket, Packet), + lager:debug("sending packet via channel: ~p",[LocalFreq]), + ok = gen_udp:send(Socket, IP, Port, Packet), + %% TODO a better timeout would be good here + Ref = erlang:send_after(10000, self(), {tx_timeout, Token}), + {ok, State#state{packet_timers=maps:put(Token, {send, Ref, From, SentAt, LocalFreq, TimeOnAir, HlmPacket}, Timers)}} + end. + +-spec create_packet( + Payload :: binary(), + When :: atom() | integer(), + LocalFreq :: integer(), + DataRate :: string(), + Power :: float(), + IPol :: boolean(), + Token :: binary() +) -> binary(). +create_packet(Payload, When, LocalFreq, DataRate, Power, IPol, Token) -> + + IsImme = When == immediate, + Tmst = case IsImme of + false -> When; + true -> 0 + end, + + DecodedJSX = #{<<"txpk">> => #{ + <<"ipol">> => IPol, %% IPol for downlink to devices only, not poc packets + <<"imme">> => IsImme, + <<"powe">> => trunc(Power), + <<"tmst">> => Tmst, + <<"freq">> => LocalFreq, + <<"modu">> => <<"LORA">>, + <<"datr">> => list_to_binary(DataRate), + <<"codr">> => <<"4/5">>, + <<"size">> => byte_size(Payload), + <<"rfch">> => 0, + <<"data">> => base64:encode(Payload) + } + }, + BinJSX = jsx:encode(DecodedJSX), + lager:debug("PULL_RESP: ~p",[DecodedJSX]), + lager:debug("sending packet via channel: ~p",[LocalFreq]), + <>. 
+ +-spec retry_with_rx2( + HlmPacket0 :: helium_packet(), + From :: {pid(), reference()}, + State :: state() +) -> {error, any()} | {ok, state()}. +retry_with_rx2(HlmPacket0, From, State) -> + #window_pb{timestamp=TS, + frequency=Freq, + datarate=DataRate} = blockchain_helium_packet_v1:rx2_window(HlmPacket0), + lager:info("Retrying with RX2 window ~p", [TS]), + Power = blockchain_helium_packet_v1:signal_strength(HlmPacket0), + Payload = blockchain_helium_packet_v1:payload(HlmPacket0), + ChannelSelectorFun = fun(_FreqList) -> Freq end, + HlmPacket1 = HlmPacket0#packet_pb{rx2_window=undefined}, + send_packet(Payload, TS, ChannelSelectorFun, DataRate, Power, true, HlmPacket1, From, State). + +-spec open_socket(string(), pos_integer()) -> {ok, port(), port()}. +open_socket(IP, Port) -> + {ok, Socket} = gen_udp:open(Port, [binary, {reuseaddr, true}, {active, 100}, {ip, IP}]), + MirrorSocket = + case application:get_env(miner, radio_mirror_port, undefined) of + undefined -> + undefined; + P -> + {ok, MS} = gen_udp:open(P, [binary, {active, true}]), + MS + end, + {ok, Socket, MirrorSocket}. + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). 
+ +rssi_fetch_test() -> + PacketWithRSSIS = #{ + <<"rssis">> => 1, + <<"rssi">> => 2 + }, + PacketWithoutRSSIS = #{ + <<"rssi">> => 2 + }, + RSIGPacketWithRSSIS = #{ + <<"rsig">> => [ + #{ <<"rssis">> => 1, <<"rssic">> => 2 }, + #{ <<"rssis">> => 3, <<"rssic">> => 4 }, + #{ <<"rssis">> => -1, <<"rssic">> => 0 } + ] + }, + RSIGPacketWithoutRSSIS = #{ + <<"rsig">> => [ + #{ <<"rssic">> => 2 }, + #{ <<"rssic">> => 4 }, + #{ <<"rssic">> => 0 } + ] + }, + ?assertEqual(packet_rssi(PacketWithRSSIS, true), 1), + ?assertEqual(packet_rssi(PacketWithRSSIS, false), 2), + ?assertEqual(packet_rssi(PacketWithoutRSSIS, true), 2), + ?assertEqual(packet_rssi(PacketWithoutRSSIS, false), 2), + ?assertEqual(packet_rssi(RSIGPacketWithRSSIS, true), 3), + ?assertEqual(packet_rssi(RSIGPacketWithRSSIS, false), 4), + ?assertEqual(packet_rssi(RSIGPacketWithoutRSSIS, true), 4), + ?assertEqual(packet_rssi(RSIGPacketWithoutRSSIS, false), 4). + +-endif. diff --git a/src/miner_restart_sup.erl b/src/miner_restart_sup.erl index f65e1994e..7ff4b2a10 100644 --- a/src/miner_restart_sup.erl +++ b/src/miner_restart_sup.erl @@ -54,13 +54,10 @@ init(_Opts) -> application:set_env(blockchain, sc_client_handler, miner_lora), BaseDir = application:get_env(blockchain, base_dir, "data"), - %% Miner Options - POCOpts = #{ - base_dir => BaseDir - }, + %% Miner Options - OnionServer = + OnionOpts = case application:get_env(miner, radio_device, undefined) of {RadioBindIP, RadioBindPort0, RadioSendIP, RadioSendPort} -> RadioBindPort = @@ -70,7 +67,7 @@ init(_Opts) -> end, %% check if we are overriding/forcing the region ( for lora ) RegionOverRide = check_for_region_override(), - OnionOpts = #{ + #{ radio_udp_bind_ip => RadioBindIP, radio_udp_bind_port => RadioBindPort, radio_udp_send_ip => RadioSendIP, @@ -78,12 +75,14 @@ init(_Opts) -> ecdh_fun => ECDHFun, sig_fun => SigFun, region_override => RegionOverRide - }, - [?WORKER(miner_onion_server, [OnionOpts]), - ?WORKER(miner_lora, [OnionOpts]), - 
?WORKER(miner_poc_statem, [POCOpts])]; + }; _ -> - [] + #{ + radio_udp_bind_ip => {127, 0, 0, 1}, + radio_udp_bind_port => 0, + ecdh_fun => ECDHFun, + sig_fun => SigFun + } end, EbusServer = @@ -92,17 +91,63 @@ init(_Opts) -> _ -> [] end, - ValServers = - case application:get_env(miner, mode, gateway) of + MinerMode = application:get_env(miner, mode, gateway), + POCServers = + case MinerMode of validator -> - [?WORKER(miner_val_heartbeat, []), - ?SUP(sibyl_sup, [])]; - _ -> [] + %% NOTE: validators do not require the onion or lora server + %% however removing these here breaks tests + %% there is no harm done by leaving them running + application:set_env(sibyl, poc_mgr_mod, miner_poc_mgr), + application:set_env(sibyl, poc_report_handler, miner_poc_report_handler), + PocMgrTab = miner_poc_mgr:make_ets_table(), + POCMgrOpts = #{tab1 => PocMgrTab}, + POCOpts = #{base_dir => BaseDir, + cfs => ["default", + "poc_mgr_cf" + ] + }, + [ + ?WORKER(miner_onion_server, [OnionOpts]), + ?WORKER(miner_lora, [OnionOpts]), + ?WORKER(miner_poc_mgr_db_owner, [POCOpts]), + ?WORKER(miner_poc_statem, [POCOpts]), + ?WORKER(miner_poc_mgr, [POCMgrOpts]) + ]; + gateway -> + %% running as a gateway + %% run both the grpc and libp2p version of the lora & onion modules + %% they will work out which is required based on chain vars + %% start miner_poc_statem, if the pocs are being run by validators, it will do nothing + %% start the grpc start client, if the pocs are NOT being run by validators, it will do nothing + POCOpts = #{ + base_dir => BaseDir + }, + [ + ?WORKER(miner_onion_server_light, [OnionOpts]), + ?WORKER(miner_onion_server, [OnionOpts]), + ?WORKER(miner_lora_light, [OnionOpts]), + ?WORKER(miner_lora, [OnionOpts]), + ?WORKER(miner_poc_grpc_client_statem, []), + ?WORKER(miner_poc_statem, [POCOpts]) + + ] end, {JsonRpcPort, JsonRpcIp} = jsonrpc_server_config(), + ValServers = + case MinerMode of + validator -> + [ + ?WORKER(miner_val_heartbeat, []), + ?SUP(sibyl_sup, []) + ]; + _ -> 
+ [] + end, ChildSpecs = + [ ?WORKER(miner_hbbft_sidecar, []), ?WORKER(miner, []), @@ -111,9 +156,9 @@ init(_Opts) -> {port, JsonRpcPort}]]), ?WORKER(miner_poc_denylist, []) ] ++ + POCServers ++ ValServers ++ - EbusServer ++ - OnionServer, + EbusServer, {ok, {SupFlags, ChildSpecs}}. diff --git a/src/poc/grpc_client_custom.erl b/src/poc/grpc_client_custom.erl new file mode 100644 index 000000000..b335927db --- /dev/null +++ b/src/poc/grpc_client_custom.erl @@ -0,0 +1,283 @@ +%% NOTE: +%% copied and modified from https://github.com/Bluehouse-Technology/grpc_client/blob/master/src/grpc_client.erl +%% requires the gpb modules to have been created with the following config: +%%{gpb_opts, [ +%% {rename,{msg_fqname,base_name}}, +%% use_packages, +%% {report_errors, false}, +%% {descriptor, false}, +%% {recursive, false}, +%% {i, "_build/default/lib/helium_proto/src"}, +%% {o, "src/grpc/autogen/client"}, +%% {module_name_prefix, ""}, +%% {module_name_suffix, "_client_pb"}, +%% {rename, {msg_name, {suffix, "_pb"}}}, +%% {strings_as_binaries, false}, +%% type_specs, +%% {defs_as_proplists, true} +%%]} + +%%%------------------------------------------------------------------- +%%% Licensed to the Apache Software Foundation (ASF) under one +%%% or more contributor license agreements. See the NOTICE file +%%% distributed with this work for additional information +%%% regarding copyright ownership. The ASF licenses this file +%%% to you under the Apache License, Version 2.0 (the +%%% "License"); you may not use this file except in compliance +%%% with the License. You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, +%%% software distributed under the License is distributed on an +%%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%%% KIND, either express or implied. 
See the License for the +%%% specific language governing permissions and limitations +%%% under the License. +%%% + +%% @doc The interface for grpc_client. +%% +%% This module contains the functions use a gRPC service from Erlang. +%% +%% See the Readme in the root folder of the repository for a reference to a +%% more general (tutorial-style) introduction. +%% +-module(grpc_client_custom). + +-export([compile/1, compile/2, + connect/3, connect/4, + new_stream/4, new_stream/5, + send/2, send_last/2, + unary/6, + rcv/1, rcv/2, + get/1, + ping/2, + stop_stream/1, stop_stream/2, + stop_connection/1]). + +-type connection_option() :: + verify_server_opt() | + server_host_override_opt() | + http2_client_opt() | + {http2_options, [http2_option()]}. + +-type verify_server_opt() :: {verify_server_identity, boolean()}. +%% If true (and if the transport is ssl), the client will verify +%% that the subject of the server certificate matches with the domain +%% of the server (use the 'server_host_override' to check against +%% another name). + +-type server_host_override_opt() :: {server_host_override, string()}. +%% If the 'verify_server_identity' option is set, check the subject of +%% the server certificate against this name (rather than against the host name). + +-type http2_client_opt() :: {http2_client, module()}. +%% A module that implements an HTPP/2 client (with a specific API). +%% By default 'http2_client' will be used. As an alternative +%% 'grpc_client_chatterbox_adapter' can be used, which provides an interface to the +%% chatterbox http/2 client, or any other HTTP/2 client implementation with the right +%% API. + +-type http2_option() :: term(). +%% Passed on to the HTTP/2 client. See the documentation of 'http2_client' for the options +%% that can be specified for the default HTTP2/2 client. + +-type connection() :: grpc_client_connection:connection(). + +-type metadata_key() :: binary(). +-type metadata_value() :: binary(). 
+-type metadata() :: #{metadata_key() => metadata_value()}. +-type compression_method() :: none | gzip. + +-type stream_option() :: + {metadata, metadata()} | + {compression, compression_method()} | + {http2_options, [term()]}. + +-type client_stream() :: pid(). + +-type rcv_response() :: {data, map()} | + {headers, metadata()} | + eof | {error, term()}. + +-type get_response() :: rcv_response() | empty. + +-type unary_response() :: ok_response() | error_response(). + +-type ok_response() :: + {ok, #{result => any(), + status_message => binary(), + http_status => 200, + grpc_status => 0, + headers => metadata(), + trailers => metadata()}}. + +-type error_response() :: + {error, #{error_type => error_type(), + http_status => integer(), + grpc_status => integer(), + status_message => binary(), + headers => metadata(), + result => any(), + trailers => grpc:metadata()}}. + +-type error_type() :: client | timeout | http | grpc. + +-export_type([connection/0, + stream_option/0, + connection_option/0, + client_stream/0, + unary_response/0, + metadata/0, + compression_method/0 + ]). + +-spec compile(FileName::string()) -> ok. +%% @equiv compile(FileName, []) +compile(FileName) -> + grpc_client_custom:compile(FileName, []). + +-spec compile(FileName::string(), Options::gpb_compile:opts()) -> ok. +%% @doc Compile a .proto file to generate client stubs and a module +%% to encode and decode the protobuf messages. +%% +%% Refer to gpb for the options. grpc_client will always use the option +%% 'maps' (so that the protobuf messages are translated to and +%% from maps) and the option '{i, "."}' (so that .proto files in the +%% current working directory will be found). +compile(FileName, Options) -> + grpc_lib_compile:file(FileName, [{generate, client} | Options]). + +-spec connect(Transport::tcp|ssl, + Host::string(), + Port::integer()) -> {ok, connection()} | {error, term()}. 
+%% @equiv connect(Transport, Host, Port, []) +connect(Transport, Host, Port) -> + connect(Transport, Host, Port, []). + +-spec connect(Transport::tcp|ssl, + Host::string(), + Port::integer(), + Options::[connection_option()]) -> {ok, connection()} | {error, term()}. +%% @doc Start a connection to a gRPC server. +%% +%% If 'verify_server_identity' is true (and Transport == ssl), the client will +%% check that the subject of the certificate received from the server is +%% identical to Host. +%% +%% If it is known that the server returns a certificate with another subject +%% than the host name, the 'server_host_override' option can be used to +%% specify that other subject. +%% +%% The transport options will be passed to the selected Transport when +%% establishing the connection. +%% +%% The option {'http2_client', module()} enables the selection of +%% an http2 client. The default is http2_client, as an alternative it +%% is possible to select 'grpc_client_chatterbox_adapter', which +%% implements an adapter for the chatterbox http/2 client. +connect(Transport, Host, Port, Options) -> + grpc_client_connection:new(Transport, Host, Port, Options). + +-spec new_stream(Connection::connection(), + Service::atom(), + Rpc::atom(), + DecoderModule::module()) -> {ok, client_stream()}. +%% @equiv new_stream(Connection, Service, Rpc, DecoderModule, []) +new_stream(Connection, Service, Rpc, DecoderModule) -> + new_stream(Connection, Service, Rpc, DecoderModule, []). + +-spec new_stream(Connection::connection(), + Service::atom(), + Rpc::atom(), + DecoderModule::module(), + Options::[stream_option()]) -> {ok, client_stream()}. +%% @doc Create a new stream to start a new RPC. +new_stream(Connection, Service, Rpc, DecoderModule, Options) -> + CBMod = proplists:get_value(callback_mod, Options), + grpc_client_stream_custom:new(Connection, Service, Rpc, DecoderModule, Options, CBMod). + +-spec send(Stream::client_stream(), Msg::any()) -> ok. 
+%% @doc Send a message from the client to the server. +send(Stream, Msg) when is_pid(Stream) -> + grpc_client_stream_custom:send(Stream, Msg). + +-spec send_last(Stream::client_stream(), Msg::map()) -> ok. +%% @doc Send a message to server and mark it as the last message +%% on the stream. For simple RPC and client-streaming RPCs that +%% will trigger the response from the server. +send_last(Stream, Msg) when is_pid(Stream)-> + grpc_client_stream_custom:send_last(Stream, Msg). + +-spec rcv(Stream::client_stream()) -> rcv_response(). +%% @equiv rcv(Stream, infinity) +rcv(Stream) -> + grpc_client_stream_custom:rcv(Stream). + +-spec rcv(Stream::client_stream(), Timeout::timeout()) -> rcv_response(). +%% @doc Receive a message from the server. This is a blocking +%% call, it returns when a message has been received or after Timeout. +%% Timeout is in milliseconds. +%% +%% Returns 'eof' after the last message from the server has been read. +rcv(Stream, Timeout) -> + grpc_client_stream_custom:rcv(Stream, Timeout). + +-spec get(Stream::client_stream()) -> get_response(). +%% @doc Get a message from the stream, if there is one in the queue. If not return +%% 'empty'. This is a non-blocking call. +%% +%% Returns 'eof' after the last message from the server has been read. +get(Stream) -> + grpc_client_stream_custom:get(Stream). + +-spec ping(Connection::connection(), + Timeout::timeout()) -> {ok, RoundTripTime::integer()} | + {error, term()}. +%% @doc Send a PING request. +ping(Connection, Timeout) -> + grpc_client_connection:ping(Connection, Timeout). + +-spec stop_stream(Stream::client_stream()) -> ok. +%% @equiv stop_stream(Stream, 0) +stop_stream(Stream) -> + stop_stream(Stream, 0). + +-spec stop_stream(Stream::client_stream(), ErrorCode::integer()) -> ok. +%% @doc +%% Stops a stream. Depending on the state of the connection a 'RST_STREAM' +%% frame may be sent to the server with the provided Errorcode (it should be +%% a HTTP/2 error code, see RFC7540). 
+stop_stream(Stream, ErrorCode) -> + grpc_client_stream_custom:stop(Stream, ErrorCode). + +-spec stop_connection(Connection::connection()) -> ok. +%% @doc Stop a connection and clean up. +stop_connection(Connection) -> + grpc_client_connection:stop(Connection). + +-spec unary(Connection::connection(), + Message::tuple(), Service::atom(), Rpc::atom(), + Decoder::module(), + Options::[stream_option() | + {timeout, timeout()} | + {callback_mod, atom()}]) -> unary_response(). +%% @doc Call a unary rpc in one go. +%% +%% Set up a stream, receive headers, message and trailers, stop +%% the stream and assemble a response. This is a blocking function. +unary(Connection, Message, Service, Rpc, Decoder, Options) -> + {Timeout, StreamOptions} = grpc_lib:keytake(timeout, Options, infinity), + try + {ok, Stream} = new_stream(Connection, Service, + Rpc, Decoder, [{type, unary} | StreamOptions]), + Response = grpc_client_stream_custom:call_rpc(Stream, Message, Timeout), + stop_stream(Stream), + Response + catch + _Type:_Error:_Stack -> + lager:warning("Failed to create stream. Type: ~p, Error: ~p, Stack:~p", [_Type, _Error, _Stack]), + {error, #{error_type => client, + status_message => <<"stream create failed">>}} + end. 
\ No newline at end of file diff --git a/src/poc/grpc_client_stream_custom.erl b/src/poc/grpc_client_stream_custom.erl new file mode 100644 index 000000000..a283b2755 --- /dev/null +++ b/src/poc/grpc_client_stream_custom.erl @@ -0,0 +1,469 @@ +%% NOTE: +%% copied and modified from https://github.com/Bluehouse-Technology/grpc_client/blob/master/src/grpc_client_stream.erl +%% requires the gpb modules to have been created with the following config: +%%{gpb_opts, [ +%% {rename,{msg_fqname,base_name}}, +%% use_packages, +%% {report_errors, false}, +%% {descriptor, false}, +%% {recursive, false}, +%% {i, "_build/default/lib/helium_proto/src"}, +%% {o, "src/grpc/autogen/client"}, +%% {module_name_prefix, ""}, +%% {module_name_suffix, "_client_pb"}, +%% {rename, {msg_name, {suffix, "_pb"}}}, +%% {strings_as_binaries, false}, +%% type_specs, +%% {defs_as_proplists, true} +%%]} + +%%%------------------------------------------------------------------- +%%% Licensed to the Apache Software Foundation (ASF) under one +%%% or more contributor license agreements. See the NOTICE file +%%% distributed with this work for additional information +%%% regarding copyright ownership. The ASF licenses this file +%%% to you under the Apache License, Version 2.0 (the +%%% "License"); you may not use this file except in compliance +%%% with the License. You may obtain a copy of the License at +%%% +%%% http://www.apache.org/licenses/LICENSE-2.0 +%%% +%%% Unless required by applicable law or agreed to in writing, +%%% software distributed under the License is distributed on an +%%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%%% KIND, either express or implied. See the License for the +%%% specific language governing permissions and limitations +%%% under the License. +%%% + +%% @private An a-synchronous client with a queue-like interface. +%% A gen_server is started for each stream, this keeps track +%% of the status of the http2 stream and it buffers responses in a queue. 
+-module(grpc_client_stream_custom). + +-behaviour(gen_server). + +-export([new/6, + send/2, send_last/2, + get/1, rcv/1, rcv/2, + state/1, + call_rpc/3, + stop/2]). + +%% gen_server behaviors +-export([code_change/3, handle_call/3, handle_cast/2, handle_info/2, init/1, terminate/2]). + +%%-type stream() :: +%% #{stream_id := integer(), +%% package := string(), +%% service := string(), +%% rpc := string(), +%% queue := queue:queue(), +%% response_pending := boolean(), +%% state := idle | open | half_closed_local | half_closed_remote | closed, +%% encoder := module(), +%% connection := grpc_client_custom:connection(), +%% headers_sent := boolean(), +%% metadata := grpc_client_custom:metadata(), +%% compression := grpc_client_custom:compression_method(), +%% buffer := binary(), +%% handler_callback := undefined, +%% handler_state := undefined, +%% type := unary | streaming | undefined}. + +-spec new(Connection::pid(), + Service::atom(), + Rpc::atom(), + Encoder::module(), + Options::list(), + HandlerMod::atom() ) -> {ok, Pid::pid()} | {error, Reason::term()}. +new(Connection, Service, Rpc, Encoder, Options, HandlerMod) -> + gen_server:start_link(?MODULE, + {Connection, Service, Rpc, Encoder, Options, HandlerMod}, []). + +send(Pid, Message) -> + gen_server:call(Pid, {send, Message}). + +send_last(Pid, Message) -> + gen_server:call(Pid, {send_last, Message}). + +get(Pid) -> + gen_server:call(Pid, get). + +rcv(Pid) -> + rcv(Pid, infinity). + +rcv(Pid, Timeout) -> + gen_server:call(Pid, {rcv, Timeout}, infinity). + +%% @doc Get the state of the stream. +state(Pid) -> + gen_server:call(Pid, state). + +-spec stop(Stream::pid(), ErrorCode::integer()) -> ok. +%% @doc Close (stop/clean up) the stream. +%% +%% If the stream is in open or half closed state, a RST_STREAM frame +%% will be sent to the server. +stop(Pid, ErrorCode) -> + gen_server:call(Pid, {stop, ErrorCode}). + +%% @doc Call a unary rpc and process the response. 
+call_rpc(Pid, Message, Timeout) -> + try send_last(Pid, Message) of + ok -> + process_response(Pid, Timeout) + catch + _:_ -> + {error, #{error_type => client, + status_message => <<"failed to encode and send message">>}} + end. + +%% gen_server implementation +%% @private +init({Connection, Service, Rpc, Encoder, Options, HandlerMod}) -> + try + StreamType = proplists:get_value(type, Options, undefined), + lager:info("init stream for RPC ~p and type ~p", [Rpc, StreamType]), + Stream = new_stream(Connection, Service, Rpc, Encoder, Options), + lager:info("init stream success with state ~p, handle_mod: ~p", [Stream, HandlerMod]), + HandlerState = HandlerMod:init(), + {ok, Stream#{handler_state => HandlerState, handler_callback => HandlerMod, type => StreamType}} + catch + _Class:_Error:_Stack -> + lager:warning("failed to create stream, ~p ~p ~p", [_Class, _Error, _Stack]), + {stop, <<"failed to create stream">>} + end. + +%% @private +code_change(_OldVsn, State, _Extra) -> + {ok, State}. 
+ +%% @private +handle_call(state, _From, #{state := State} = Stream) -> + {reply, State, Stream}; +handle_call({stop, ErrorCode}, _From, Stream) -> + {stop, normal, ok, rst_stream(Stream, ErrorCode)}; +handle_call({send_last, Message}, _From, Stream) -> + {reply, ok, send_msg(Stream, Message, true)}; +handle_call({send, Message}, _From, Stream) -> + {reply, ok, send_msg(Stream, Message, false)}; +handle_call(get, _From, #{queue := Queue, + state := StreamState} = Stream) -> + {Value, NewQueue} = queue:out(Queue), + Response = case {Value, StreamState} of + {{value, V}, _} -> + V; + {empty, S} when S == closed; + S == half_closed_remote -> + eof; + {empty, _} -> + empty + end, + {reply, Response, Stream#{queue => NewQueue}}; +handle_call({rcv, Timeout}, From, #{queue := Queue, + state := StreamState} = Stream) -> + {Value, NewQueue} = queue:out(Queue), + NewStream = Stream#{queue => NewQueue}, + case {Value, StreamState} of + {{value, V}, _} -> + {reply, V, NewStream}; + {empty, S} when S == closed; + S == half_closed_remote -> + {reply, eof, NewStream}; + {empty, _} -> + {noreply, NewStream#{client => From, + response_pending => true}, Timeout} + end. + +%% @private +handle_cast(_, State) -> + {noreply, State}. + +%% @private +handle_info({'RECV_DATA', StreamId, Bin}, Stream) -> + %% This is a workaround to deal with the different format from Chatterbox. + %% TODO: find a better way to do this. 
+    handle_info({'RECV_DATA', StreamId, Bin, false, false}, Stream);
+handle_info({'RECV_DATA', StreamId, Bin,
+             _StreamWindowError, _ConnectionWindowError},
+            #{stream_id := StreamId,
+              buffer := Buffer} = Stream) ->
+    case <<Buffer/binary, Bin/binary>> of
+        <<Encoded, Size:32, Message:Size/binary, Rest/binary>> ->
+            Response =
+                try
+                    {data, decode(Encoded, Message, Stream#{buffer => Rest})}
+                catch
+                    throw:{error, Message} ->
+                        {error, Message};
+                    _Error:_Message ->
+                        {error, <<"failed to decode message">>}
+                end,
+            info_response(Response, Stream#{buffer => Rest});
+        NotComplete ->
+            {noreply, Stream#{buffer => NotComplete}}
+    end;
+
+handle_info({'RECV_HEADERS', StreamId, Headers},
+            #{stream_id := StreamId,
+              state := StreamState} = Stream) ->
+    HeadersMap = maps:from_list([grpc_lib:maybe_decode_header(H)
+                                 || H <- Headers]),
+    Encoding = maps:get(<<"grpc-encoding">>, HeadersMap, none),
+    NewState = case StreamState of
+                   idle ->
+                       open;
+                   _ ->
+                       StreamState
+               end,
+    info_response({headers, HeadersMap},
+                  Stream#{response_encoding => Encoding,
+                          state => NewState});
+handle_info({'END_STREAM', StreamId},
+            #{stream_id := StreamId,
+              state := StreamState} = Stream) ->
+    NewState = case StreamState of
+                   half_closed_local ->
+                       closed;
+                   _ ->
+                       half_closed_remote
+               end,
+    info_response(eof, Stream#{state => NewState});
+handle_info({ClosedMessage, StreamId, _ErrorCode},
+            #{stream_id := StreamId} = Stream)
+  when ClosedMessage == 'RESET_BY_PEER';
+       ClosedMessage == 'CLOSED_BY_PEER' ->
+    info_response(eof, Stream#{state => closed});
+handle_info(timeout, #{response_pending := true,
+                       client := Client} = Stream) ->
+    gen_server:reply(Client, {error, timeout}),
+    {noreply, Stream#{response_pending => false}};
+handle_info(Msg, #{handler_callback := HandlerCB} = Stream) ->
+    NewState =
+        case erlang:function_exported(HandlerCB, handle_info, 2) of
+            true -> HandlerCB:handle_info(Msg, Stream);
+            false -> Stream
+        end,
+    {noreply, NewState}.
+%%handle_info(_InfoMessage, Stream) ->
+%%    {noreply, Stream}.
+ +%% @private +terminate(_Reason, _State) -> + ok. + + +%% internal methods + +new_stream(Connection, Service, Rpc, Encoder, Options) -> + Compression = proplists:get_value(compression, Options, none), + Metadata = proplists:get_value(metadata, Options, #{}), + TransportOptions = proplists:get_value(http2_options, Options, []), + {ok, StreamId} = grpc_client_connection:new_stream(Connection, TransportOptions), + RpcDef = Encoder:find_rpc_def(Service, Rpc), + RpcDefMap = maps:from_list(RpcDef), + %% the gpb rpc def has 'input', 'output' etc. + %% All the information is combined in 1 map, + %% which is is the state of the gen_server. + RpcDefMap#{stream_id => StreamId, + package => [], + service => Service, + rpc => Rpc, + queue => queue:new(), + response_pending => false, + state => idle, + encoder => Encoder, + connection => Connection, + headers_sent => false, + metadata => Metadata, + compression => Compression, + buffer => <<>>}. + +send_msg(#{stream_id := StreamId, + connection := Connection, + headers_sent := HeadersSent, + metadata := Metadata, + state := State + } = Stream, Message, EndStream) -> + Encoded = encode(Stream, Message), + case HeadersSent of + false -> + DefaultHeaders = default_headers(Stream), + AllHeaders = add_metadata(DefaultHeaders, Metadata), + ok = grpc_client_connection:send_headers(Connection, StreamId, AllHeaders); + true -> + ok + end, + Opts = [{end_stream, EndStream}], + NewState = + case {EndStream, State} of + {false, _} when State == idle -> + open; + {false, _} -> + State; + {true, _} when State == open; + State == idle -> + half_closed_local; + {true, _} -> + closed + end, + ok = grpc_client_connection:send_body(Connection, StreamId, Encoded, Opts), + Stream#{headers_sent => true, + state => NewState}. + +rst_stream(#{connection := Connection, + stream_id := StreamId} = Stream, ErrorCode) -> + grpc_client_connection:rst_stream(Connection, StreamId, ErrorCode), + Stream#{state => closed}. 
+ +default_headers(#{service := Service, + rpc := Rpc, + package := Package, + compression := Compression, + connection := #{host := Host, + scheme := Scheme} + }) -> + Path = iolist_to_binary(["/", Package, atom_to_list(Service), + "/", atom_to_list(Rpc)]), + Headers1 = case Compression of + none -> + []; + _ -> + [{<<"grpc-encoding">>, + atom_to_binary(Compression, unicode)}] + end, + [{<<":method">>, <<"POST">>}, + {<<":scheme">>, Scheme}, + {<<":path">>, Path}, + {<<":authority">>, Host}, + {<<"content-type">>, <<"application/grpc+proto">>}, + {<<"user-agent">>, <<"grpc-erlang/0.0.1">>}, + {<<"te">>, <<"trailers">>} | Headers1]. + +add_metadata(Headers, Metadata) -> + lists:foldl(fun(H, Acc) -> + {K, V} = grpc_lib:maybe_encode_header(H), + %% if the key exists, replace it. + lists:keystore(K, 1, Acc, {K,V}) + end, Headers, maps:to_list(Metadata)). + +info_response(Response, #{response_pending := true, + client := Client} = Stream) -> + gen_server:reply(Client, Response), + {noreply, Stream#{response_pending => false}}; +info_response(Response, #{queue := Queue, type := unary} = Stream) -> + NewQueue = queue:in(Response, Queue), + {noreply, Stream#{queue => NewQueue}}; +%%info_response(Response, #{queue := Queue} = Stream) -> +%% NewQueue = queue:in(Response, Queue), +%% {noreply, Stream#{queue => NewQueue}}. + +info_response(eof = Response, #{type := Type} = Stream) -> + lager:info("info_response ~p, stream type: ~p", [Response, Type]), + {stop, normal, rst_stream(Stream, 0)}; +info_response(Response, #{handler_callback := CB, handler_state := CBState} = Stream) -> + lager:info("info_response ~p, CB: ~p", [Response, CB]), + NewCBState = CB:handle_msg(Response, CBState), + {noreply, Stream#{handler_callback_state => NewCBState}}. +%% TODO: fix the error handling, currently it is very hard to understand the +%% error that results from a bad message (Map). 
+encode(#{encoder := Encoder, + input := MsgType, + compression := CompressionMethod}, Map) -> + %% RequestData = Encoder:encode_msg(Map, MsgType), + try Encoder:encode_msg(Map, MsgType) of + RequestData -> + maybe_compress(RequestData, CompressionMethod) + catch + error:function_clause -> + throw({error, {failed_to_encode, MsgType, Map}}); + Error:Reason -> + throw({error, {Error, Reason}}) + end. + +maybe_compress(Encoded, none) -> + Length = byte_size(Encoded), + <<0, Length:32, Encoded/binary>>; +maybe_compress(Encoded, gzip) -> + Compressed = zlib:gzip(Encoded), + Length = byte_size(Compressed), + <<1, Length:32, Compressed/binary>>; +maybe_compress(_Encoded, Other) -> + throw({error, {compression_method_not_supported, Other}}). + +decode(Encoded, Binary, + #{response_encoding := Method, + encoder := Encoder, + output := MsgType}) -> + Message = case Encoded of + 1 -> decompress(Binary, Method); + 0 -> Binary + end, + Encoder:decode_msg(Message, MsgType). + +decompress(Compressed, <<"gzip">>) -> + zlib:gunzip(Compressed); +decompress(_Compressed, Other) -> + throw({error, {decompression_method_not_supported, Other}}). + +process_response(Pid, Timeout) -> + case rcv(Pid, Timeout) of + {headers, #{<<":status">> := <<"200">>, + <<"grpc-status">> := GrpcStatus} = Trailers} + when GrpcStatus /= <<"0">> -> + %% "trailers only" response. + grpc_response(#{}, #{}, Trailers); + {headers, #{<<":status">> := <<"200">>} = Headers} -> + get_message(Headers, Pid, Timeout); + {headers, #{<<":status">> := HttpStatus} = Headers} -> + {error, #{error_type => http, + status => {http, HttpStatus}, + headers => Headers}}; + {headers, #{<<"grpc-status">> := GrpcStatus} = Headers} + when GrpcStatus == <<"14">> -> + {error, #{error_type => http, + status => {http, <<"503">>}, + headers => Headers}}; + {error, timeout} -> + {error, #{error_type => timeout}} + end. 
+ +get_message(Headers, Pid, Timeout) -> + case rcv(Pid, Timeout) of + {data, Response} -> + get_trailer(Response, Headers, Pid, Timeout); + {headers, Trailers} -> + grpc_response(Headers, #{}, Trailers); + {error, timeout} -> + {error, #{error_type => timeout, + headers => Headers}} + end. + +get_trailer(Response, Headers, Pid, Timeout) -> + case rcv(Pid, Timeout) of + {headers, Trailers} -> + grpc_response(Headers, Response, Trailers); + {error, timeout} -> + {error, #{error_type => timeout, + headers => Headers, + result => Response}} + end. + +grpc_response(Headers, Response, #{<<"grpc-status">> := <<"0">>} = Trailers) -> + StatusMessage = maps:get(<<"grpc-message">>, Trailers, <<"">>), + {ok, #{status_message => StatusMessage, + http_status => 200, + grpc_status => 0, + headers => Headers, + result => Response, + trailers => Trailers}}; +grpc_response(Headers, Response, #{<<"grpc-status">> := ErrorStatus} = Trailers) -> + StatusMessage = maps:get(<<"grpc-message">>, Trailers, <<"">>), + {error, #{error_type => grpc, + http_status => 200, + grpc_status => binary_to_integer(ErrorStatus), + status_message => StatusMessage, + headers => Headers, + result => Response, + trailers => Trailers}}. 
\ No newline at end of file diff --git a/src/poc/miner_onion_server.erl b/src/poc/miner_onion_server.erl index a4e91ddef..5e77a292e 100644 --- a/src/poc/miner_onion_server.erl +++ b/src/poc/miner_onion_server.erl @@ -150,7 +150,7 @@ send_receipt(Data, OnionCompactKey, Type, Time, RSSI, SNR, Frequency, Channel, D Acc; false -> P2P = libp2p_crypto:pubkey_bin_to_p2p(Challenger), - case miner_poc:dial_framed_stream(blockchain_swarm:tid(), P2P, []) of + case miner_poc:dial_framed_stream(blockchain_swarm:tid(), P2P, miner_poc_handler, []) of {error, _Reason} -> lager:error("failed to dial challenger ~p (~p)", [P2P, _Reason]), [error|Acc]; @@ -238,7 +238,7 @@ send_witness(Data, OnionCompactKey, Time, RSSI, SNR, Frequency, Channel, DataRat false -> EncodedWitness = blockchain_poc_response_v1:encode(Witness1), P2P = libp2p_crypto:pubkey_bin_to_p2p(Challenger), - case miner_poc:dial_framed_stream(blockchain_swarm:tid(), P2P, []) of + case miner_poc:dial_framed_stream(blockchain_swarm:tid(), P2P, miner_poc_handler, []) of {error, _Reason} -> lager:warning("failed to dial challenger ~p: ~p", [P2P, _Reason]), timer:sleep(timer:seconds(30)), diff --git a/src/poc/miner_onion_server_light.erl b/src/poc/miner_onion_server_light.erl new file mode 100644 index 000000000..6861d4f90 --- /dev/null +++ b/src/poc/miner_onion_server_light.erl @@ -0,0 +1,381 @@ +%%%------------------------------------------------------------------- +%% @doc +%% == Miner Onion Server for light gateways == +%% no use of chain or ledger +%% @end +%%%------------------------------------------------------------------- +-module(miner_onion_server_light). + +-behavior(gen_server). + +-include("src/grpc/autogen/client/gateway_miner_client_pb.hrl"). +-include_lib("blockchain/include/blockchain_vars.hrl"). +-include_lib("blockchain/include/blockchain_caps.hrl"). 
+ +%% ------------------------------------------------------------------ +%% API Function Exports +%% ------------------------------------------------------------------ +-export([ + start_link/1, + decrypt_p2p/1, + decrypt_radio/7, + retry_decrypt/11, + send_receipt/11, + send_witness/9, + region_params_update/2, + region_params/0 +]). + +-ifdef(TEST). +-include_lib("eunit/include/eunit.hrl"). +-define(TX_RAND_SLEEP, 1). +-define(TX_MIN_SLEEP, 0). +-define(TX_COUNT, 1). +-else. +-define(TX_RAND_SLEEP, 10000). +-define(TX_MIN_SLEEP, 0). +-define(TX_COUNT, 3). +-endif. + +-ifdef(EQC). +-export([try_decrypt/5]). +-endif. + +%% ------------------------------------------------------------------ +%% gen_server Function Exports +%% ------------------------------------------------------------------ +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2 +]). + +-record(state, { + compact_key :: ecc_compact:compact_key(), + ecdh_fun, + miner_name :: binary(), + sender :: undefined | {pid(), term()}, + packet_id = 0 :: non_neg_integer(), + region_params = undefined :: undefined | blockchain_region_param_v1:region_param_v1(), + region = undefined :: undefined | atom() +}). + +-define(BLOCK_RETRY_COUNT, 10). +-define(CHANNELS, [903.9, 904.1, 904.3, 904.5, 904.7, 904.9, 905.1, 905.3]). + +-type state() :: #state{}. + +%% ------------------------------------------------------------------ +%% API Function Definitions +%% ------------------------------------------------------------------ +start_link(Args) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, Args, []). + +-spec decrypt_p2p(binary()) -> ok. +decrypt_p2p(Onion) -> + gen_server:cast(?MODULE, {decrypt_p2p, Onion}). + +decrypt_radio(Packet, RSSI, SNR, Timestamp, Freq, Channel, Spreading) -> + gen_server:cast(?MODULE, {decrypt_radio, Packet, RSSI, SNR, Timestamp, Freq, Channel, Spreading}). 
+ +retry_decrypt(Type, IV, OnionCompactKey, Tag, CipherText, RSSI, SNR, Frequency, Channel, DataRate, Stream) -> + gen_server:cast(?MODULE, {retry_decrypt, Type, IV, OnionCompactKey, Tag, CipherText, RSSI, SNR, Frequency, Channel, DataRate, Stream}). + +-spec region_params_update(atom(), [blockchain_region_param_v1:region_param_v1()]) -> ok. +region_params_update(Region, RegionParams) -> + gen_server:cast(?MODULE, {region_params_update, Region, RegionParams}). + +-spec region_params() -> ok. +region_params() -> + gen_server:call(?MODULE, region_params). + +-spec send_receipt(Data :: binary(), + OnionCompactKey :: libp2p_crypto:pubkey_bin(), + Type :: radio | p2p, + Time :: pos_integer(), + RSSI :: integer(), + SNR :: float(), + Frequency :: float(), + Channel :: non_neg_integer(), + DataRate :: binary(), + Power :: non_neg_integer(), + State :: state()) -> ok | {error, any()}. +send_receipt(Data, OnionCompactKey, Type, Time, RSSI, SNR, Frequency, Channel, DataRate, Power, _State) -> + case miner_lora_light:location_ok() of + true -> + lager:md([{poc_id, blockchain_utils:poc_id(OnionCompactKey)}]), + OnionKeyHash = crypto:hash(sha256, OnionCompactKey), + Address = blockchain_swarm:pubkey_bin(), + Receipt = case application:get_env(miner, data_aggregation_version, 3) of + 1 -> + blockchain_poc_receipt_v1:new(Address, Time, RSSI, Data, Type, SNR, Frequency); + 2 -> + blockchain_poc_receipt_v1:new(Address, Time, RSSI, Data, Type, SNR, Frequency, Channel, DataRate); + V when V >= 3 -> + R0 = blockchain_poc_receipt_v1:new(Address, Time, RSSI, Data, Type, SNR, Frequency, Channel, DataRate), + blockchain_poc_receipt_v1:tx_power(R0, Power); + _ -> + blockchain_poc_receipt_v1:new(Address, Time, RSSI, Data, Type) + end, + + %% TODO: put retry mechanism back in place + miner_poc_grpc_client_statem:send_report(receipt, Receipt, OnionKeyHash); + false -> + ok + end. 
+ +-spec send_witness(Data :: binary(), + OnionCompactKey :: libp2p_crypto:pubkey_bin(), + Time :: pos_integer(), + RSSI :: integer(), + SNR :: float(), + Frequency :: float(), + Channel :: non_neg_integer(), + DataRate :: binary(), + State :: state()) -> ok. +send_witness(Data, OnionCompactKey, Time, RSSI, SNR, Frequency, Channel, DataRate, _State) -> + case miner_lora_light:location_ok() of + true -> + POCID = blockchain_utils:poc_id(OnionCompactKey), + lager:info([{poc_id, POCID}], + "sending witness at RSSI: ~p, Frequency: ~p, SNR: ~p", + [RSSI, Frequency, SNR]), + OnionKeyHash = crypto:hash(sha256, OnionCompactKey), + SelfPubKeyBin = blockchain_swarm:pubkey_bin(), + Witness = case application:get_env(miner, data_aggregation_version, 2) of + V when V >= 2 -> + %% Send channel + datarate with data_aggregation_version >= 2 + blockchain_poc_witness_v1:new(SelfPubKeyBin, Time, RSSI, Data, SNR, Frequency, Channel, DataRate); + 1 -> + blockchain_poc_witness_v1:new(SelfPubKeyBin, Time, RSSI, Data, SNR, Frequency); + _ -> + blockchain_poc_witness_v1:new(SelfPubKeyBin, Time, RSSI, Data) + end, + miner_poc_grpc_client_statem:send_report(witness, Witness, OnionKeyHash); + false -> + ok + end. + +%% ------------------------------------------------------------------ +%% gen_server Function Definitions +%% ------------------------------------------------------------------ +init(Args) -> + lager:info("init with ~p", [Args]), + {ok, Name} = erl_angry_purple_tiger:animal_name(libp2p_crypto:bin_to_b58(blockchain_swarm:pubkey_bin())), + MinerName = binary:replace(erlang:list_to_binary(Name), <<"-">>, <<" ">>, [global]), + State = #state{ + compact_key = blockchain_swarm:pubkey_bin(), + ecdh_fun = maps:get(ecdh_fun, Args), + miner_name = unicode:characters_to_binary(MinerName, utf8) + }, + {ok, State}. 
+
+handle_call(region_params, _From, #state{region_params = Params}=State) ->
+    {reply, {ok, Params}, State};
+handle_call(compact_key, _From, #state{compact_key=CK}=State) when CK /= undefined ->
+    {reply, {ok, CK}, State};
+handle_call(_Msg, _From, State) ->
+    {reply, ok, State}.
+
+handle_cast({region_params_update, Region, RegionParams}, State) ->
+    lager:info("updating region params. Region: ~p, Params: ~p", [Region, RegionParams]),
+    {noreply, State#state{region = Region, region_params = RegionParams}};
+handle_cast({decrypt_p2p, _Payload}, #state{region_params = undefined} = State) ->
+    lager:warning("dropping p2p challenge packet as no region params data", []),
+    {noreply, State};
+handle_cast({decrypt_p2p, <<IV:2/binary,
+                            OnionCompactKey:33/binary,
+                            Tag:4/binary,
+                            CipherText/binary>>}, State) ->
+    %%TODO - rssi, freq, snr, channel and datarate were originally undefined
+    %% but this breaks the in use PB encoder, so defaulted to values below
+    NewState = decrypt(p2p, IV, OnionCompactKey, Tag, CipherText, 0, 0.0, 0.0, 0, [12], State),
+    {noreply, NewState};
+handle_cast({decrypt_radio, _Payload}, #state{region_params = undefined} = State) ->
+    lager:warning("dropping radio challenge packet as no region params data", []),
+    {noreply, State};
+handle_cast({decrypt_radio, <<IV:2/binary,
+                              OnionCompactKey:33/binary,
+                              Tag:4/binary,
+                              CipherText/binary>>,
+             RSSI, SNR, _Timestamp, Frequency, Channel, DataRate}, State) ->
+    NewState = decrypt(radio, IV, OnionCompactKey, Tag, CipherText, RSSI, SNR, Frequency, Channel, DataRate, State),
+    {noreply, NewState};
+handle_cast({retry_decrypt, Type, _IV, _OnionCompactKey, _Tag, _CipherText, _RSSI, _SNR, _Frequency, _Channel, _DataRate, _Stream}, #state{region_params = undefined} = State) ->
+    lager:warning("dropping retry ~p challenge packet as no region params data", [Type]),
+    {noreply, State};
+handle_cast({retry_decrypt, Type, IV, OnionCompactKey, Tag, CipherText, RSSI, SNR, Frequency, Channel, DataRate, _Stream}, State) ->
+    NewState = decrypt(Type, IV, OnionCompactKey, Tag, CipherText, RSSI, SNR, Frequency, Channel, DataRate, State),
+    {noreply, NewState};
+handle_cast(_Msg, State) ->
    {noreply, State}.
+
+handle_info(_Msg, State) ->
+    lager:warning("unhandled Msg: ~p", [_Msg]),
+    {noreply, State}.
+
+%% ------------------------------------------------------------------
+%% Internal Function Definitions
+%% ------------------------------------------------------------------
+decrypt(Type, IV, OnionCompactKey, Tag, CipherText, RSSI, SNR, Frequency, Channel, DataRate, #state{ecdh_fun=ECDHFun, region_params = RegionParams, region = Region}=State) ->
+    POCID = blockchain_utils:poc_id(OnionCompactKey),
+    OnionKeyHash = crypto:hash(sha256, OnionCompactKey),
+    lager:info("attempting decrypt of type ~p for onion key hash ~p", [Type, OnionKeyHash]),
+    NewState = case try_decrypt(IV, OnionCompactKey, OnionKeyHash, Tag, CipherText, ECDHFun) of
+        {error, fail_decrypt} ->
+            lager:info([{poc_id, POCID}],
+                       "sending witness at RSSI: ~p, Frequency: ~p, SNR: ~p",
+                       [RSSI, Frequency, SNR]),
+            _ = erlang:spawn(
+                  ?MODULE,
+                  send_witness,
+                  [crypto:hash(sha256, <<Tag/binary, CipherText/binary>>),
+                   OnionCompactKey,
+                   os:system_time(nanosecond), RSSI, SNR, Frequency, Channel, DataRate, State]
+                 ),
+            lager:info([{poc_id, POCID}], "could not decrypt packet received via ~p: treating as a witness", [Type]),
+            State;
+        {ok, Data, NextPacket} ->
+            lager:info([{poc_id, POCID}], "decrypted a layer: ~w received via ~p~n", [Data, Type]),
+            %% fingerprint with a blank key
+            Packet = longfi:serialize(<<0:128/integer-unsigned-little>>, longfi:new(monolithic, 0, 1, 0, NextPacket, #{})),
+            %% deterministically pick a channel based on the layerdata
+            <<IntData:16/integer-unsigned-little>> = Data,
+            %% TODO calculate some kind of delay here
+            case miner_lora_light:location_ok() of
+                true ->
+                    %% the fun below will be executed by miner_lora:send and supplied with the localised lists of channels
+                    ChannelSelectorFun = fun(FreqList) -> lists:nth((IntData rem length(FreqList)) + 1, FreqList) end,
+
+                    %% NOTE: poc version used to be derived from ledger
+                    %% as we wont be following the chain, cant use that
+                    case application:get_env(miner, poc_version, 11)
of + POCVersion when POCVersion >= 11 -> + %% send receipt with poc_v11 updates + case RegionParams of + undefined -> + %% continue doing the old way + %% the fun below will be executed by miner_lora:send and supplied with the localised lists of channels + Spreading = spreading(Region, erlang:byte_size(Packet)), + TxPower = tx_power(Region), + erlang:spawn(fun() -> miner_lora_light:send_poc(Packet, immediate, ChannelSelectorFun, Spreading, TxPower) end), + erlang:spawn(fun() -> ?MODULE:send_receipt(Data, OnionCompactKey, Type, os:system_time(nanosecond), + RSSI, SNR, Frequency, Channel, DataRate, TxPower, State) end); + _ -> + case blockchain_region_params_v1:get_spreading(RegionParams, erlang:byte_size(Packet)) of + {error, Why} -> + lager:error("unable to get spreading, reason: ~p", [Why]), + ok; + {ok, Spreading} -> + case tx_power(Region, State) of + {error, Reason} -> + %% could not calculate txpower, don't do anything + lager:error("unable to get tx_power, reason: ~p", [Reason]), + ok; + {ok, TxPower, EffectiveTxPower, AssertedGain} -> + %% TxPower is the power we tell the radio to transmit at + %% and EffectiveTxPower is the power we expect to radiate at the + %% antenna. + BW = blockchain_region_params_v1:get_bandwidth(RegionParams), + DR = datarate(Spreading, BW), + case miner_lora_light:send_poc(Packet, immediate, ChannelSelectorFun, DR, TxPower) of + ok -> + lager:info("sending receipt with observed power: ~p with radio power ~p", [EffectiveTxPower, TxPower]), + ?MODULE:send_receipt(Data, OnionCompactKey, Type, os:system_time(nanosecond), + RSSI, SNR, Frequency, Channel, DataRate, EffectiveTxPower, State); + {warning, {tx_power_corrected, CorrectedPower}} -> + %% Corrected power never takes into account antenna gain config in pkt forwarder so we + %% always add it back here + lager:warning("tx_power_corrected! 
original_power: ~p, corrected_power: ~p, with gain ~p; sending receipt with power ~p", + [TxPower, CorrectedPower, AssertedGain, CorrectedPower + AssertedGain]), + ?MODULE:send_receipt(Data, OnionCompactKey, Type, os:system_time(nanosecond), + RSSI, SNR, Frequency, Channel, DataRate, CorrectedPower + AssertedGain, State); + {warning, {unknown, Other}} -> + %% This should not happen + lager:warning("What is this? ~p", [Other]), + ok; + {error, Reason} -> + lager:error("unable to send_poc, reason: ~p", [Reason]), + ok + end + end + end + end + end; + false -> + ok + end, + State; + {error, Reason} -> + lager:info([{poc_id, POCID}], "could not decrypt packet received via ~p: Reason, discarding", [Type, Reason]), + State + end, + NewState. + +-spec try_decrypt(binary(), binary(), binary(), binary(), binary(), function()) -> poc_not_found | {ok, binary(), binary()} | {error, any()}. +try_decrypt(IV, OnionCompactKey, _OnionKeyHash, Tag, CipherText, ECDHFun) -> + try blockchain_poc_packet_v2:decrypt(<>, ECDHFun) of + error -> + {error, fail_decrypt}; + {Payload, NextLayer} -> + {ok, Payload, NextLayer} + catch _A:_B:_C -> + lager:error("A: ~p, B: ~p, C: ~p", [_A, _B, _C]), + {error, {_A, _B}} + end. +%% end. + +-spec tx_power(Region :: atom(), State :: state()) -> {ok, pos_integer(), pos_integer(), non_neg_integer()} | {error, any()}. +tx_power(Region, #state{compact_key=_CK, region_params = RegionParams}) -> + try + MaxEIRP = lists:max([blockchain_region_param_v1:max_eirp(R) || R <- RegionParams]), + %% if the antenna gain is accounted for in the packet forwarder config file + %% set this to false + %% ConsiderTxGain = application:get_env(miner, consider_tx_gain, true), + %% TODO - revisit as we are dropping the GW gain from the ledger + %% do we need an API to pull this from a validator ? 
+ EIRP = trunc(MaxEIRP/10), + lager:info("Region: ~p, Gain: ~p, MaxEIRP: ~p, EIRP: ~p", + [Region, undefined, MaxEIRP/10, EIRP]), + {ok, EIRP, EIRP, 0} + catch _Class:_Error -> + {error, failed_to_get_tx_power} + end. + +-spec datarate(Spreading :: atom(), BW :: pos_integer()) -> string(). +datarate(Spreading, BW) -> + BWInKhz = trunc(BW / 1000), + atom_to_list(Spreading) ++ "BW" ++ integer_to_list(BWInKhz). + +-spec tx_power(atom()) -> pos_integer(). +tx_power('EU868') -> + 14; +tx_power('US915') -> + 27; +tx_power(_) -> + 27. + +-spec spreading(Region :: atom(), + Len :: pos_integer()) -> string(). +spreading('EU868', L) when L < 65 -> + "SF12BW125"; +spreading('EU868', L) when L < 129 -> + "SF9BW125"; +spreading('EU868', L) when L < 238 -> + "SF8BW125"; +spreading(_, L) when L < 25 -> + "SF10BW125"; +spreading(_, L) when L < 67 -> + "SF9BW125"; +spreading(_, L) when L < 139 -> + "SF8BW125"; +spreading(_, _) -> + "SF7BW125". + +-ifdef(EQC). +-spec try_decrypt(binary(), binary(), binary(), binary(), function()) -> {ok, binary(), binary()} | {error, any()}. +try_decrypt(IV, OnionCompactKey, Tag, CipherText, ECDHFun) -> + OnionKeyHash = crypto:hash(sha256, OnionCompactKey), + try_decrypt(IV, OnionCompactKey, OnionKeyHash, Tag, CipherText, ECDHFun). +-endif. diff --git a/src/poc/miner_poc.erl b/src/poc/miner_poc.erl index cbe846f45..9a4ee41b6 100644 --- a/src/poc/miner_poc.erl +++ b/src/poc/miner_poc.erl @@ -6,8 +6,8 @@ -module(miner_poc). -export([ - dial_framed_stream/3, - add_stream_handler/1 + dial_framed_stream/4, + add_stream_handler/2 ]). -define(POC_VERSION, "miner_poc/1.0.0"). @@ -17,13 +17,13 @@ %% Dial PoC stream %% @end %%-------------------------------------------------------------------- --spec dial_framed_stream(ets:tab(), string(), list()) -> {ok, pid()} | {error, any()} | ignore. -dial_framed_stream(SwarmTID, Address, Args) -> +-spec dial_framed_stream(ets:tab(), string(), atom(), list()) -> {ok, pid()} | {error, any()} | ignore. 
+dial_framed_stream(SwarmTID, Address, HandlerMod, Args) -> libp2p_swarm:dial_framed_stream( SwarmTID, Address, ?POC_VERSION, - miner_poc_handler, + HandlerMod, Args ). @@ -31,10 +31,11 @@ dial_framed_stream(SwarmTID, Address, Args) -> %% @doc %% @end %%-------------------------------------------------------------------- --spec add_stream_handler(pid() | ets:tab()) -> ok. -add_stream_handler(SwarmTID) -> +-spec add_stream_handler(pid() | ets:tab(), atom()) -> ok. +add_stream_handler(SwarmTID, HandlerMod) -> libp2p_swarm:add_stream_handler( SwarmTID, ?POC_VERSION, - {libp2p_framed_stream, server, [miner_poc_handler, self(), SwarmTID]} + {libp2p_framed_stream, server, [HandlerMod, self(), SwarmTID]} ). + diff --git a/src/poc/miner_poc_grpc_client_handler.erl b/src/poc/miner_poc_grpc_client_handler.erl new file mode 100644 index 000000000..7bc17360e --- /dev/null +++ b/src/poc/miner_poc_grpc_client_handler.erl @@ -0,0 +1,205 @@ +%% +%% grpc client handler for poc streamed msgs - WIP +%% +-module(miner_poc_grpc_client_handler). + +-include("src/grpc/autogen/client/gateway_miner_client_pb.hrl"). + +%% ------------------------------------------------------------------ +%% Stream Exports +%% ------------------------------------------------------------------ +-export([ + init/0, + handle_msg/2, + handle_info/2 +]). + +%% ------------------------------------------------------------------ +%% API Function Exports +%% ------------------------------------------------------------------ +-ifdef(TEST). +-export([ + connect/1 +]). +-endif. + +-export([ + connect/3, + poc_stream/3, + config_update_stream/1, + region_params_update_stream/3 +]). + +init()-> + []. + +-ifdef(TEST). 
+connect(PeerP2P) -> + {ok, _PubKey, _SigFun, _} = blockchain_swarm:keys(), + %% get the test specific grpc port for the peer + %% ( which is going to be the libp2p port + 1000 ) + %% see miner_ct_utils for more info + {ok, PeerGrpcPort} = p2p_port_to_grpc_port(PeerP2P), + connect(PeerP2P, "127.0.0.1", PeerGrpcPort). +-endif. + +-spec connect(libp2p_crypto:peer_id(), string(), non_neg_integer()) -> {ok, grpc_client_custom:connection()} | {error, any()}. +connect(PeerP2P, PeerIP, GRPCPort) -> + try + lager:debug("connecting over grpc to peer ~p via IP ~p and port ~p", [PeerP2P, PeerIP, GRPCPort]), + {ok, Connection} = grpc_client_custom:connect(tcp, PeerIP, GRPCPort), + {ok, Connection} + catch _Error:_Reason:_Stack -> + lager:warning("*** failed to connect over grpc to peer ~p. Reason ~p Stack ~p", [PeerP2P, _Reason, _Stack]), + {error, failed_to_connect_to_grpc_peer} + end. + +-spec poc_stream(grpc_client_custom:connection(), libp2p_crypto:pubkey_bin(), function()) -> {ok, pid()} | {error, any()}. +poc_stream(Connection, PubKeyBin, SigFun)-> + try + {ok, Stream} = grpc_client_stream_custom:new( + Connection, + 'helium.gateway', + stream_poc, + gateway_miner_client_pb, + [{type, stream}], + ?MODULE), + lager:debug("*** new poc stream established with pid ~p", [Stream]), + %% subscribe to poc updates + Req = #gateway_poc_req_v1_pb{address = PubKeyBin, signature = <<>>}, + ReqEncoded = gateway_miner_client_pb:encode_msg(Req, gateway_poc_req_v1_pb), + ReqSigned = Req#gateway_poc_req_v1_pb{signature = SigFun(ReqEncoded)}, + ok = grpc_client_custom:send(Stream, ReqSigned), + {ok, Stream} + catch _Error:_Reason:_Stack -> + lager:warning("*** failed to connect to poc stream on connection ~p. Reason ~p Stack ~p", [Connection, _Reason, _Stack]), + {error, stream_failed} + end. + +-spec config_update_stream(grpc_client_custom:connection()) -> {ok, pid()} | {error, any()}. 
+config_update_stream(Connection)-> + try + {ok, Stream} = grpc_client_stream_custom:new( + Connection, + 'helium.gateway', + config_update, + gateway_miner_client_pb, + [{type, stream}], + ?MODULE), + %% subscribe to config updates + Req = #gateway_config_update_req_v1_pb{}, + ok = grpc_client_custom:send(Stream, Req), + {ok, Stream} + catch _Error:_Reason:_Stack -> + lager:warning("*** failed to connect to config_update stream on connection ~p. Reason ~p Stack ~p", [Connection, _Reason, _Stack]), + {error, stream_failed} + end. + +-spec region_params_update_stream(grpc_client_custom:connection(), libp2p_crypto:pubkey_bin(), function()) -> {ok, pid()} | {error, any()}. +region_params_update_stream(Connection, PubKeyBin, SigFun)-> + try + {ok, Stream} = grpc_client_stream_custom:new( + Connection, + 'helium.gateway', + region_params_update, + gateway_miner_client_pb, + [{type, stream}], + ?MODULE), + %% subscribe to region params updates + Req = #gateway_region_params_update_req_v1_pb{address = PubKeyBin, signature = <<>>}, + ReqEncoded = gateway_miner_client_pb:encode_msg(Req, gateway_region_params_update_req_v1_pb), + ReqSigned = Req#gateway_region_params_update_req_v1_pb{signature = SigFun(ReqEncoded)}, + ok = grpc_client_custom:send(Stream, ReqSigned), + {ok, Stream} + catch _Error:_Reason:_Stack -> + lager:warning("*** failed to connect to region_params_update stream on connection ~p. Reason ~p Stack ~p", [Connection, _Reason, _Stack]), + {error, stream_failed} + end. 
+ +%% TODO: handle headers +handle_msg({headers, _Headers}, StreamState) -> + lager:debug("*** grpc client ignoring headers ~p", [_Headers]), + StreamState; +handle_msg({data, #gateway_resp_v1_pb{msg = {poc_challenge_resp, ChallengeNotification}, height = NotificationHeight, signature = ChallengerSig}} = Msg, StreamState) -> + lager:debug("grpc client received gateway_poc_challenge_notification_resp_v1 msg ~p", [Msg]), + #gateway_poc_challenge_notification_resp_v1_pb{challenger = #routing_address_pb{uri = URI, pub_key = PubKeyBin}, block_hash = BlockHash, onion_key_hash = OnionKeyHash} = ChallengeNotification, + Self = self(), + F = fun() -> + TargetRes = miner_poc_grpc_client_statem:check_target(binary_to_list(URI), PubKeyBin, OnionKeyHash, BlockHash, NotificationHeight, ChallengerSig), + lager:info("check target result for key ~p: ~p",[OnionKeyHash, TargetRes]), + case TargetRes of + {ok, Result, _Details} -> + handle_check_target_resp(Result); + {error, <<"queued_poc">>} -> + erlang:send_after(5000, Self, {retry_check_target, 1, Msg}); + {error, _Reason, _Details} -> + ok; + {error, _Reason} -> + ok + end + end, + spawn(F), + StreamState; +handle_msg({data, #gateway_resp_v1_pb{msg = {config_update_streamed_resp, Payload}, height = _NotificationHeight, signature = _ChallengerSig}} = _Msg, StreamState) -> + lager:debug("grpc client received config_update_streamed_resp msg ~p", [_Msg]), + #gateway_config_update_streamed_resp_v1_pb{keys = UpdatedKeys} = Payload, + miner_poc_grpc_client_statem:update_config(UpdatedKeys), + StreamState; +handle_msg({data, #gateway_resp_v1_pb{msg = {region_params_streamed_resp, Payload}, height = _NotificationHeight, signature = _ChallengerSig}} = _Msg, StreamState) -> + lager:debug("grpc client received region_params_streamed_resp msg ~p", [_Msg]), + #gateway_region_params_streamed_resp_v1_pb{region = Region, params =Params} = Payload, + #blockchain_region_params_v1_pb{region_params = RegionParams} = Params, + 
miner_lora_light:region_params_update(Region, RegionParams), + miner_onion_server_light:region_params_update(Region, RegionParams), + StreamState; +handle_msg({data, _Msg}, StreamState) -> + lager:warning("grpc client received unexpected msg ~p",[_Msg]), + StreamState. + +handle_info({retry_check_target, Attempt, Msg}, StreamState) when Attempt =< 3 -> + lager:debug("retry_check_target with attempt ~p for msg: ~p", [Attempt, Msg]), + {data, #gateway_resp_v1_pb{msg = {poc_challenge_resp, ChallengeNotification}, height = NotificationHeight, signature = ChallengerSig}} = Msg, + #gateway_poc_challenge_notification_resp_v1_pb{challenger = #routing_address_pb{uri = URI, pub_key = PubKeyBin}, block_hash = BlockHash, onion_key_hash = OnionKeyHash} = ChallengeNotification, + Self = self(), + F = fun()-> + TargetRes = miner_poc_grpc_client_statem:check_target(binary_to_list(URI), PubKeyBin, OnionKeyHash, BlockHash, NotificationHeight, ChallengerSig), + lager:info("check target result retry ~p for key ~p: ~p",[Attempt, OnionKeyHash, TargetRes]), + case TargetRes of + {ok, Result, _Details} -> + handle_check_target_resp(Result); + {error, <<"queued_poc">>} -> + erlang:send_after(5000, Self, {retry_check_target, Attempt +1, Msg}); + {error, _Reason, _Details} -> + ok; + {error, _Reason} -> + ok + end + end, + spawn(F), + StreamState; +handle_info(_Msg, StreamState) -> + lager:warning("grpc client unhandled msg: ~p", [_Msg]), + StreamState. + +%% ------------------------------------------------------------------ +%% Internal functions +%% ------------------------------------------------------------------ +-spec handle_check_target_resp(#gateway_poc_check_challenge_target_resp_v1_pb{})-> ok. +handle_check_target_resp(#gateway_poc_check_challenge_target_resp_v1_pb{target = true, onion = Onion} = _ChallengeResp) -> + ok = miner_onion_server_light:decrypt_p2p(Onion); +handle_check_target_resp(#gateway_poc_check_challenge_target_resp_v1_pb{target = false} = _ChallengeResp) -> + ok. 
+ +-ifdef(TEST). +p2p_port_to_grpc_port(PeerAddr)-> + SwarmTID = blockchain_swarm:tid(), + Peerbook = libp2p_swarm:peerbook(SwarmTID), + {ok, _ConnAddr, {Transport, _TransportPid}} = libp2p_transport:for_addr(SwarmTID, PeerAddr), + {ok, PeerPubKeyBin} = Transport:p2p_addr(PeerAddr), + {ok, PeerInfo} = libp2p_peerbook:get(Peerbook, PeerPubKeyBin), + ListenAddrs = libp2p_peer:listen_addrs(PeerInfo), + [H | _ ] = libp2p_transport:sort_addrs(SwarmTID, ListenAddrs), + [_, _, _IP,_, Port] = _Full = re:split(H, "/"), + lager:info("*** peer p2p port ~p", [Port]), + {ok, list_to_integer(binary_to_list(Port)) + 1000}. +-endif. diff --git a/src/poc/miner_poc_grpc_client_statem.erl b/src/poc/miner_poc_grpc_client_statem.erl new file mode 100644 index 000000000..f82355b09 --- /dev/null +++ b/src/poc/miner_poc_grpc_client_statem.erl @@ -0,0 +1,540 @@ +-module(miner_poc_grpc_client_statem). +-behavior(gen_statem). + +%%-dialyzer({nowarn_function, process_unary_response/1}). +%%-dialyzer({nowarn_function, handle_info/2}). +%%-dialyzer({nowarn_function, build_config_req/1}). + + +-include("src/grpc/autogen/client/gateway_miner_client_pb.hrl"). +-include_lib("public_key/include/public_key.hrl"). +-include_lib("helium_proto/include/blockchain_txn_vars_v1_pb.hrl"). + +%% ------------------------------------------------------------------ +%% API Function Exports +%% ------------------------------------------------------------------ +-export([ + start_link/0, + stop/0, + connection/0, + check_target/6, + send_report/3, + send_report/4, + update_config/1 +]). + +%% ------------------------------------------------------------------ +%% gen_statem Function Exports +%% ------------------------------------------------------------------ +-export([ + init/1, + callback_mode/0, + terminate/2 +]). 
+ +%% ------------------------------------------------------------------ +%% record defs and macros +%% ------------------------------------------------------------------ +-record(data, { + self_pub_key_bin, + self_sig_fun, + connection, + connection_pid, + conn_monitor_ref, + stream_poc_pid, + stream_poc_monitor_ref, + stream_config_update_pid, + stream_config_update_monitor_ref, + stream_region_params_update_pid, + stream_region_params_update_monitor_ref, + val_p2p_addr, + val_public_ip, + val_grpc_port +}). + +%% these are config vars the miner is interested in, if they change we +%% will want to get their latest values +-define(CONFIG_VARS, ["poc_version", "data_aggregation_version"]). + +%% delay between validator reconnects attempts +-define(VALIDATOR_RECONNECT_DELAY, 5000). +%% delay between stream reconnects attempts +-define(STREAM_RECONNECT_DELAY, 5000). + +-type data() :: #data{}. + +%% ------------------------------------------------------------------ +%% gen_statem callbacks Exports +%% ------------------------------------------------------------------ +-export([ + setup/3, + connected/3 +]). + +%% ------------------------------------------------------------------ +%% API Definitions +%% ------------------------------------------------------------------ +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_statem:start_link({local, ?MODULE}, ?MODULE, [], []). + +-spec stop() -> ok. +stop() -> + gen_statem:stop(?MODULE). + +-spec connection() -> {ok, grpc_client_custom:connection()}. +connection() -> + gen_statem:call(?MODULE, connection, infinity). + +-spec check_target(string(), libp2p_crypto:pubkey_bin(), binary(), binary(), non_neg_integer(), libp2p_crypto:signature()) -> {error, any()} | {error, any(), map()} | {ok, any(), map()}. 
+check_target(ChallengerURI, ChallengerPubKeyBin, OnionKeyHash, BlockHash, NotificationHeight, ChallengerSig) -> + SelfPubKeyBin = blockchain_swarm:pubkey_bin(), + {ok, _, SelfSigFun, _} = blockchain_swarm:keys(), + %% split the URI into its IP and port parts + #{host := IP, port := Port, scheme := _Scheme} = uri_string:parse(ChallengerURI), + TargetIP = maybe_override_ip(IP), + %% build the request + Req = build_check_target_req(ChallengerPubKeyBin, OnionKeyHash, + BlockHash, NotificationHeight, ChallengerSig, SelfPubKeyBin, SelfSigFun), + send_grpc_unary_req(TargetIP, Port, Req, 'check_challenge_target'). + +-spec send_report(witness | receipt, any(), binary()) -> ok. +send_report(ReportType, Report, OnionKeyHash)-> + gen_statem:cast(?MODULE, {send_report, ReportType, Report, OnionKeyHash, 5}). + +-spec send_report(witness | receipt, any(), binary(), non_neg_integer()) -> ok. +send_report(ReportType, Report, OnionKeyHash, Retries)-> + gen_statem:cast(?MODULE, {send_report, ReportType, Report, OnionKeyHash, Retries}). + +-spec update_config([string()]) -> ok. +update_config(UpdatedKeys)-> + gen_statem:cast(?MODULE, {update_config, UpdatedKeys}). + +%% ------------------------------------------------------------------ +%% gen_statem Definitions +%% ------------------------------------------------------------------ +init(_Args) -> + lager:info("starting ~p", [?MODULE]), + erlang:process_flag(trap_exit, true), + SelfPubKeyBin = blockchain_swarm:pubkey_bin(), + {ok, _, SigFun, _} = blockchain_swarm:keys(), + {ok, setup, #data{self_pub_key_bin = SelfPubKeyBin, self_sig_fun = SigFun}}. + +callback_mode() -> [state_functions,state_enter]. + +terminate(_Reason, Data) -> + lager:info("terminating with reason ~p", [_Reason]), + _ = disconnect(Data), + ok. 
+ +%% ------------------------------------------------------------------ +%% gen_statem callbacks +%% ------------------------------------------------------------------ +setup(enter, _OldState, Data)-> + %% each time we enter connecting_validator state we assume we are initiating a new + %% connection to a durable validators + %% thus ensure all streams are disconnected + ok = disconnect(Data), + erlang:send_after(?VALIDATOR_RECONNECT_DELAY, self(), find_validator), + {keep_state, + Data#data{val_public_ip = undefined, val_grpc_port = undefined, val_p2p_addr = undefined}}; +setup(info, find_validator, Data) -> + %% ask a random seed validator for the address of a 'proper' validator + %% we will then use this as our default durable validator + case find_validator() of + {error, _Reason} -> + {repeat_state, Data}; + {ok, ValIP, ValPort, ValP2P} -> + lager:info("*** Found validator with ip: ~p, port: ~p, addr: ~p", [ValIP, ValPort, ValP2P]), + {keep_state, + Data#data{val_public_ip = ValIP, val_grpc_port = ValPort, val_p2p_addr = ValP2P}, + [{next_event, info, connect_validator}]} + end; +setup(info, connect_validator, #data{val_public_ip = ValIP, val_grpc_port = ValGRPCPort, val_p2p_addr = ValP2P} = Data) -> + %% connect to our durable validator + case connect_validator(ValP2P, ValIP, ValGRPCPort) of + {ok, Connection} -> + #{http_connection := ConnectionPid} = Connection, + M = erlang:monitor(process, ConnectionPid), + {keep_state, + Data#data{connection = Connection, connection_pid = ConnectionPid, conn_monitor_ref = M}, + [{next_event, info, fetch_config}]}; + {error, _} -> + {repeat_state, Data} + end; +setup(info, fetch_config, #data{val_public_ip = ValIP, val_grpc_port = ValGRPCPort} = Data) -> + %% get necessary config data from our durable validator + case fetch_config(?CONFIG_VARS, ValIP, ValGRPCPort) of + ok -> + {keep_state, Data, [{next_event, info, connect_poc_stream}]}; + {error, _} -> + {repeat_state, Data} + end; +setup(info, connect_poc_stream, 
#data{connection = Connection, self_pub_key_bin = SelfPubKeyBin, self_sig_fun = SelfSigFun} = Data) -> + %% connect any required streams + %% we are interested in three streams, poc events, config change events, region params updates + case connect_stream_poc(Connection, SelfPubKeyBin, SelfSigFun) of + {ok, StreamPid} -> + M = erlang:monitor(process, StreamPid), + lager:info("monitoring stream poc pid ~p with ref ~p", [StreamPid, M]), + {keep_state, + Data#data{stream_poc_monitor_ref = M, stream_poc_pid = StreamPid}, + [{next_event, info, connect_config_stream}]}; + {error, _} -> + {repeat_state, Data} + end; +setup(info, connect_config_stream, #data{connection = Connection} = Data) -> + %% connect any required streams + %% we are interested in three streams, poc events, config change events, region params updates + case connect_stream_config_update(Connection) of + {ok, StreamPid} -> + M = erlang:monitor(process, StreamPid), + {keep_state, + Data#data{stream_config_update_monitor_ref = M, stream_config_update_pid = StreamPid}, + [{next_event, info, connect_region_params_stream}]}; + {error, _} -> + {repeat_state, Data} + end; +setup(info, connect_region_params_stream, #data{connection = Connection, self_pub_key_bin = SelfPubKeyBin, self_sig_fun = SelfSigFun} = Data) -> + %% connect any required streams + %% we are interested in three streams, poc events, config change events, region params updates + case connect_stream_region_params_update(Connection, SelfPubKeyBin, SelfSigFun) of + {ok, StreamPid} -> + M = erlang:monitor(process, StreamPid), + {next_state, connected, + Data#data{stream_region_params_update_monitor_ref = M, stream_region_params_update_pid = StreamPid}}; + {error, _} -> + {repeat_state, Data} + end; +setup(info, {'DOWN', _Ref, process, _, _Reason} = Event, Data) -> + lager:info("got down event ~p", [Event]), + %% handle down msgs, such as from our streams or validator connection + handle_down_event(setup, Event, Data); +setup({call, From}, _Msg, 
Data) -> + %% return an error for any call msgs whilst in setup state + {keep_state, Data, [{reply, From, {error, grpc_client_not_ready}}]}; +setup(_EventType, _Msg, Data) -> + %% ignore ev things else whist in setup state + lager:info("unhandled event whilst in ~p state: Type: ~p, Msg: ~p", [setup, _EventType, _Msg]), + {keep_state, Data}. + +connected(enter, _OldState, Data)-> + {keep_state, Data}; +connected(cast, {send_report, ReportType, Report, OnionKeyHash, RetryAttempts}, #data{connection = Connection, self_sig_fun = SelfSigFun, self_pub_key_bin = SelfPubKeyBin} = Data) -> + lager:info("send_report ~p with onionkeyhash ~p: ~p", [ReportType, OnionKeyHash, Report]), + ok = send_report(ReportType, Report, OnionKeyHash, SelfPubKeyBin, SelfSigFun, Connection, RetryAttempts), + {keep_state, Data}; +connected(cast, {update_config, Keys}, #data{val_public_ip = ValIP, val_grpc_port = ValPort} = Data) -> + lager:info("update_config for keys ~p", [Keys]), + _ = fetch_config(Keys, ValIP, ValPort), + {keep_state, Data}; +connected({call, From}, connection, #data{connection = Connection} = Data) -> + {keep_state, Data, [{reply, From, {ok, Connection}}]}; +connected(info, {'DOWN', _Ref, process, _, _Reason} = Event, Data) -> + lager:info("got down event ~p", [Event]), + %% handle down msgs, such as from our streams or validator connection + handle_down_event(connected, Event, Data); +connected(_EventType, _Msg, Data)-> + lager:info("unhandled event whilst in ~p state: Type: ~p, Msg: ~p", [connected, _EventType, _Msg]), + {keep_state, Data}. + +%% ------------------------------------------------------------------ +%% Internal functions +%% ------------------------------------------------------------------ +-spec disconnect(data())-> ok. +disconnect(_Data = #data{connection = undefined}) -> + ok; +disconnect(_Data = #data{connection = Connection}) -> + catch _ = grpc_client_custom:stop_connection(Connection), + ok. 
+ +-spec find_validator() -> {error, any()} | {ok, string(), pos_integer(), string()}. +find_validator()-> + case application:get_env(miner, seed_validators) of + {ok, SeedValidators} -> + {_SeedP2PAddr, SeedValIP, SeedValGRPCPort} = lists:nth(rand:uniform(length(SeedValidators)), SeedValidators), + Req = build_validators_req(1), + case send_grpc_unary_req(SeedValIP, SeedValGRPCPort, Req, 'validators') of + {ok, #gateway_validators_resp_v1_pb{result = []}, _ReqDetails} -> + %% no routes, retry in a bit + lager:warning("failed to find any validator routing from seed validator ~p", [SeedValIP]), + {error, no_validators}; + {ok, #gateway_validators_resp_v1_pb{result = Routing}, _ReqDetails} -> + %% resp will contain the payload 'gateway_validators_resp_v1_pb' + [#routing_address_pb{pub_key = DurableValPubKeyBin, uri = DurableValURI}] = Routing, + DurableValP2PAddr = libp2p_crypto:pubkey_bin_to_p2p(DurableValPubKeyBin), + #{host := DurableValIP, port := DurableValGRPCPort} = uri_string:parse(binary_to_list(DurableValURI)), + {ok, DurableValIP, DurableValGRPCPort, DurableValP2PAddr}; + {error, Reason} = _Error -> + lager:warning("request to validator failed: ~p", [_Error]), + {error, Reason} + end; + _ -> + lager:warning("failed to find seed validators", []), + {error, find_validator_request_failed} + end. + +-spec connect_validator(string(), string(), pos_integer()) -> {error, any()} | {ok, grpc_client_custom:connection()}. +connect_validator(ValAddr, ValIP, ValPort) -> + try + lager:info("connecting to validator, p2paddr: ~p, ip: ~p, port: ~p", [ValAddr, ValIP, ValPort]), + case miner_poc_grpc_client_handler:connect(ValAddr, maybe_override_ip(ValIP), ValPort) of + {error, _} = Error -> + Error; + {ok, Connection} = Res-> + lager:info("successfully connected to validator via connection ~p", [Connection]), + Res + end + catch _Class:_Error:_Stack -> + lager:info("failed to connect to validator, will try again in a bit. 
Reason: ~p, Details: ~p, Stack: ~p", [_Class, _Error, _Stack]), + {error, connect_validator_failed} + end. + +-spec connect_stream_poc(grpc_client_custom:connection(), libp2p_crypto:pubkey_bin(), function()) -> {error, any()} | {ok, pid()}. +connect_stream_poc(Connection, SelfPubKeyBin, SelfSigFun) -> + lager:debug("establishing POC stream on connection ~p", [Connection]), + case miner_poc_grpc_client_handler:poc_stream(Connection, SelfPubKeyBin, SelfSigFun) of + {error, _Reason} = Error-> + Error; + {ok, Stream} = Res-> + lager:info("successfully connected poc stream ~p on connection ~p", [Stream, Connection]), + Res + end. + +-spec connect_stream_config_update(grpc_client_custom:connection()) -> {error, any()} | {ok, pid()}. +connect_stream_config_update(Connection) -> + lager:debug("establishing config_update stream on connection ~p", [Connection]), + case miner_poc_grpc_client_handler:config_update_stream(Connection) of + {error, _Reason} = Error-> + Error; + {ok, Stream} = Res-> + lager:info("successfully connected config update stream ~p on connection ~p", [Stream, Connection]), + Res + end. + +-spec connect_stream_region_params_update(grpc_client_custom:connection(), libp2p_crypto:pubkey_bin(), function()) -> {error, any()} | {ok, pid()}. +connect_stream_region_params_update(Connection, SelfPubKeyBin, SelfSigFun) -> + lager:debug("establishing region_params_update stream on connection ~p", [Connection]), + case miner_poc_grpc_client_handler:region_params_update_stream(Connection, SelfPubKeyBin, SelfSigFun) of + {error, _Reason} = Error-> + Error; + {ok, Stream} = Res-> + lager:info("successfully connected region params update stream ~p on connection ~p", [Stream, Connection]), + Res + end. + +-spec send_report(witness | receipt, any(), binary(), libp2p_crypto:pubkey_bin(), function(), grpc_client_custom:connection(), non_neg_integer()) -> ok. 
+send_report(_ReportType, _Report, _OnionKeyHash, _SelfPubKeyBin, _SigFun, _Connection, 0) -> + ok; +send_report(receipt = ReportType, Report, OnionKeyHash, _SelfPubKeyBin, SigFun, Connection, RetryAttempts) -> + EncodedReceipt = gateway_miner_client_pb:encode_msg(Report#blockchain_poc_receipt_v1_pb{signature = <<>>}, blockchain_poc_receipt_v1_pb), + SignedReceipt = Report#blockchain_poc_receipt_v1_pb{signature = SigFun(EncodedReceipt)}, + Req = #gateway_poc_report_req_v1_pb{ + onion_key_hash = OnionKeyHash, + msg = {ReportType, SignedReceipt}}, + do_send_report(Req, ReportType, Report, OnionKeyHash, Connection, RetryAttempts); +send_report(witness = ReportType, Report, OnionKeyHash, _SelfPubKeyBin, SigFun, Connection, RetryAttempts) -> + EncodedWitness = gateway_miner_client_pb:encode_msg(Report#blockchain_poc_witness_v1_pb{signature = <<>>}, blockchain_poc_witness_v1_pb), + SignedWitness = Report#blockchain_poc_witness_v1_pb{signature = SigFun(EncodedWitness)}, + Req = #gateway_poc_report_req_v1_pb{ + onion_key_hash = OnionKeyHash, + msg = {ReportType, SignedWitness}}, + do_send_report(Req, ReportType, Report, OnionKeyHash, Connection, RetryAttempts). + +-spec do_send_report(binary(), witness | receipt, any(), binary(), grpc_client_custom:connection(), non_neg_integer()) -> ok. +do_send_report(Req, ReportType, Report, OnionKeyHash, Connection, RetryAttempts) -> + %% ask validator for public uri of the challenger of this POC + case get_uri_for_challenger(OnionKeyHash, Connection) of + {ok, {IP, Port}} -> + %% send the report to our challenger + case send_grpc_unary_req(IP, Port, Req, 'send_report') of + {ok, _} -> + ok; + _ -> + ?MODULE:send_report(ReportType, Report, OnionKeyHash, RetryAttempts - 1) + end; + {error, _Reason} -> + ?MODULE:send_report(ReportType, Report, OnionKeyHash, RetryAttempts - 1) + end, + ok. + +-spec fetch_config([string()], string(), pos_integer()) -> {error, any()} | ok. 
+fetch_config(UpdatedKeys, ValIP, ValGRPCPort) -> + %% filter out keys we are not interested in + %% and then ask our validator for current values + %% for remaining keys + FilteredKeys = lists:filter(fun(K)-> lists:member(K, ?CONFIG_VARS) end, UpdatedKeys), + case FilteredKeys of + [] -> ok; + _ -> + %% retrieve some config from the returned validator + Req2 = build_config_req(FilteredKeys), + case send_grpc_unary_req(ValIP, ValGRPCPort, Req2, 'config') of + {ok, #gateway_config_resp_v1_pb{result = Vars}, _Req2Details} -> + [ + begin + {Name, Value} = blockchain_txn_vars_v1:from_var(Var), + application:set_env(miner, list_to_atom(Name), Value) + end || #blockchain_var_v1_pb{} = Var <- Vars], + ok; + {error, Reason, _Details} -> + {error, Reason}; + {error, Reason} -> + {error, Reason} + end + end. + +-spec send_grpc_unary_req(grpc_client_custom:connection(), any(), atom())-> {error, any(), map()} | {error, any()} | {ok, any(), map()} | {ok, map()}. +send_grpc_unary_req(undefined, _Req, _RPC) -> + {error, no_grpc_connection}; +send_grpc_unary_req(Connection, Req, RPC) -> + try + lager:info("send unary request: ~p", [Req]), + Res = grpc_client_custom:unary( + Connection, + Req, + 'helium.gateway', + RPC, + gateway_miner_client_pb, + [{callback_mod, miner_poc_grpc_client_handler}] + ), + lager:info("send unary result: ~p", [Res]), + process_unary_response(Res) + catch + _Class:_Error:_Stack -> + lager:warning("send unary failed: ~p, ~p, ~p", [_Class, _Error, _Stack]), + {error, req_failed} + end. + +-spec send_grpc_unary_req(string(), non_neg_integer(), any(), atom()) -> {error, any(), map()} | {error, any()} | {ok, any(), map()} | {ok, map()}. 
+send_grpc_unary_req(PeerIP, GRPCPort, Req, RPC)-> + try + lager:info("Send unary request via new connection to ip ~p: ~p", [PeerIP, Req]), + {ok, Connection} = grpc_client_custom:connect(tcp, maybe_override_ip(PeerIP), GRPCPort), + + Res = grpc_client_custom:unary( + Connection, + Req, + 'helium.gateway', + RPC, + gateway_miner_client_pb, + [{callback_mod, miner_poc_grpc_client_handler}] + ), + lager:info("New Connection, send unary result: ~p", [Res]), + %% we dont need the connection to hang around, so close it out + catch _ = grpc_client_custom:stop_connection(Connection), + process_unary_response(Res) + catch + _Class:_Error:_Stack -> + lager:warning("send unary failed: ~p, ~p, ~p", [_Class, _Error, _Stack]), + {error, req_failed} + end. + +-spec build_check_target_req(libp2p_crypto:pubkey_bin(), binary(), binary(), non_neg_integer(), binary(), libp2p_crypto:pubkey_bin(), function()) -> #gateway_poc_check_challenge_target_req_v1_pb{}. +build_check_target_req(ChallengerPubKeyBin, OnionKeyHash, BlockHash, ChallengeHeight, ChallengerSig, SelfPubKeyBin, SelfSigFun) -> + Req = #gateway_poc_check_challenge_target_req_v1_pb{ + address = SelfPubKeyBin, + challenger = ChallengerPubKeyBin, + block_hash = BlockHash, + onion_key_hash = OnionKeyHash, + height = ChallengeHeight, + notifier = ChallengerPubKeyBin, + notifier_sig = ChallengerSig, + challengee_sig = <<>> + }, + ReqEncoded = gateway_miner_client_pb:encode_msg(Req, gateway_poc_check_challenge_target_req_v1_pb), + Req#gateway_poc_check_challenge_target_req_v1_pb{challengee_sig = SelfSigFun(ReqEncoded)}. + +-spec build_validators_req(Quantity:: pos_integer()) -> #gateway_validators_req_v1_pb{}. +build_validators_req(Quantity) -> + #gateway_validators_req_v1_pb{ + quantity = Quantity + }. + +-spec build_config_req([string()]) -> #gateway_config_req_v1_pb{}. +build_config_req(Keys) -> + #gateway_config_req_v1_pb{ keys = Keys}. + +-spec build_poc_challenger_req(binary()) -> #gateway_poc_key_routing_data_req_v1_pb{}. 
%% @doc Build a request to resolve the routing data (public URI) of the
%% validator which owns the POC identified by OnionKeyHash.
build_poc_challenger_req(OnionKeyHash) ->
    #gateway_poc_key_routing_data_req_v1_pb{ key = OnionKeyHash}.

%% TODO: return a better and consistent response
%%-spec process_unary_response(grpc_client_custom:unary_response()) -> {error, any(), map()} | {error, any()} | {ok, any(), map()} | {ok, map()}.
%% Normalise the various grpc_client_custom unary responses into
%% {ok, ...} / {error, ...} tuples carrying height + signature metadata
%% from the validator's response envelope.
process_unary_response({ok, #{http_status := 200, result := #gateway_resp_v1_pb{msg = {success_resp, _Payload}, height = Height, signature = Sig}}}) ->
    {ok, #{height => Height, signature => Sig}};
process_unary_response({ok, #{http_status := 200, result := #gateway_resp_v1_pb{msg = {error_resp, Details}, height = Height, signature = Sig}}}) ->
    #gateway_error_resp_pb{error = ErrorReason} = Details,
    {error, ErrorReason, #{height => Height, signature => Sig}};
process_unary_response({ok, #{http_status := 200, result := #gateway_resp_v1_pb{msg = {_RespType, Payload}, height = Height, signature = Sig}}}) ->
    {ok, Payload, #{height => Height, signature => Sig}};
process_unary_response({error, ClientError = #{error_type := 'client'}}) ->
    lager:warning("grpc error response ~p", [ClientError]),
    {error, grpc_client_error};
process_unary_response({error, ClientError = #{error_type := 'grpc', http_status := 200, status_message := ErrorMsg}}) ->
    lager:warning("grpc error response ~p", [ClientError]),
    {error, ErrorMsg};
process_unary_response(_Response) ->
    lager:warning("unhandled grpc response ~p", [_Response]),
    {error, unexpected_response}.

%% Handle 'DOWN' monitor messages for the grpc connection and for the
%% long-lived poc / config_update streams. Connection loss re-enters the
%% setup state; stream loss attempts an in-place reconnect, retrying via
%% a delayed replay of the DOWN message on failure.
handle_down_event(_CurState, {'DOWN', Ref, process, _, Reason}, Data = #data{conn_monitor_ref = Ref, connection = Connection}) ->
    %% NOTE(review): log string rejoined onto one line (fractured in source)
    lager:warning("GRPC connection to validator is down, reconnecting. Reason: ~p", [Reason]),
    _ = grpc_client_custom:stop_connection(Connection),
    %% if the connection goes down, enter setup state to reconnect
    {next_state, setup, Data};
handle_down_event(_CurState, {'DOWN', Ref, process, _, Reason} = Event, Data = #data{stream_poc_monitor_ref = Ref,
                                                                                     connection = Connection,
                                                                                     self_pub_key_bin = SelfPubKeyBin,
                                                                                     self_sig_fun = SelfSigFun}) ->
    %% the poc stream is meant to be long lived, we always want it up as long as we have a grpc connection
    %% so if it goes down start it back up again
    lager:warning("poc stream to validator is down, reconnecting. Reason: ~p", [Reason]),
    case connect_stream_poc(Connection, SelfPubKeyBin, SelfSigFun) of
        {ok, StreamPid} ->
            M = erlang:monitor(process, StreamPid),
            {keep_state, Data#data{stream_poc_monitor_ref = M, stream_poc_pid = StreamPid}};
        {error, _} ->
            %% if stream reconnnect fails, replay the orig down msg to trigger another attempt
            %% NOTE: not using transition actions below as want a delay before the msgs get processed again
            erlang:send_after(?STREAM_RECONNECT_DELAY, self(), Event),
            {keep_state, Data}
    end;
handle_down_event(_CurState, {'DOWN', Ref, process, _, Reason} = Event, Data = #data{stream_config_update_monitor_ref = Ref,
                                                                                     connection = Connection}) ->
    %% the config_update stream is meant to be long lived, we always want it up as long as we have a grpc connection
    %% so if it goes down start it back up again
    %% NOTE(review): log string rejoined onto one line (fractured in source)
    lager:warning("config_update stream to validator is down, reconnecting. Reason: ~p", [Reason]),
    case connect_stream_config_update(Connection) of
        {ok, StreamPid} ->
            M = erlang:monitor(process, StreamPid),
            {keep_state, Data#data{stream_config_update_monitor_ref = M, stream_config_update_pid = StreamPid}};
        {error, _} ->
            %% if stream reconnnect fails, replay the orig down msg to trigger another attempt
            %% NOTE: not using transition actions below as want a delay before the msgs get processed again
            erlang:send_after(?STREAM_RECONNECT_DELAY, self(), Event),
            {keep_state, Data}
    end.

-spec get_uri_for_challenger(binary(), grpc_client_custom:connection()) -> {ok, {string(), pos_integer()}} | {error, any()}.
%% @doc Resolve the host/port of the validator running the POC identified
%% by OnionKeyHash, by querying the connected validator's routing data.
get_uri_for_challenger(OnionKeyHash, Connection)->
    Req = build_poc_challenger_req(OnionKeyHash),
    case send_grpc_unary_req(Connection, Req, 'poc_key_to_public_uri') of
        {ok, #gateway_public_routing_data_resp_v1_pb{public_uri = URIData}, _Req2Details} ->
            #routing_address_pb{uri = URI, pub_key = _PubKey} = URIData,
            #{host := IP, port := Port} = uri_string:parse(binary_to_list(URI)),
            {ok, {IP, Port}};
        {error, Reason, _Details} ->
            {error, Reason};
        {error, Reason} ->
            {error, Reason}
    end.

%% In test builds every peer address is forced to localhost so tests can
%% run against locally spawned validators.
-ifdef(TEST).
maybe_override_ip(_IP)->
    "127.0.0.1".
-else.
maybe_override_ip(IP)->
    IP.
-endif.

diff --git a/src/poc/miner_poc_mgr.erl b/src/poc/miner_poc_mgr.erl
new file mode 100644
index 000000000..b620d6c4f
--- /dev/null
+++ b/src/poc/miner_poc_mgr.erl
@@ -0,0 +1,984 @@
%%%-------------------------------------------------------------------
%%% @doc
%%% listens for block events, inspects the POCs in the block metadata
%%% and for each of our own keys which made it into the block
%%% kick off a POC
%%% @end
%%%-------------------------------------------------------------------
-module(miner_poc_mgr).

-behaviour(gen_server).

-include_lib("blockchain/include/blockchain_vars.hrl").
-include_lib("public_key/include/public_key.hrl").

-define(ACTIVE_POCS, active_pocs).
-define(KEYS, keys).
-define(ADDR_HASH_FP_RATE, 1.0e-9).
-define(POC_DB_CF, {?MODULE, poc_db_cf_handle}).
-ifdef(TEST).
%% lifespan of a POC, after which we will
%% submit the receipts txn and delete the local poc data
-define(POC_TIMEOUT, 4).
%% timeout after which we will GC the public poc data,
%% we expect the receipt txn to be absorbed before this
-define(POC_RECEIPTS_ABSORB_TIMEOUT, 15).
-else.
-define(POC_TIMEOUT, 10).
-define(POC_RECEIPTS_ABSORB_TIMEOUT, 200).
-endif.


%% ------------------------------------------------------------------
%% API exports
%% ------------------------------------------------------------------
-export([
    start_link/1,
    make_ets_table/0,
    cached_poc_key/1,
    save_poc_keys/2,
    check_target/3,
    report/4,
    active_pocs/0,
    local_poc_key/1,
    local_poc/1
]).
%% ------------------------------------------------------------------
%% gen_server exports
%% ------------------------------------------------------------------
-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).

%% ------------------------------------------------------------------
%% record defs and macros
%% ------------------------------------------------------------------
%% bloom filter used to de-duplicate peer address hashes on receipts
-record(addr_hash_filter, {
    start :: pos_integer(),
    height :: pos_integer(),
    byte_size :: pos_integer(),
    salt :: binary(),
    bloom :: bloom_nif:bloom()
}).

%% locally generated POC keypair awaiting confirmation in a block
-record(poc_key_data, {
    receive_height :: non_neg_integer(),
    keys :: keys()
}).

%% full private state of a POC run by this validator
-record(local_poc, {
    onion_key_hash :: binary(),
    block_hash :: binary() | undefined,
    keys :: keys() | undefined,
    target :: libp2p_crypto:pubkey_bin(),
    onion :: binary() | undefined,
    secret :: binary() | undefined,
    responses = #{},
    challengees = [] :: [libp2p_crypto:pubkey_bin()],
    packet_hashes = [] :: [{libp2p_crypto:pubkey_bin(), binary()}],
    start_height :: non_neg_integer()
}).

-record(state, {
    db :: rocksdb:db_handle(),
    cf :: rocksdb:cf_handle(),
    chain :: undefined | blockchain:blockchain(),
    ledger :: undefined | blockchain:ledger(),
    sig_fun :: undefined | libp2p_crypto:sig_fun(),
    pub_key = undefined :: undefined | libp2p_crypto:pubkey_bin(),
    addr_hash_filter :: undefined | #addr_hash_filter{},
    poc_timeout :: pos_integer() | undefined,
    poc_receipts_absorb_timeout :: pos_integer() | undefined
}).
-type state() :: #state{}.
-type keys() :: #{secret => libp2p_crypto:privkey(), public => libp2p_crypto:pubkey()}.
-type poc_key() :: binary().
-type cached_poc_key_data() :: #poc_key_data{}.
-type cached_poc_key_type() :: {POCKey :: poc_key(), POCKeyData :: #poc_key_data{}}.

-type local_poc() :: #local_poc{}.
-type local_pocs() :: [local_poc()].
-type local_poc_key() :: binary().

-export_type([keys/0, local_poc_key/0, cached_poc_key_data/0, cached_poc_key_type/0, local_poc/0, local_pocs/0]).

%% ------------------------------------------------------------------
%% API functions
%% ------------------------------------------------------------------

%% @doc Return the onion key hash which identifies a local POC.
-spec local_poc_key(local_poc()) -> local_poc_key().
local_poc_key(LocalPoC) ->
    LocalPoC#local_poc.onion_key_hash.

%% @doc Start the manager; if an ETS table was handed to us in Args
%% (we are its heir), transfer ownership to the new process.
-spec start_link(#{}) -> {ok, pid()}.
start_link(Args) when is_map(Args) ->
    case gen_server:start_link({local, ?MODULE}, ?MODULE, Args, []) of
        {ok, Pid} ->
            %% if we have an ETS table reference, give ownership to the new process
            %% we likely are the `heir', so we'll get it back if this process dies
            case maps:find(tab1, Args) of
                error ->
                    ok;
                {ok, Tab1} ->
                    true = ets:give_away(Tab1, Pid, undefined)
            end,
            {ok, Pid};
        Other ->
            Other
    end.

%% @doc Create the named, public poc-key table with the caller as heir.
%% BUGFIX: spec previously claimed `ok' but the function returns the table.
-spec make_ets_table() -> ets:tab().
make_ets_table() ->
    Tab1 = ets:new(
        ?KEYS,
        [
            named_table,
            public,
            {heir, self(), undefined}
        ]
    ),
    Tab1.

-spec save_poc_keys(CurHeight :: non_neg_integer(), [keys()]) -> ok.
%% @doc Cache every POC keypair this validator generated for the current
%% block proposal, keyed by the sha256 of the public key (the onion key hash).
save_poc_keys(CurHeight, KeyList) ->
    %% these are the keys generated by this validator and submitted in the block
    %% either none or a subset of these will actually make it to the block
    %% we dont obviously know which may make it so we cache them all here
    %% push each key set to ets with a hash of the public key as key
    %% each new block we will then check if any of our cached keys made it into the block
    %% and if so retrieve the private key for each
    [
        begin
            #{public := PubKey} = Keys,
            OnionKeyHash = crypto:hash(sha256, libp2p_crypto:pubkey_to_bin(PubKey)),
            POCKeyRec = #poc_key_data{receive_height = CurHeight, keys = Keys},
            lager:info("caching local poc keys with hash ~p", [OnionKeyHash]),
            _ = cache_poc_key(OnionKeyHash, POCKeyRec)
        end
        || Keys <- KeyList
    ],
    ok.

%% @doc Look up a locally generated POC key by onion key hash.
-spec cached_poc_key(poc_key()) -> {ok, cached_poc_key_type()} | false.
cached_poc_key(ID) ->
    case ets:lookup(?KEYS, ID) of
        [Res] -> {ok, Res};
        _ -> false
    end.

%% @doc Return all POCs currently active on this validator (reads rocksdb).
-spec active_pocs()->[local_poc()].
active_pocs() ->
    gen_server:call(?MODULE, {active_pocs}).

%% @doc Answer a challengee's query: is it the target of the POC with the
%% given onion key hash for the given block hash? Returns {true, Onion}
%% when it is, false when it is not, or {error, _} for unknown/expired
%% POCs and mismatched block hashes. Results are cached via e2qc.
-spec check_target(
    Challengee :: libp2p_crypto:pubkey_bin(),
    BlockHash :: binary(),
    OnionKeyHash :: binary()
) -> false | {true, binary()} | {error, any()}.
check_target(Challengee, BlockHash, OnionKeyHash) ->
    lager:info("*** check target with key ~p", [OnionKeyHash]),
    LocalPOC = e2qc:cache(
        local_pocs,
        OnionKeyHash,
        30,
        fun() -> ?MODULE:local_poc(OnionKeyHash) end
    ),
    lager:info("*** e2qc local POC check target result ~p", [LocalPOC]),
    Res =
        case LocalPOC of
            {error, not_found} ->
                %% if the cache returns not found it could be it hasnt yet been initialized
                %% so check if we have a cached POC key. these are added at the point
                %% a block is proposed and then before the block has been gossiped
                %% if such a key exists its a strong indication its not yet initialized
                %% OR the e2qc cache was called before the POC was initialised and it
                %% has cached the {error, not_found} term
                %% so if we have the key then check rocks again,
                %% if still not available then its likely the POC hasnt been initialized
                %% if found then invalidate the e2qc cache
                case cached_poc_key(OnionKeyHash) of
                    {ok, {_KeyHash, _POCData}} ->
                        %% we do know this key
                        lager:info("*** ~p is a known key ~p", [OnionKeyHash]),
                        case ?MODULE:local_poc(OnionKeyHash) of
                            {error, _} ->
                                %% clients should retry after a period of time
                                {error, <<"queued_poc">>};
                            {ok, #local_poc{block_hash = BlockHash, target = Challengee, onion = Onion}} ->
                                %% rocks now has it: evict the stale cached error
                                e2qc:evict(local_pocs, OnionKeyHash),
                                {true, Onion};
                            {ok, #local_poc{block_hash = BlockHash, target = _OtherTarget}} ->
                                e2qc:evict(local_pocs, OnionKeyHash),
                                false;
                            {ok, #local_poc{block_hash = _OtherBlockHash, target = _Target}} ->
                                e2qc:evict(local_pocs, OnionKeyHash),
                                {error, mismatched_block_hash}
                        end;
                    _ ->
                        lager:info("*** ~p is NOT a known key", [OnionKeyHash]),
                        {error, <<"invalid_or_expired_poc">>}
                end;
            %% NOTE: BlockHash/Challengee below are bound from the arguments,
            %% so these record patterns only match on equal values
            {ok, #local_poc{block_hash = BlockHash, target = Challengee, onion = Onion}} ->
                {true, Onion};
            {ok, #local_poc{block_hash = BlockHash, target = _OtherTarget}} ->
                false;
            {ok, #local_poc{block_hash = _OtherBlockHash, target = _Target}} ->
                {error, mismatched_block_hash};
            _ ->
                false
        end,
    lager:info("*** check target result for key ~p: ~p", [OnionKeyHash, Res]),
    Res.

-spec report(
    Report :: {witness, blockchain_poc_witness_v1:poc_witness()} | {receipt, blockchain_poc_receipt_v1:receipt()},
    OnionKeyHash :: binary(),
    Peer :: libp2p_crypto:pubkey_bin(),
    P2PAddr :: libp2p_crypto:peer_id()) -> ok.
%% @doc Forward a witness/receipt report to the manager asynchronously.
report(Report, OnionKeyHash, Peer, P2PAddr) ->
    gen_server:cast(?MODULE, {Report, OnionKeyHash, Peer, P2PAddr}).

%% @doc Read the local POC record for an onion key hash straight from
%% rocksdb (handles published via persistent_term, so callable from any
%% process without going through the gen_server).
-spec local_poc(OnionKeyHash :: binary()) ->
    {ok, local_poc()} | {error, any()}.
local_poc(OnionKeyHash) ->
    case persistent_term:get(?POC_DB_CF, not_found) of
        not_found -> {error, not_found};
        {DB, CF} ->
            case rocksdb:get(DB, CF, OnionKeyHash, []) of
                {ok, Bin} ->
                    %% stored value is a single-element list wrapping the record
                    [POC] = erlang:binary_to_term(Bin),
                    {ok, POC};
                not_found ->
                    {error, not_found};
                Error ->
                    lager:error("error: ~p", [Error]),
                    Error
            end
    end.

%% ------------------------------------------------------------------
%% gen_server functions
%% ------------------------------------------------------------------
%% Chain setup is deferred to the `init' info message sent below so that
%% startup does not block on blockchain_worker.
init(_Args) ->
    lager:info("starting ~p", [?MODULE]),
    erlang:send_after(500, self(), init),
    {ok, PubKey, SigFun, _ECDHFun} = blockchain_swarm:keys(),
    SelfPubKeyBin = libp2p_crypto:pubkey_to_bin(PubKey),
    DB = miner_poc_mgr_db_owner:db(),
    CF = miner_poc_mgr_db_owner:poc_mgr_cf(),
    %% publish the db/cf handles for lock-free reads via local_poc/1
    ok = persistent_term:put(?POC_DB_CF, {DB, CF}),
    {ok, #state{
        db = DB,
        cf = CF,
        sig_fun = SigFun,
        pub_key = SelfPubKeyBin
    }}.

handle_call({active_pocs}, _From, State = #state{}) ->
    {reply, local_pocs(State), State};
handle_call(_Request, _From, State = #state{}) ->
    {reply, ok, State}.

handle_cast({{witness, Witness}, OnionKeyHash, Peer, _PeerAddr}, State) ->
    handle_witness(Witness, OnionKeyHash, Peer, State);
handle_cast({{receipt, Receipt}, OnionKeyHash, Peer, PeerAddr}, State) ->
    handle_receipt(Receipt, OnionKeyHash, Peer, PeerAddr, State);
handle_cast(_Request, State) ->
    {noreply, State}.

%% Deferred chain setup: retry every 500ms until blockchain_worker has a
%% chain, then subscribe to block events and read POC timeouts from chain
%% config (falling back to compile-time defaults).
handle_info(init, #state{chain = undefined} = State) ->
    %% No chain
    case blockchain_worker:blockchain() of
        undefined ->
            erlang:send_after(500, self(), init),
            {noreply, State};
        Chain ->
            ok = blockchain_event:add_handler(self()),
            Ledger = blockchain:ledger(Chain),
            ok = miner_poc:add_stream_handler(blockchain_swarm:tid(), miner_poc_report_handler),
            SelfPubKeyBin = blockchain_swarm:pubkey_bin(),
            POCTimeout =
                case blockchain:config(?poc_timeout, Ledger) of
                    {ok, T1} -> T1;
                    _ -> ?POC_TIMEOUT
                end,
            %% local variable typo fixed: Aborb -> Absorb
            POCReceiptsAbsorbTimeout =
                case blockchain:config(?poc_receipts_absorb_timeout, Ledger) of
                    {ok, T2} -> T2;
                    _ -> ?POC_RECEIPTS_ABSORB_TIMEOUT
                end,
            {noreply, State#state{
                chain = Chain,
                ledger = Ledger,
                pub_key = SelfPubKeyBin,
                poc_timeout = POCTimeout,
                poc_receipts_absorb_timeout = POCReceiptsAbsorbTimeout
            }}
    end;
handle_info(init, State) ->
    {noreply, State};
handle_info({blockchain_event, {new_chain, NC}}, State) ->
    {noreply, State#state{chain = NC}};
handle_info({blockchain_event, _Event}, #state{chain = undefined} = State)->
    %% ignore block events until chain setup has completed
    {noreply, State};
handle_info(
    {blockchain_event, {add_block, BlockHash, Sync, Ledger} = _Event},
    #state{chain = Chain} = State
)->
    CurPOCChallengerType =
        case blockchain:config(?poc_challenger_type, Ledger) of
            {ok, V} -> V;
            _ -> undefined
        end,
    lager:info("received add block event, sync is ~p, poc_challenge_type is ~p", [Sync, CurPOCChallengerType]),
    State1 = maybe_init_addr_hash(State),
    ok = handle_add_block_event(CurPOCChallengerType, BlockHash, Chain, State1),
    {noreply, State1};
%% TODO: review approach to syc blocks again
%%handle_info(
%%    {blockchain_event, {add_block, _BlockHash, Sync, _Ledger} = _Event},
%%    #state{chain = _Chain} = State
%%) when Sync =:= true ->
%%    lager:info("ignoring add block event, sync is ~p", [Sync]),
%%    {noreply, State};
handle_info(_Info, State = #state{}) ->
    {noreply, State}.

terminate(_Reason, _State = #state{}) ->
    %% drop the published db handles so late readers get {error, not_found}
    persistent_term:erase(?POC_DB_CF),
    ok.

%%%===================================================================
%%% breakout functions
%%%===================================================================
%% On each new block (when validators are the POC challengers): publish
%% public POC data, GC expired local POCs, and periodically GC cached keys.
-spec handle_add_block_event(
    POCChallengeType :: validator | undefined,
    BlockHash :: binary(),
    Chain :: blockchain:blockchain(),
    State :: state()
) -> ok.
handle_add_block_event(POCChallengeType, BlockHash, Chain, State) when POCChallengeType == validator ->
    case blockchain:get_block(BlockHash, Chain) of
        {ok, Block} ->
            %% save public data on each POC key found in the block to the ledger
            %% that way all validators have access to this public data
            %% however the validator which is running the POC will be the only node
            %% which has the secret
            ok = process_block_pocs(BlockHash, Block, State),
            %% take care of GC
            ok = purge_local_pocs(Block, State),
            BlockHeight = blockchain_block:height(Block),
            %% GC local pocs keys every 50 blocks
            %% NOTE, we dont need to GC the public POCs on the ledger here
            %% that GC is handled elsewhere via blockchain_ledger_v1:maybe_gc_pocs/2
            case BlockHeight rem 50 == 0 of
                true -> ok = purge_pocs_keys(Block, State);
                false -> ok
            end;
        _ ->
            %% err what?
            ok
    end;
handle_add_block_event(_POCChallengeType, _BlockHash, _Chain, _State) ->
    ok.

%% Validate an incoming witness report and, if acceptable, store it under
%% the packet hash it witnesses (capped at the per-hop max, de-duplicated
%% per gateway, self-witnesses rejected).
-spec handle_witness(
    Witness :: blockchain_poc_witness_v1:poc_witness(),
    OnionKeyHash :: binary(),
    Address :: libp2p_crypto:pubkey_bin(),
    State :: #state{}
) -> {noreply, state()}.
handle_witness(Witness, OnionKeyHash, Peer, #state{chain = Chain} = State) ->
    lager:info("got witness ~p with onionkeyhash ~p", [Witness, OnionKeyHash]),
    %% Validate the witness is correct
    Ledger = blockchain:ledger(Chain),
    case validate_witness(Witness, Ledger) of
        false ->
            %% NOTE(review): log string rejoined onto one line (fractured in source)
            lager:warning("ignoring witness ~p for onionkeyhash ~p. Reason: invalid", [Witness, OnionKeyHash]),
            {noreply, State};
        true ->
            %% get the local POC
            case ?MODULE:local_poc(OnionKeyHash) of
                {error, _} ->
                    lager:warning("ignoring witness ~p for onionkeyhash ~p. Reason: no local_poc", [Witness, OnionKeyHash]),
                    {noreply, State};
                {ok, #local_poc{packet_hashes = PacketHashes, responses = Response0} = POC} ->
                    PacketHash = blockchain_poc_witness_v1:packet_hash(Witness),
                    GatewayWitness = blockchain_poc_witness_v1:gateway(Witness),
                    %% check this is a known layer of the packet
                    case lists:keyfind(PacketHash, 2, PacketHashes) of
                        false ->
                            lager:warning("Saw invalid witness with packet hash ~p and onionkeyhash ~p", [PacketHash, OnionKeyHash]),
                            {noreply, State};
                        {GatewayWitness, PacketHash} ->
                            lager:warning("Saw self-witness from ~p for onionkeyhash ~p", [GatewayWitness, OnionKeyHash]),
                            {noreply, State};
                        _ ->
                            Witnesses = maps:get(PacketHash, Response0, []),
                            PerHopMaxWitnesses = blockchain_utils:poc_per_hop_max_witnesses(Ledger),
                            case erlang:length(Witnesses) >= PerHopMaxWitnesses of
                                true ->
                                    lager:warning("ignoring witness ~p for onionkeyhash ~p. Reason: exceeded per hop max witnesses", [Witness, OnionKeyHash]),
                                    {noreply, State};
                                false ->
                                    %% Don't allow putting duplicate response in the witness list resp
                                    Predicate = fun({_, W}) ->
                                        blockchain_poc_witness_v1:gateway(W) == GatewayWitness
                                    end,
                                    Responses1 =
                                        case lists:any(Predicate, Witnesses) of
                                            false ->
                                                maps:put(
                                                    PacketHash,
                                                    lists:keystore(
                                                        Peer,
                                                        1,
                                                        Witnesses,
                                                        {Peer, Witness}
                                                    ),
                                                    Response0
                                                );
                                            true ->
                                                Response0
                                        end,
                                    UpdatedPOC = POC#local_poc{responses = Responses1},
                                    ok = write_local_poc(UpdatedPOC, State),
                                    {noreply, State}
                            end
                    end
            end
    end.

-spec handle_receipt(
    Receipt :: blockchain_poc_receipt_v1:receipt(),
    OnionKeyHash :: binary(),
    Peer :: libp2p_crypto:pubkey_bin(),
    PeerAddr :: libp2p_crypto:peer_id(),
    State :: #state{}
) -> {noreply, state()}.
%% Validate an incoming receipt report and, if it matches a known
%% challengee/layer of the POC, store it (applying the peer address hash
%% de-duplication filter when one is active).
handle_receipt(Receipt, OnionKeyHash, Peer, PeerAddr, #state{chain = Chain} = State) ->
    lager:info("got receipt ~p with onionkeyhash ~p", [Receipt, OnionKeyHash]),
    Gateway = blockchain_poc_receipt_v1:gateway(Receipt),
    LayerData = blockchain_poc_receipt_v1:data(Receipt),
    Ledger = blockchain:ledger(Chain),
    case blockchain_poc_receipt_v1:is_valid(Receipt, Ledger) of
        false ->
            %% BUGFIX: format string had one ~p but two args
            lager:warning("ignoring invalid receipt ~p for onionkeyhash ~p", [Receipt, OnionKeyHash]),
            {noreply, State};
        true ->
            %% get the POC data from the cache
            case ?MODULE:local_poc(OnionKeyHash) of
                {error, _} ->
                    lager:warning("ignoring receipt ~p for onionkeyhash ~p. Reason: no local_poc", [Receipt, OnionKeyHash]),
                    {noreply, State};
                {ok, #local_poc{challengees = Challengees, responses = Response0} = POC} ->
                    case lists:keyfind(Gateway, 1, Challengees) of
                        {Gateway, LayerData} ->
                            case maps:get(Gateway, Response0, undefined) of
                                undefined ->
                                    IsFirstChallengee =
                                        case hd(Challengees) of
                                            {Gateway, _} ->
                                                true;
                                            _ ->
                                                false
                                        end,
                                    %% compute address hash and compare to known ones
                                    %% TODO - This needs refactoring, wont work as is
                                    case check_addr_hash(PeerAddr, State) of
                                        true when IsFirstChallengee ->
                                            %% drop whole challenge because we should always be able to get the first hop's receipt
                                            %% TODO: delete the cached POC here?
                                            {noreply, State};
                                        true ->
                                            {noreply, State};
                                        undefined ->
                                            Responses1 = maps:put(
                                                Gateway,
                                                {Peer, Receipt},
                                                Response0
                                            ),
                                            UpdatedPOC = POC#local_poc{responses = Responses1},
                                            ok = write_local_poc(UpdatedPOC, State),
                                            {noreply, State};
                                        PeerHash ->
                                            Responses1 = maps:put(
                                                Gateway,
                                                {Peer,
                                                    blockchain_poc_receipt_v1:addr_hash(
                                                        Receipt,
                                                        PeerHash
                                                    )},
                                                Response0
                                            ),
                                            UpdatedPOC = POC#local_poc{responses = Responses1},
                                            ok = write_local_poc(UpdatedPOC, State),
                                            {noreply, State}
                                    end;
                                _ ->
                                    lager:warning("Already got this receipt ~p for ~p ignoring", [
                                        Receipt,
                                        Gateway
                                    ]),
                                    {noreply, State}
                            end;
                        {Gateway, OtherData} ->
                            lager:warning("Got incorrect layer data ~p from ~p (expected ~p) for onionkeyhash ~p", [
                                Gateway,
                                OtherData,
                                Receipt,
                                OnionKeyHash
                            ]),
                            {noreply, State};
                        false ->
                            %% BUGFIX: format string had one ~p but two args
                            lager:warning("Got unexpected receipt from ~p for onionkeyhash ~p", [Gateway, OnionKeyHash]),
                            {noreply, State}
                    end
            end
    end.

%% ------------------------------------------------------------------
%% Internal functions
%% ------------------------------------------------------------------
%% Build and persist a new local POC: derive entropy from the onion key
%% hash + block hash, select a target and path, construct the onion
%% packet, and write the resulting #local_poc{} to rocksdb.
initialize_poc(BlockHash, POCStartHeight, Keys, Vars, #state{chain = Chain, pub_key = Challenger} = State) ->
    Ledger = blockchain:ledger(Chain),
    #{public := OnionCompactKey, secret := {ecc_compact, POCPrivKey}} = Keys,
    POCPubKeyBin = libp2p_crypto:pubkey_to_bin(OnionCompactKey),
    #'ECPrivateKey'{privateKey = PrivKeyBin} = POCPrivKey,
    POCPrivKeyHash = crypto:hash(sha256, PrivKeyBin),
    OnionKeyHash = crypto:hash(sha256, POCPubKeyBin),
    lager:info("*** initializing POC at height ~p for local onion key hash ~p", [POCStartHeight, OnionKeyHash]),
    %% NOTE(review): binary construction reconstructed from garbled source
    %% (the log line below documents the intended composition) -- confirm upstream
    Entropy = <<OnionKeyHash/binary, BlockHash/binary>>,
    lager:info("*** entropy constructed using onionkeyhash ~p and blockhash ~p", [OnionKeyHash, BlockHash]),
    ZoneRandState = blockchain_utils:rand_state(Entropy),
    InitTargetRandState = blockchain_utils:rand_state(POCPrivKeyHash),
    lager:info("*** ZoneRandState ~p", [ZoneRandState]),
    lager:info("*** InitTargetRandState ~p", [InitTargetRandState]),
    case blockchain_poc_target_v5:target(Challenger, InitTargetRandState, ZoneRandState, Ledger, Vars) of
        {error, Reason}->
            lager:info("*** failed to find a target, reason ~p", [Reason]),
            noop;
        {ok, {TargetPubkeybin, TargetRandState}}->
            lager:info("*** found target ~p", [TargetPubkeybin]),
            {ok, LastChallenge} = blockchain_ledger_v1:current_height(Ledger),
            {ok, B} = blockchain:get_block(LastChallenge, Chain),
            Time = blockchain_block:time(B),
            Path = blockchain_poc_path_v4:build(TargetPubkeybin, TargetRandState, Ledger, Time, Vars),
            lager:info("path created ~p", [Path]),
            N = erlang:length(Path),
            %% NOTE(review): IV extraction reconstructed from garbled source;
            %% the head secret layer seeds the onion packet IV -- confirm upstream
            [<<IV:16/integer-unsigned-little, _/binary>> | LayerData] =
                blockchain_txn_poc_receipts_v2:create_secret_hash(
                    Entropy,
                    N + 1
                ),
            OnionList = lists:zip([libp2p_crypto:bin_to_pubkey(P) || P <- Path], LayerData),
            {Onion, Layers} = blockchain_poc_packet_v2:build(Keys, IV, OnionList),
            [_|LayerHashes] = [crypto:hash(sha256, L) || L <- Layers],
            Challengees = lists:zip(Path, LayerData),
            PacketHashes = lists:zip(Path, LayerHashes),
            Secret = libp2p_crypto:keys_to_bin(Keys),
            %% save the POC data to our local cache
            LocalPOC = #local_poc{
                onion_key_hash = OnionKeyHash,
                block_hash = BlockHash,
                target = TargetPubkeybin,
                onion = Onion,
                secret = Secret,
                challengees = Challengees,
                packet_hashes = PacketHashes,
                keys = Keys,
                start_height = POCStartHeight
            },
            ok = write_local_poc(LocalPOC, State),
            lager:info("starting poc for challengeraddr ~p, onionhash ~p", [Challenger, OnionKeyHash]),
            ok
    end.

-spec process_block_pocs(
    BlockHash :: blockchain_block:hash(),
    Block :: blockchain_block:block(),
    State :: state()
) -> ok.
%% For every ephemeral POC key published in the block, check whether the
%% key is one of our own cached keys; if so, spawn a linked process to
%% initialize the POC (the secret is known only to this validator).
process_block_pocs(
    BlockHash,
    Block,
    #state{chain = Chain} = State
) ->
    Ledger = blockchain:ledger(Chain),
    BlockHeight = blockchain_block:height(Block),
    %% get the ephemeral keys from the block
    %% these will be a prop with tuples as {MemberPosInCG, PocKeyHash}
    BlockPocEphemeralKeys = blockchain_block_v1:poc_keys(Block),
    %% IDIOM: side-effect-only list comprehension replaced with lists:foreach
    lists:foreach(
        fun({_CGPos, OnionKeyHash}) ->
            %% the published key is a hash of the public key, aka the onion key hash
            %% use this to check our local cache containing the secret keys of POCs owned by this validator
            %% if it is one of this local validators POCs, then kick it off
            case cached_poc_key(OnionKeyHash) of
                {ok, {_KeyHash, #poc_key_data{keys = Keys}}} ->
                    lager:info("found local poc key, starting a poc for ~p", [OnionKeyHash]),
                    %% its a locally owned POC key, so kick off a new POC
                    Vars = blockchain_utils:vars_binary_keys_to_atoms(maps:from_list(blockchain_ledger_v1:snapshot_vars(Ledger))),
                    %% NOTE(review): spawn_link from the gen_server -- a crash in
                    %% initialize_poc will take this server down too; confirm intended
                    spawn_link(fun() -> initialize_poc(BlockHash, BlockHeight, Keys, Vars, State) end);
                _ ->
                    lager:info("failed to find local poc key for ~p", [OnionKeyHash]),
                    noop
            end
        end,
        BlockPocEphemeralKeys
    ),
    ok.

-spec purge_local_pocs(
    Block :: blockchain_block:block(),
    State :: state()
) -> ok.
%% @doc Finalize any local POC whose lifespan has elapsed: submit the
%% receipts txn and delete the local (secret) POC record from rocksdb.
purge_local_pocs(
    Block,
    #state{chain = Chain, pub_key = SelfPubKeyBin, sig_fun = SigFun, poc_timeout = POCTimeout} = State
) ->
    %% iterate over the local POCs in our rocksdb
    %% end and clean up any which have exceeded their life span
    %% these are POCs which were initiated by this node
    %% and the data is known only to this node
    BlockHeight = blockchain_block:height(Block),
    LocalPOCs = local_pocs(State),
    lists:foreach(
        %% NOTE: each element is a single-record list -- local_pocs/1
        %% presumably returns the stored [#local_poc{}] values as-is;
        %% confirm against local_pocs/1 (not visible here)
        fun([#local_poc{start_height = POCStartHeight, onion_key_hash = OnionKeyHash} = POC]) ->
            case (BlockHeight - POCStartHeight) > POCTimeout of
                true ->
                    lager:info("*** purging local poc with key ~p", [OnionKeyHash]),
                    %% this POC's time is up, submit receipts we have received
                    ok = submit_receipts(POC, SelfPubKeyBin, SigFun, Chain),
                    %% as receipts have been submitted, we can delete the local poc from the db
                    %% the public poc data will remain until at least the receipt txn is absorbed
                    _ = delete_local_poc(OnionKeyHash, State);
                _ ->
                    lager:info("*** not purging local poc with key ~p. BlockHeight: ~p, POCStartHeight: ~p", [OnionKeyHash, BlockHeight, POCStartHeight]),
                    ok
            end
        end,
        LocalPOCs
    ),
    ok.

-spec purge_pocs_keys(
    Block :: blockchain_block:block(),
    State :: state()
) -> ok.
%% @doc GC the ETS cache of locally generated POC keys.
%% Keys are cached when this node proposes a block (one or more *may*
%% subsequently appear in a mined block and trigger a local POC); once a
%% key is older than the POC lifespan it can never become active, so it
%% is safe to delete.
purge_pocs_keys(
    Block,
    #state{poc_timeout = POCTimeout} = _State
) ->
    CurHeight = blockchain_block:height(Block),
    %% walk every cached key; delete those whose would-be POC has ended
    Expired = fun({_Key, #poc_key_data{receive_height = ReceivedAt}}) ->
        (CurHeight - ReceivedAt) > POCTimeout
    end,
    lists:foreach(
        fun({KeyHash, _KeyData} = Entry) ->
            case Expired(Entry) of
                true -> ok = delete_cached_poc_key(KeyHash);
                false -> ok
            end
        end,
        cached_poc_keys()
    ),
    ok.

-spec submit_receipts(local_poc(), libp2p_crypto:pubkey_bin(), libp2p_crypto:sig_fun(), blockchain:blockchain()) -> ok.
%% Build the poc_receipts_v2 txn from the collected receipts/witnesses,
%% sign it, and submit it (via the txn mgr when not in consensus, via the
%% hbbft sidecar when in consensus).
%% BUGFIX: previously, when poc_version < 10 the txn variable was bound
%% to `noop' and then passed to blockchain_txn:sign/2, which would crash.
%% Now that case logs and returns ok without signing/submitting.
submit_receipts(
    #local_poc{
        onion_key_hash = OnionKeyHash,
        responses = Responses0,
        secret = Secret,
        packet_hashes = LayerHashes,
        block_hash = BlockHash
    } = _Data,
    Challenger,
    SigFun,
    Chain
) ->
    Path1 = lists:foldl(
        fun({Challengee, LayerHash}, Acc) ->
            {Address, Receipt} = maps:get(Challengee, Responses0, {make_ref(), undefined}),
            %% get any witnesses not from the same p2p address and also ignore challengee as a witness (self-witness)
            Witnesses = [
                W
                || {A, W} <- maps:get(LayerHash, Responses0, []), A /= Address, A /= Challengee
            ],
            E = blockchain_poc_path_element_v1:new(Challengee, Receipt, Witnesses),
            [E | Acc]
        end,
        [],
        LayerHashes
    ),
    case blockchain:config(?poc_version, blockchain:ledger(Chain)) of
        {ok, PoCVersion} when PoCVersion >= 10 ->
            Txn0 = blockchain_txn_poc_receipts_v2:new(
                Challenger,
                Secret,
                OnionKeyHash,
                lists:reverse(Path1),
                BlockHash
            ),
            Txn1 = blockchain_txn:sign(Txn0, SigFun),
            lager:info("submitting blockchain_txn_poc_receipts_v2 for onion key hash ~p: ~p", [OnionKeyHash, Txn0]),
            case miner_consensus_mgr:in_consensus() of
                false ->
                    lager:info("node is not in consensus", []),
                    ok = blockchain_txn_mgr:submit(Txn1, fun(_Result) -> noop end);
                true ->
                    lager:info("node is in consensus", []),
                    _ = miner_hbbft_sidecar:submit(Txn1)
            end,
            ok;
        _ ->
            %% hmm we shouldnt really hit here as this all started with poc version 10
            lager:warning("not submitting receipts for onion key hash ~p, poc_version < 10", [OnionKeyHash]),
            ok
    end.

%% @doc Insert a locally generated POC key set into the ETS cache.
-spec cache_poc_key(poc_key(), cached_poc_key_data()) -> true.
cache_poc_key(ID, Keys) ->
    true = ets:insert(?KEYS, {ID, Keys}).

%% @doc Return every cached POC key entry.
-spec cached_poc_keys() -> [cached_poc_key_type()].
cached_poc_keys() ->
    ets:tab2list(?KEYS).

%% @doc Remove a POC key entry from the ETS cache.
-spec delete_cached_poc_key(poc_key()) -> ok.
delete_cached_poc_key(Key) ->
    true = ets:delete(?KEYS, Key),
    ok.

-spec validate_witness(blockchain_poc_witness_v1:witness(), blockchain_ledger_v1:ledger()) ->
    boolean().
%% @doc A witness is valid when its gateway exists on the ledger, has an
%% asserted location, and its signature/contents check out.
validate_witness(Witness, Ledger) ->
    Gateway = blockchain_poc_witness_v1:gateway(Witness),
    %% TODO this should be against the ledger at the time the receipt was mined
    case blockchain_ledger_v1:find_gateway_info(Gateway, Ledger) of
        {error, _Reason} ->
            lager:warning("failed to get witness ~p info ~p", [Gateway, _Reason]),
            false;
        {ok, GwInfo} ->
            case blockchain_ledger_gateway_v2:location(GwInfo) of
                undefined ->
                    lager:warning("ignoring witness ~p location undefined", [Gateway]),
                    false;
                _ ->
                    blockchain_poc_witness_v1:is_valid(Witness, Ledger)
            end
    end.

%% Check whether a reporting peer's salted address hash has been seen
%% before during this filter window.
%% Returns: undefined when no filter is active or the address is not a
%% plain ip4 multiaddr; true when the hash was already in the bloom
%% filter; otherwise the freshly computed hash value (now recorded).
check_addr_hash(_PeerAddr, #state{addr_hash_filter = undefined}) ->
    undefined;
check_addr_hash(PeerAddr, #state{
    addr_hash_filter = #addr_hash_filter{byte_size = Size, salt = Hash, bloom = Bloom}
}) ->
    case multiaddr:protocols(PeerAddr) of
        [{"ip4", Address}, {_, _}] ->
            {ok, Addr} = inet:parse_ipv4_address(Address),
            %% salted, truncated password-hash of the ip4 address
            %% NOTE(review): `salt' field holds a block hash; the first
            %% pwhash_SALTBYTES of it are used as the salt -- confirm
            Val = binary:part(
                enacl:pwhash(
                    list_to_binary(tuple_to_list(Addr)),
                    binary:part(Hash, {0, enacl:pwhash_SALTBYTES()})
                ),
                {0, Size}
            ),
            %% check_and_set: true if already present, false if newly added
            case bloom:check_and_set(Bloom, Val) of
                true ->
                    true;
                false ->
                    Val
            end;
        _ ->
            undefined
    end.

-spec maybe_init_addr_hash(#state{}) -> #state{}.
+maybe_init_addr_hash(#state{chain = undefined} = State) -> + %% no chain + State; +maybe_init_addr_hash(#state{chain = Chain, addr_hash_filter = undefined} = State) -> + %% check if we have the block we need + Ledger = blockchain:ledger(Chain), + case blockchain:config(?poc_addr_hash_byte_count, Ledger) of + {ok, Bytes} when is_integer(Bytes), Bytes > 0 -> + case blockchain:config(?poc_challenge_interval, Ledger) of + {ok, Interval} -> + {ok, Height} = blockchain:height(Chain), + StartHeight = max(Height - (Height rem Interval), 1), + %% check if we have this block + case blockchain:get_block(StartHeight, Chain) of + {ok, Block} -> + Hash = blockchain_block:hash_block(Block), + %% ok, now we can build the filter + Gateways = blockchain_ledger_v1:gateway_count(Ledger), + {ok, Bloom} = bloom:new_optimal(Gateways, ?ADDR_HASH_FP_RATE), + sync_filter(Block, Bloom, Chain), + State#state{ + addr_hash_filter = #addr_hash_filter{ + start = StartHeight, + height = Height, + byte_size = Bytes, + salt = Hash, + bloom = Bloom + } + }; + _ -> + State + end; + _ -> + State + end; + _ -> + State + end; +maybe_init_addr_hash( + #state{ + chain = Chain, + addr_hash_filter = #addr_hash_filter{ + start = StartHeight, + height = Height, + byte_size = Bytes, + salt = Hash, + bloom = Bloom + } + } = State +) -> + Ledger = blockchain:ledger(Chain), + case blockchain:config(?poc_addr_hash_byte_count, Ledger) of + {ok, Bytes} when is_integer(Bytes), Bytes > 0 -> + case blockchain:config(?poc_challenge_interval, Ledger) of + {ok, Interval} -> + {ok, CurHeight} = blockchain:height(Chain), + case max(Height - (Height rem Interval), 1) of + StartHeight -> + case CurHeight of + Height -> + %% ok, everything lines up + State; + _ -> + case blockchain:get_block(Height + 1, Chain) of + {ok, Block} -> + sync_filter(Block, Bloom, Chain), + State#state{ + addr_hash_filter = #addr_hash_filter{ + start = StartHeight, + height = CurHeight, + byte_size = Bytes, + salt = Hash, + bloom = Bloom + } + }; + _ 
-> + State + end + end; + _NewStart -> + %% filter is stale + maybe_init_addr_hash(State#state{addr_hash_filter = undefined}) + end; + _ -> + State + end; + _ -> + State#state{addr_hash_filter = undefined} + end. + +sync_filter(StopBlock, Bloom, Blockchain) -> + blockchain:fold_chain( + fun(Blk, _) -> + blockchain_utils:find_txn(Blk, fun(T) -> + case blockchain_txn:type(T) == blockchain_txn_poc_receipts_v2 of + true -> + %% abuse side effects here for PERFORMANCE + [update_addr_hash(Bloom, E) || E <- blockchain_txn_poc_receipts_v2:path(T)]; + false -> + ok + end, + false + end), + case Blk == StopBlock of + true -> + return; + false -> + continue + end + end, + any, + element(2, blockchain:head_block(Blockchain)), + Blockchain + ). + +-spec update_addr_hash( + Bloom :: bloom_nif:bloom(), + Element :: blockchain_poc_path_element_v1:poc_element() +) -> ok. +update_addr_hash(Bloom, Element) -> + case blockchain_poc_path_element_v1:receipt(Element) of + undefined -> + ok; + Receipt -> + case blockchain_poc_receipt_v1:addr_hash(Receipt) of + undefined -> + ok; + Hash -> + bloom:set(Bloom, Hash) + end + end. + +%% ------------------------------------------------------------------ +%% DB functions +%% ------------------------------------------------------------------ + +%%-spec append_local_poc(NewLocalPOC :: local_poc(), +%% State :: state()) -> ok | {error, any()}. 
+%%append_local_poc(#local_poc{onion_key_hash=OnionKeyHash} = NewLocalPOC, #state{db=DB, cf=CF}=State) -> +%% case ?MODULE:local_poc(OnionKeyHash) of +%% {ok, SavedLocalPOCs} -> +%% %% check we're not writing something we already have +%% case lists:member(NewLocalPOC, SavedLocalPOCs) of +%% true -> +%% ok; +%% false -> +%% ToInsert = erlang:term_to_binary([NewLocalPOC | SavedLocalPOCs]), +%% rocksdb:put(DB, CF, OnionKeyHash, ToInsert, []) +%% end; +%% {error, not_found} -> +%% ToInsert = erlang:term_to_binary([NewLocalPOC]), +%% rocksdb:put(DB, CF, OnionKeyHash, ToInsert, []); +%% {error, _}=E -> +%% E +%% end. + +local_pocs(#state{db=DB, cf=CF}) -> + {ok, Itr} = rocksdb:iterator(DB, CF, []), + local_pocs(Itr, rocksdb:iterator_move(Itr, first), []). + +local_pocs(Itr, {error, invalid_iterator}, Acc) -> + catch rocksdb:iterator_close(Itr), + Acc; +local_pocs(Itr, {ok, _, LocalPOCBin}, Acc) -> + local_pocs(Itr, rocksdb:iterator_move(Itr, next), [binary_to_term(LocalPOCBin)|Acc]). + +-spec write_local_poc( LocalPOC ::local_poc(), + State :: state()) -> ok. +write_local_poc(#local_poc{onion_key_hash=OnionKeyHash} = LocalPOC, #state{db=DB, cf=CF}) -> + ToInsert = erlang:term_to_binary([LocalPOC]), + rocksdb:put(DB, CF, OnionKeyHash, ToInsert, []). + +-spec delete_local_poc( OnionKeyHash ::binary(), + State :: state()) -> ok. +delete_local_poc(OnionKeyHash, #state{db=DB, cf=CF}) -> + rocksdb:delete(DB, CF, OnionKeyHash, []). 
diff --git a/src/poc/miner_poc_mgr_db_owner.erl b/src/poc/miner_poc_mgr_db_owner.erl new file mode 100644 index 000000000..0e94da466 --- /dev/null +++ b/src/poc/miner_poc_mgr_db_owner.erl @@ -0,0 +1,189 @@ +%%%------------------------------------------------------------------- +%% @doc +%% == poc mgr db owner and related functions == +%% +%% * This process is started first in the miner supervision tree +%% * POC mgr will get the db reference from here when they init +%% * This process also traps exits and closes rocksdb (if need be) +%% * This process is responsible for serializing local POC updates to disk in a +%% batch write each write interval (currently 1000 millis) +%% * local POCs are POC which are running and active on this validator +%% +%% @end +%%%------------------------------------------------------------------- +-module(miner_poc_mgr_db_owner). + +-behavior(gen_server). + +%% api exports +-export([start_link/1, + db/0, + poc_mgr_cf/0, + write/2, + gc/1 + ]). + +%% gen_server exports +-export([ + init/1, + handle_call/3, + handle_cast/2, + handle_info/2, + terminate/2, + code_change/3 +]). + +-define(DB_FILE, "poc_mgr.db"). +-define(TICK, '__poc_write_tick'). + +-record(state, { + db :: rocksdb:db_handle(), + default :: rocksdb:cf_handle(), + poc_mgr_cf :: rocksdb:cf_handle(), + write_interval = 1000 :: pos_integer(), + tref :: reference(), + pending = #{} :: maps:map() + }). + +%% api functions +start_link(Args) -> + gen_server:start_link({local, ?MODULE}, ?MODULE, Args, []). + +-spec db() -> rocksdb:db_handle(). +db() -> + gen_server:call(?MODULE, db). + +-spec poc_mgr_cf() -> rocksdb:cf_handle(). +poc_mgr_cf() -> + gen_server:call(?MODULE, poc_mgr_cf). + +-spec write( POC :: miner_poc_mgr:local_poc(), + Skewed :: skewed:skewed()) -> ok. +write(POC, Skewed) -> + gen_server:cast(?MODULE, {write, POC, Skewed}). + +-spec gc( [ miner_poc_mgr:local_poc_onion_key_hash() ] ) -> ok. +gc(IDs) -> + gen_server:call(?MODULE, {gc, IDs}, infinity). 
+ +%% gen_server callbacks +init(Args) -> + lager:info("~p init with ~p", [?MODULE, Args]), + erlang:process_flag(trap_exit, true), + BaseDir = maps:get(base_dir, Args), + CFs = maps:get(cfs, Args, ["default", "poc_mgr_cf"]), + {ok, DB, [DefaultCF, POCMgrCF]} = open_db(BaseDir, CFs), + WriteInterval = get_env(poc_mgr_write_interval, 100), + Tref = schedule_next_tick(WriteInterval), + {ok, #state{db=DB, default=DefaultCF, poc_mgr_cf=POCMgrCF, + tref=Tref, write_interval=WriteInterval}}. + +handle_call(db, _From, #state{db=DB}=State) -> + {reply, DB, State}; +handle_call(poc_mgr_cf, _From, #state{poc_mgr_cf=CF}=State) -> + {reply, CF, State}; +handle_call({gc, IDs}, _From, #state{pending=P, db=DB}=State)-> + {ok, Batch} = rocksdb:batch(), + ok = lists:foreach(fun(POCID) -> + ok = rocksdb:batch_delete(Batch, POCID) + end, IDs), + ok = rocksdb:write_batch(DB, Batch, []), + ok = rocksdb:release_batch(Batch), + {reply, ok, State#state{pending=maps:without(IDs, P)}}; +handle_call(_Msg, _From, State) -> + lager:warning("rcvd unknown call msg: ~p from: ~p", [_Msg, _From]), + {reply, ok, State}. + +handle_cast({write, POC, Skewed}, #state{pending=P}=State) -> + POCID = miner_poc_mgr:local_poc_key(POC), + %% defer encoding until write time + NewP = maps:put(POCID, {POC, Skewed}, P), + {noreply, State#state{pending=NewP}}; +handle_cast(_Msg, State) -> + lager:warning("rcvd unknown cast msg: ~p", [_Msg]), + {noreply, State}. 
+ +handle_info({'EXIT', _From, _Reason} , #state{db=DB}=State) -> + lager:info("EXIT because: ~p, closing rocks: ~p", [_Reason, DB]), + ok = rocksdb:close(DB), + {stop, db_owner_exit, State}; +handle_info(?TICK, #state{pending=P, write_interval=W}=State) when map_size(P) == 0 -> + Tref = schedule_next_tick(W), + {noreply, State#state{tref=Tref}}; +handle_info(?TICK, #state{pending=P, db=DB, + write_interval=W}=State) -> + lager:info("~p pending writes this tick", [map_size(P)]), + ok = handle_batch_write(DB, P), + Tref = schedule_next_tick(W), + {noreply, State#state{tref=Tref, pending=#{}}}; +handle_info(_Msg, State) -> + lager:warning("rcvd unknown info msg: ~p", [_Msg]), + {noreply, State}. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +terminate(_Reason, #state{db=DB, + pending=P}) when map_size(P) == 0 -> + ok = rocksdb:close(DB), + ok; +terminate(_Reason, #state{db=DB, + pending=P}) -> + ok = handle_batch_write(DB, P), + ok = rocksdb:close(DB), + ok. + +%% Helper functions +-spec open_db(Dir::file:filename_all(), + CFNames::[string()]) -> {ok, rocksdb:db_handle(), [rocksdb:cf_handle()]} | + {error, any()}. +open_db(Dir, CFNames) -> + ok = filelib:ensure_dir(Dir), + DBDir = filename:join(Dir, ?DB_FILE), + GlobalOpts = application:get_env(rocksdb, global_opts, []), + DBOptions = [{create_if_missing, true}, {atomic_flush, true}] ++ GlobalOpts, + ExistingCFs = + case rocksdb:list_column_families(DBDir, DBOptions) of + {ok, CFs0} -> + CFs0; + {error, _} -> + ["default"] + end, + + CFOpts = GlobalOpts, + case rocksdb:open_with_cf(DBDir, DBOptions, [{CF, CFOpts} || CF <- ExistingCFs]) of + {error, _Reason}=Error -> + Error; + {ok, DB, OpenedCFs} -> + L1 = lists:zip(ExistingCFs, OpenedCFs), + L2 = lists:map( + fun(CF) -> + {ok, CF1} = rocksdb:create_column_family(DB, CF, CFOpts), + {CF, CF1} + end, + CFNames -- ExistingCFs + ), + L3 = L1 ++ L2, + {ok, DB, [proplists:get_value(X, L3) || X <- CFNames]} + end. 
+ +schedule_next_tick(Interval) -> + erlang:send_after(Interval, self(), ?TICK). + +handle_batch_write(DB, P) -> + {ok, Batch} = rocksdb:batch(), + ok = maps:fold(fun(POCID, {POC, Skewed}, Acc) -> + Bin = term_to_binary({POC, + Skewed}), + ok = rocksdb:batch_put(Batch, POCID, Bin), + Acc + end, ok, P), + Res = rocksdb:write_batch(DB, Batch, []), + ok = rocksdb:release_batch(Batch), + Res. + +get_env(Key, Default) -> + case application:get_env(miner, Key, Default) of + {ok, X} -> X; + Default -> Default + end. diff --git a/src/poc/miner_poc_report_handler.erl b/src/poc/miner_poc_report_handler.erl new file mode 100644 index 000000000..932d1a1d9 --- /dev/null +++ b/src/poc/miner_poc_report_handler.erl @@ -0,0 +1,106 @@ +%%%------------------------------------------------------------------- +%% @doc +%% == Miner POC Stream Handler == +%% used to relay a receipt or witness report received by a validator +%% onto the actual challenger +%% @end +%%%------------------------------------------------------------------- +-module(miner_poc_report_handler). + +-behavior(libp2p_framed_stream). + +%% ------------------------------------------------------------------ +%% API Function Exports +%% ------------------------------------------------------------------ + +-export([ + server/4, + client/2, + decode/1 +]). + +%% ------------------------------------------------------------------ +%% libp2p_framed_stream Function Exports +%% ------------------------------------------------------------------ +-export([ + init/3, + handle_data/3, + handle_info/3, + send/2 +]). + +-record(state, { + peer :: undefined | libp2p_crypto:pubkey_bin(), + peer_addr :: undefined | string() +}). + +%% ------------------------------------------------------------------ +%% API Function Definitions +%% ------------------------------------------------------------------ +client(Connection, Args) -> + libp2p_framed_stream:client(?MODULE, Connection, Args). 
+ +server(Connection, Path, _TID, Args) -> + libp2p_framed_stream:server(?MODULE, Connection, [Path | Args]). + +send(Pid, Data) -> + Pid ! {send, Data}. + +decode(Data) -> + try blockchain_poc_response_v1:decode(Data) of + Res -> Res + catch + _:_ -> + lager:error("got unknown data ~p", [Data]), + {error, failed_to_decode_report} + end. +%% ------------------------------------------------------------------ +%% libp2p_framed_stream Function Definitions +%% ------------------------------------------------------------------ +init(client, _Conn, _Args) -> + {ok, #state{}}; +init(server, Conn, _Args) -> + {_, PeerAddr} = libp2p_connection:addr_info(Conn), + {ok, #state{peer = identify(Conn), peer_addr = PeerAddr}}. + +handle_data(client, Data, State) -> + lager:info("client got data: ~p", [Data]), + %% client should not receive data + {stop, normal, State}; +handle_data(server, Payload, #state{peer = SelfPeer} = State) -> + {OnionKeyHash, Data} = binary_to_term(Payload), + lager:info("server got data, OnionKeyHash: ~p, Report: ~p", [OnionKeyHash, Data]), + P2PAddr = libp2p_crypto:pubkey_bin_to_p2p(SelfPeer), + try ?MODULE:decode(Data) of + {witness, _} = Report -> + ok = miner_poc_mgr:report(Report, OnionKeyHash, SelfPeer, P2PAddr); + {receipt, _} = Report -> + ok = miner_poc_mgr:report(Report, OnionKeyHash, SelfPeer, P2PAddr) + catch + _:_ -> + lager:error("got unknown data ~p", [Data]) + end, + %% we only expect one receipt/witness from the peer at a time + {stop, normal, State}. + +handle_info(client, {send, Data}, State) -> + lager:info("client sending data: ~p", [Data]), + %% send one and done + {stop, normal, State, Data}; +handle_info(_Type, _Msg, State) -> + lager:info("rcvd unknown type: ~p unknown msg: ~p", [_Type, _Msg]), + %% unexpected input, just close + {stop, normal, State}. 
+ +identify(Conn) -> + case libp2p_connection:session(Conn) of + {ok, Session} -> + libp2p_session:identify(Session, self(), ?MODULE), + receive + {handle_identify, ?MODULE, {ok, Identify}} -> + libp2p_identify:pubkey_bin(Identify) + after 10000 -> erlang:error(failed_identify_timeout) + end; + {error, closed} -> + erlang:error(dead_session) + end. diff --git a/src/poc/miner_poc_statem.erl b/src/poc/miner_poc_statem.erl index 1469ba30c..956ef6496 100644 --- a/src/poc/miner_poc_statem.erl +++ b/src/poc/miner_poc_statem.erl @@ -121,7 +121,7 @@ witness(Address, Data) -> %% ------------------------------------------------------------------ init(Args) -> ok = blockchain_event:add_handler(self()), - ok = miner_poc:add_stream_handler(blockchain_swarm:tid()), + ok = miner_poc:add_stream_handler(blockchain_swarm:tid(), miner_poc_handler), ok = miner_onion:add_stream_handler(blockchain_swarm:tid()), Address = blockchain_swarm:pubkey_bin(), Blockchain = blockchain_worker:blockchain(), @@ -191,6 +191,13 @@ requesting(info, Msg, #data{blockchain = Chain} = Data) when Chain =:= undefined lager:warning("dropped ~p cause chain is still undefined", [Msg]), {keep_state, Data}; NewChain -> + Ledger = blockchain:ledger(NewChain), + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, validator} -> + ok; + _ -> + ok = miner_poc:add_stream_handler(blockchain_swarm:tid(), miner_poc_handler) + end, {keep_state, Data#data{blockchain=NewChain}, [{next_event, info, Msg}]} end; requesting(info, {blockchain_event, {add_block, BlockHash, Sync, Ledger}} = Msg, @@ -854,34 +861,39 @@ allow_request(BlockHash, #data{blockchain=Blockchain, POCInterval0 end, try - case blockchain_ledger_v1:find_gateway_info(Address, Ledger) of - {ok, GwInfo} -> - GwMode = blockchain_ledger_gateway_v2:mode(GwInfo), - case blockchain_ledger_gateway_v2:is_valid_capability(GwMode, ?GW_CAPABILITY_POC_CHALLENGER, Ledger) of - true -> - {ok, Block} = blockchain:get_block(BlockHash, Blockchain), - Height = 
blockchain_block:height(Block), - ChallengeOK = - case blockchain_ledger_gateway_v2:last_poc_challenge(GwInfo) of - undefined -> - lager:info("got block ~p @ height ~p (never challenged before)", [BlockHash, Height]), - true; - LastChallenge -> - case (Height - LastChallenge) > POCInterval of - true -> 1 == rand:uniform(max(10, POCInterval div 10)); - false -> false - end - end, - LocationOK = true, - LocationOK = miner_lora:location_ok(), - ChallengeOK andalso LocationOK; + case blockchain:config(?poc_challenger_type, Ledger) of + {ok, validator} -> + false; + _ -> + case blockchain_ledger_v1:find_gateway_info(Address, Ledger) of + {ok, GwInfo} -> + GwMode = blockchain_ledger_gateway_v2:mode(GwInfo), + case blockchain_ledger_gateway_v2:is_valid_capability(GwMode, ?GW_CAPABILITY_POC_CHALLENGER, Ledger) of + true -> + {ok, Block} = blockchain:get_block(BlockHash, Blockchain), + Height = blockchain_block:height(Block), + ChallengeOK = + case blockchain_ledger_gateway_v2:last_poc_challenge(GwInfo) of + undefined -> + lager:info("got block ~p @ height ~p (never challenged before)", [BlockHash, Height]), + true; + LastChallenge -> + case (Height - LastChallenge) > POCInterval of + true -> 1 == rand:uniform(max(10, POCInterval div 10)); + false -> false + end + end, + LocationOK = true, + LocationOK = miner_lora:location_ok(), + ChallengeOK andalso LocationOK; + _ -> + %% the GW is not allowed to send POC challenges + false + end; + %% mostly this is going to be unasserted full nodes _ -> - %% the GW is not allowed to send POC challenges false - end; - %% mostly this is going to be unasserted full nodes - _ -> - false + end end catch Class:Err:Stack -> lager:warning("error determining if request allowed: ~p:~p ~p", diff --git a/test/miner_ct_utils.erl b/test/miner_ct_utils.erl index 645795020..21f80e1a7 100644 --- a/test/miner_ct_utils.erl +++ b/test/miner_ct_utils.erl @@ -4,6 +4,7 @@ -include_lib("eunit/include/eunit.hrl"). 
-include_lib("blockchain/include/blockchain_vars.hrl"). -include_lib("blockchain/include/blockchain.hrl"). +-include_lib("blockchain/include/blockchain_txn_fees.hrl"). -include("miner_ct_macros.hrl"). -define(BASE_TMP_DIR, "./_build/test/tmp"). @@ -15,6 +16,7 @@ pmap/2, pmap/3, wait_until/1, wait_until/3, wait_until_disconnected/2, + wait_until_local_height/1, get_addrs/1, start_miner/2, start_node/1, @@ -36,6 +38,7 @@ init_base_dir_config/3, generate_keys/1, new_random_key/1, + new_random_key_with_sig_fun/1, stop_miners/1, stop_miners/2, start_miners/1, start_miners/2, height/1, @@ -50,6 +53,8 @@ shuffle/1, partition_miners/2, node2addr/2, + node2sigfun/2, + node2pubkeybin/2, addr2node/2, addr_list/1, blockchain_worker_check/1, @@ -58,6 +63,7 @@ wait_for_app_stop/2, wait_for_app_stop/3, wait_for_in_consensus/2, wait_for_in_consensus/3, wait_for_chain_var_update/3, wait_for_chain_var_update/4, + wait_for_lora_port/3, delete_dirs/2, initial_dkg/5, initial_dkg/6, confirm_balance/3, @@ -84,7 +90,8 @@ build_asserts/2, add_block/3, gen_gateways/2, gen_payments/1, gen_locations/1, - existing_vars/0, start_blockchain/2 + existing_vars/0, start_blockchain/2, + create_block/2 ]). chain_var_lookup_all(Key, Nodes) -> @@ -199,6 +206,18 @@ wait_for_equalized_heights(Miners) -> [Height] = UniqueHeights, Height. +wait_until_local_height(TargetHeight) -> + miner_ct_utils:wait_until( + fun() -> + C = blockchain_worker:blockchain(), + {ok, CurHeight} = blockchain:height(C), + ct:pal("local height ~p", [CurHeight]), + CurHeight >= TargetHeight + end, + 30, + timer:seconds(1) + ). + stop_miners(Miners) -> stop_miners(Miners, 60). @@ -493,6 +512,25 @@ wait_for_chain_var_update(Miners, Key, Value, Retries)-> Else -> Else end. 
+wait_for_lora_port(Miners, Mod, Retries)-> + ?noAssertAsync(begin + lists:all( + fun(Miner) -> + try + case ct_rpc:call(Miner, Mod, port, []) of + {error, _} -> + ct:pal("Failed to find lora port ~p via module ~p", [Miner, Mod]), + false; + _ -> true + end + catch _:_ -> + ct:pal("Failed to find lora port ~p", [Miner]), + false + end + end, Miners) + end, + Retries, timer:seconds(1)). + delete_dirs(DirWildcard, SubDir)-> Dirs = filelib:wildcard(DirWildcard), [begin @@ -607,9 +645,9 @@ start_node(Name) -> %% have the slave nodes monitor the runner node, so they can't outlive it NodeConfig = [ {monitor_master, true}, - {boot_timeout, 10}, - {init_timeout, 10}, - {startup_timeout, 10}, + {boot_timeout, 30}, + {init_timeout, 30}, + {startup_timeout, 30}, {startup_functions, [ {code, set_path, [CodePath]} ]}], @@ -696,6 +734,13 @@ shuffle(List) -> S. +node2sigfun(Node, KeyList) -> + {_Miner, {_TCPPort, _UDPPort, _JsonRpcPort}, _ECDH, _PubKey, _Addr, SigFun} = lists:keyfind(Node, 1, KeyList), + SigFun. + +node2pubkeybin(Node, KeyList) -> + {_Miner, {_TCPPort, _UDPPort, _JsonRpcPort}, _ECDH, _PubKey, Addr, _SigFun} = lists:keyfind(Node, 1, KeyList), + Addr. 
node2addr(Node, AddrList) -> {_, Addr} = lists:keyfind(Node, 1, AddrList), @@ -723,6 +768,10 @@ init_per_testcase(Mod, TestCase, Config0) -> Config = init_base_dir_config(Mod, TestCase, Config0), BaseDir = ?config(base_dir, Config), LogDir = ?config(log_dir, Config), + SplitMiners = proplists:get_value(split_miners_vals_and_gateways, Config, false), + NumValidators = proplists:get_value(num_validators, Config, 0), + LoadChainOnGateways = proplists:get_value(gateways_run_chain, Config, true), + os:cmd(os:find_executable("epmd")++" -daemon"), {ok, Hostname} = inet:gethostname(), @@ -737,23 +786,27 @@ init_per_testcase(Mod, TestCase, Config0) -> %% Miner configuration, can be input from os env TotalMiners = case TestCase of - restart_test -> - 4; + poc_grpc_dist_v11_test -> + 11; %% 5 vals, 6 gateways + poc_grpc_dist_v11_cn_test -> + 13; %% 5 vals, 8 gateways + poc_grpc_dist_v11_partitioned_test -> + 13; %% 5 vals, 8 gateways + poc_grpc_dist_v11_partitioned_lying_test -> + 13; %% 5 vals, 8 gateways _ -> get_config("T", 8) end, NumConsensusMembers = case TestCase of - group_change_test -> - 4; - restart_test -> - 4; - validator_transition_test -> - 4; - autoskip_chain_vars_test -> - 4; - autoskip_on_timeout_test -> - 4; + poc_grpc_dist_v11_test -> + NumValidators; + poc_grpc_dist_v11_cn_test -> + NumValidators; + poc_grpc_dist_v11_partitioned_test -> + NumValidators; + poc_grpc_dist_v11_partitioned_lying_test -> + NumValidators; _ -> get_config("N", 7) end, @@ -765,13 +818,15 @@ init_per_testcase(Mod, TestCase, Config0) -> BatchSize = get_config("BS", 500), Interval = get_config("INT", 5), - MinersAndPorts = miner_ct_utils:pmap( - fun(I) -> + MinersAndPorts = lists:reverse(lists:foldl( + fun(I, Acc) -> MinerName = list_to_atom(integer_to_list(I) ++ miner_ct_utils:randname(5)), - {start_node(MinerName), {45000, 0, JsonRpcBase + I}} + [{start_node(MinerName), {45000, 0, JsonRpcBase + I}} | Acc] end, + [], lists:seq(1, TotalMiners) - ), + )), + ct:pal("MinersAndPorts: 
~p",[MinersAndPorts]), case lists:any(fun({{error, _}, _}) -> true; (_) -> false end, MinersAndPorts) of true -> @@ -782,10 +837,12 @@ init_per_testcase(Mod, TestCase, Config0) -> ok end, - Keys = miner_ct_utils:pmap( - fun({Miner, Ports}) -> - make_keys(Miner, Ports) - end, MinersAndPorts), + Keys = lists:reverse(lists:foldl( + fun({Miner, Ports}, Acc) -> + [make_keys(Miner, Ports) | Acc] + end, [], MinersAndPorts)), + + ct:pal("Keys: ~p", [Keys]), {_Miner, {_TCPPort, _UDPPort, _JsonRpcPort}, _ECDH, _PubKey, Addr, _SigFun} = hd(Keys), DefaultRouters = libp2p_crypto:pubkey_bin_to_p2p(Addr), @@ -799,9 +856,41 @@ init_per_testcase(Mod, TestCase, Config0) -> {default_routers, DefaultRouters}, {port, Port}], - ConfigResult = miner_ct_utils:pmap(fun(N) -> config_node(N, Options) end, Keys), + %% config nodes + ConfigResult = + case SplitMiners of + true -> + %% if config says to use validators for CG then + %% split key sets into validators and miners + %% first batch of keys up to NumConsensusMembers will be validators, rest gateways/miners + {ValKeys, GatewayKeys} = lists:split(NumValidators, Keys), + ct:pal("validator keys: ~p", [ValKeys]), + ct:pal("gateway keys: ~p", [GatewayKeys]), + %% carry the poc transport setting through to config node so that it can + %% set the app env var appropiately on each node + _GatewayConfigResult = miner_ct_utils:pmap(fun(N) -> config_node(N, [{gateways_run_chain, LoadChainOnGateways}, {mode, gateway} | Options]) end, GatewayKeys), + ValConfigResult = miner_ct_utils:pmap(fun(N) -> config_node(N, [{gateways_run_chain, LoadChainOnGateways}, {mode, validator} | Options]) end, ValKeys), + ValConfigResult; + _ -> + miner_ct_utils:pmap(fun(N) -> config_node(N, [{gateways_run_chain, LoadChainOnGateways}, {mode, validator} | Options]) end, Keys) + end, Miners = [M || {M, _} <- MinersAndPorts], + ct:pal("Miners: ~p", [Miners]), + + %% get a sep list of validator and gateway node names + %% if SplitMiners is false then all miners will be 
gateways + {Validators, Gateways} = + case SplitMiners of + true -> + lists:split(NumValidators, Miners); + _ -> + {[], Miners} + end, + + ct:pal("Validators: ~p", [Validators]), + ct:pal("Gateways: ~p", [Gateways]), + %% check that the config loaded correctly on each miner true = lists:all( fun(ok) -> true; @@ -812,10 +901,24 @@ init_per_testcase(Mod, TestCase, Config0) -> ConfigResult ), + %% hardcode some alias for our localhost miners + %% sibyl will not return routing data unless a miner/validator has a public address + %% so force an alias for each of our miners to a public IP + MinerAliases = lists:foldl( + fun({_, _, _, _, AliasAddr, _}, Acc) -> + P2PAddr = libp2p_crypto:pubkey_bin_to_p2p(AliasAddr), + [{P2PAddr, "/ip4/52.8.80.146/tcp/2154" } | Acc] + end, [], Keys), + ct:pal("miner aliases ~p", [MinerAliases]), + lists:foreach(fun(Miner)-> ct_rpc:call(Miner, application, set_env, [libp2p, node_aliases, MinerAliases]) end, Miners), + Addrs = get_addrs(Miners), + ct:pal("Addrs: ~p", [Addrs]), miner_ct_utils:pmap( fun(Miner) -> + TID = ct_rpc:call(Miner, blockchain_swarm, tid, [], 2000), + ct_rpc:call(Miner, miner_poc, add_stream_handler, [TID], 2000), Swarm = ct_rpc:call(Miner, blockchain_swarm, swarm, [], 2000), lists:foreach( fun(A) -> @@ -849,38 +952,158 @@ init_per_testcase(Mod, TestCase, Config0) -> end, Miners) end, 200, 150), + %% to enable the tests to run over grpc we need to deterministically set the grpc listen addr + %% with libp2p all the port data is in the peer entries + %% in the real world we would run grpc over a known port + %% but for the sake of the tests which run multiple nodes on a single instance + %% we need to choose a random port for each node + %% and the client needs to know which port was choosen + %% so for the sake of the tests what we do here is get the libp2p port + %% and run grpc on that value + 1000 + %% the client then just has to pull the libp2p peer data + %% retrieve the libp2p port and derive the grpc port from that 
+ + GRPCServerConfigFun = fun(PeerPort)-> + [#{grpc_opts => #{service_protos => [gateway_pb], + services => #{'helium.gateway' => helium_gateway_service} + }, + + transport_opts => #{ssl => false}, + + listen_opts => #{port => PeerPort, + ip => {0,0,0,0}}, + + pool_opts => #{size => 2}, + + server_opts => #{header_table_size => 4096, + enable_push => 1, + max_concurrent_streams => unlimited, + initial_window_size => 65535, + max_frame_size => 16384, + max_header_list_size => unlimited}}] + end, + ok = lists:foreach(fun(Node) -> + Swarm = ct_rpc:call(Node, blockchain_swarm, swarm, []), + TID = ct_rpc:call(Node, blockchain_swarm, tid, []), + ListenAddrs = ct_rpc:call(Node, libp2p_swarm, listen_addrs, [Swarm]), + [H | _ ] = _SortedAddrs = ct_rpc:call(Node, libp2p_transport, sort_addrs, [TID, ListenAddrs]), + [_, _, _IP,_, Libp2pPort] = _Full = re:split(H, "/"), + ThisPort = list_to_integer(binary_to_list(Libp2pPort)), + _ = ct_rpc:call(Node, application, set_env, [grpcbox, servers, GRPCServerConfigFun(ThisPort + 1000)]), + _ = ct_rpc:call(Node, application, ensure_all_started, [grpcbox]), + ok + + end, Miners), + + %% setup a bunch of aliases for the running miner grpc hosts + %% as per above, each such port will be the equivilent libp2p port + 1000 + %% these grpc aliases are added purely for testing purposes + %% no current need to support in the wild + MinerGRPCPortAliases = lists:foldl( + fun({Miner, _, _, _, GrpcAliasAddr, _}, Acc) -> + P2PAddr = libp2p_crypto:pubkey_bin_to_p2p(GrpcAliasAddr), + Swarm = ct_rpc:call(Miner, blockchain_swarm, swarm, []), + TID = ct_rpc:call(Miner, blockchain_swarm, tid, []), + ListenAddrs = ct_rpc:call(Miner, libp2p_swarm, listen_addrs, [Swarm]), + [H | _ ] = _SortedAddrs = ct_rpc:call(Miner, libp2p_transport, sort_addrs, [TID, ListenAddrs]), + [_, _, _IP,_, Libp2pPort] = _Full = re:split(H, "/"), + GrpcPort = list_to_integer(binary_to_list(Libp2pPort)) + 1000, + [{P2PAddr, {GrpcPort, false}} | Acc] + end, [], Keys), + ct:pal("miner 
grpc port aliases ~p", [MinerGRPCPortAliases]), + + %% create a list of validators and for each their p2p addr, ip addr and grpc port + %% use this list to set an app env var to provide a list of default validators to which + %% gateways can connect + %% only used when testing grpc gateways + SeedValidators = lists:foldl( + fun({Miner, _, _, _, ValAddr, _}, Acc) -> + P2PAddr = libp2p_crypto:pubkey_bin_to_p2p(ValAddr), + Swarm = ct_rpc:call(Miner, blockchain_swarm, swarm, []), + TID = ct_rpc:call(Miner, blockchain_swarm, tid, []), + ListenAddrs = ct_rpc:call(Miner, libp2p_swarm, listen_addrs, [Swarm]), + [H | _ ] = _SortedAddrs = ct_rpc:call(Miner, libp2p_transport, sort_addrs, [TID, ListenAddrs]), + [_, _, _IP,_, Libp2pPort] = _Full = re:split(H, "/"), + GrpcPort = list_to_integer(binary_to_list(Libp2pPort)) + 1000, + [{P2PAddr, "127.0.0.1", GrpcPort} | Acc] + end, [], Keys), + ct:pal("seed validators: ~p", [SeedValidators]), + + %% set any required env vars for grpc gateways + lists:foreach(fun(Gateway)-> + ct_rpc:call(Gateway, application, set_env, [miner, seed_validators, SeedValidators]), + ct_rpc:call(Gateway, application, set_env, [miner, gateways_run_chain, LoadChainOnGateways]) + end, Gateways), + + %% set any required env vars for validators + lists:foreach(fun(Val)-> + ct_rpc:call(Val, application, set_env, [sibyl, node_grpc_port_aliases, MinerGRPCPortAliases]), + ct_rpc:call(Val, application, set_env, [sibyl, poc_mgr_mod, miner_poc_mgr]), + ct_rpc:call(Val, application, set_env, [sibyl, poc_report_handler, miner_poc_report_handler]) + end, Validators), + %% accumulate the address of each miner - MinerTaggedAddresses = lists:foldl( + MinerTaggedAddresses = lists:reverse(lists:foldl( fun(Miner, Acc) -> - Address = ct_rpc:call(Miner, blockchain_swarm, pubkey_bin, []), - [{Miner, Address} | Acc] + PubKeyBin = ct_rpc:call(Miner, blockchain_swarm, pubkey_bin, []), + [{Miner, PubKeyBin} | Acc] end, [], Miners - ), + )), + ct:pal("MinerTaggedAddresses: ~p", 
[MinerTaggedAddresses]), + %% save a version of the address list with the miner and address tuple %% and then a version with just a list of addresses {_Keys, Addresses} = lists:unzip(MinerTaggedAddresses), + {ValidatorAddrs, GatewayAddrs} = + case SplitMiners of + true -> + lists:split(NumValidators, Addresses); + _ -> + {[], Addresses} + end, + + ct:pal("Validator Addrs: ~p", [ValidatorAddrs]), + ct:pal("Gateway Addrs: ~p", [GatewayAddrs]), + {ok, _} = ct_cover:add_nodes(Miners), %% wait until we get confirmation the miners are fully up %% which we are determining by the miner_consensus_mgr being registered - %% QUESTION: is there a better process to use to determine things are healthy - %% and which works for both in consensus and non consensus miners? - ok = miner_ct_utils:wait_for_registration(Miners, miner_consensus_mgr), - %ok = miner_ct_utils:wait_for_registration(Miners, blockchain_worker), + %% if we have a split of validators and gateways, we only need to wait on the validators + %% otherwise wait for all gateways + case SplitMiners of + true -> + ok = miner_ct_utils:wait_for_registration(Validators, miner_consensus_mgr); + false -> + ok = miner_ct_utils:wait_for_registration(Miners, miner_consensus_mgr) + end, - UpdatedMinersAndPorts = lists:map(fun({Miner, {TCPPort, _, JsonRpcPort}}) -> - {ok, RandomPort} = ct_rpc:call(Miner, miner_lora, port, []), - ct:pal("~p is listening for packet forwarder on ~p", [Miner, RandomPort]), - {Miner, {TCPPort, RandomPort, JsonRpcPort}} - end, MinersAndPorts), + %% get a sep list of ports for validators and gateways + {ValidatorPorts, GatewayPorts} = + case SplitMiners of + true -> + lists:split(NumValidators, MinersAndPorts); + _ -> + {[], MinersAndPorts} + end, + UpdatedValidatorPorts = lists:map(fun({Miner, {TCPPort, _, JsonRpcPort}}) -> + {Miner, {TCPPort, ignore, JsonRpcPort}} + end, ValidatorPorts), [ {miners, Miners}, + {validators, Validators}, + {gateways, Gateways}, + {validator_addrs, ValidatorAddrs}, + 
{gateway_addrs, GatewayAddrs}, + {addrs, Addrs}, {keys, Keys}, - {ports, UpdatedMinersAndPorts}, + {ports, UpdatedValidatorPorts ++ GatewayPorts}, + {validator_ports, UpdatedValidatorPorts}, + {gateway_ports, GatewayPorts}, {node_options, Options}, {addresses, Addresses}, {tagged_miner_addresses, MinerTaggedAddresses}, @@ -894,17 +1117,19 @@ init_per_testcase(Mod, TestCase, Config0) -> ]. get_addrs(Miners) -> - miner_ct_utils:pmap( - fun(Miner) -> + lists:foldl( + fun(Miner, Acc) -> Swarm = ct_rpc:call(Miner, blockchain_swarm, swarm, [], 2000), + ct:pal("swarm ~p ~p", [Miner, Swarm]), true = miner_ct_utils:wait_until( fun() -> - length(ct_rpc:call(Miner, libp2p_swarm, listen_addrs, [Swarm], 2000)) > 0 - end), - ct:pal("swarm ~p ~p", [Miner, Swarm]), - [H|_] = ct_rpc:call(Miner, libp2p_swarm, listen_addrs, [Swarm], 2000), - H - end, Miners). + length(ct_rpc:call(Miner, libp2p_swarm, listen_addrs, [Swarm], 5000)) > 0 + end, 20, 1000), + + [H|_] = ct_rpc:call(Miner, libp2p_swarm, listen_addrs, [Swarm], 5000), + ct:pal("miner ~p has addr ~p", [Miner, H]), + [H | Acc] + end, [], Miners). 
make_keys(Miner, Ports) -> #{secret := GPriv, public := GPub} = @@ -923,6 +1148,9 @@ config_node({Miner, {TCPPort, UDPPort, JSONRPCPort}, ECDH, PubKey, _Addr, SigFun Curve = proplists:get_value(curve, Options), DefaultRouters = proplists:get_value(default_routers, Options), Port = proplists:get_value(port, Options), + Mode = proplists:get_value(mode, Options, gateway), + LoadChainOnGateways = proplists:get_value(gateways_run_chain, Options, true), + ct:pal("Miner ~p", [Miner]), ct_rpc:call(Miner, cover, start, []), @@ -959,7 +1187,9 @@ config_node({Miner, {TCPPort, UDPPort, JSONRPCPort}, ECDH, PubKey, _Addr, SigFun %% set miner configuration ct_rpc:call(Miner, application, set_env, [miner, curve, Curve]), ct_rpc:call(Miner, application, set_env, [miner, jsonrpc_port, JSONRPCPort]), - ct_rpc:call(Miner, application, set_env, [miner, mode, validator]), + ct_rpc:call(Miner, application, set_env, [miner, mode, Mode]), + ct_rpc:call(Miner, application, set_env, [miner, gateways_run_chain, LoadChainOnGateways]), + ct_rpc:call(Miner, application, set_env, [miner, radio_device, {{127,0,0,1}, UDPPort, {127,0,0,1}, TCPPort}]), ct_rpc:call(Miner, application, set_env, [miner, stabilization_period_start, 2]), ct_rpc:call(Miner, application, set_env, [miner, default_routers, [DefaultRouters]]), @@ -990,7 +1220,8 @@ end_per_testcase(TestCase, Config) -> case ?config(tc_status, Config) of ok -> %% test passed, we can cleanup - cleanup_per_testcase(TestCase, Config); +%% cleanup_per_testcase(TestCase, Config), + ok; _ -> %% leave results alone for analysis ok @@ -1230,6 +1461,10 @@ new_random_key(Curve) -> #{secret := PrivKey, public := PubKey} = libp2p_crypto:generate_keys(Curve), {PrivKey, PubKey}. +new_random_key_with_sig_fun(Curve) -> + #{secret := PrivKey, public := PubKey} = libp2p_crypto:generate_keys(Curve), + SigFun = libp2p_crypto:mk_sig_fun(PrivKey), + {PrivKey, PubKey, SigFun}. 
%%-------------------------------------------------------------------- %% @doc @@ -1513,7 +1748,8 @@ create_block(ConsensusMembers, Txs) -> election_epoch => 1, epoch_start => 1, seen_votes => [], - bba_completion => <<>>}), + bba_completion => <<>>, + poc_keys => []}), BinBlock = blockchain_block:serialize(blockchain_block:set_signatures(Block0, [])), Signatures = signatures(ConsensusMembers, BinBlock), Block1 = blockchain_block:set_signatures(Block0, Signatures), diff --git a/test/miner_poc_SUITE.erl b/test/miner_poc_SUITE.erl index 879490f91..c7dde334e 100644 --- a/test/miner_poc_SUITE.erl +++ b/test/miner_poc_SUITE.erl @@ -849,13 +849,28 @@ exec_dist_test(TestCase, Config, VarMap, Status) -> setup_dist_test(TestCase, Config, VarMap, Status) -> Miners = ?config(miners, Config), - MinersAndPorts = ?config(ports, Config), {_, Locations} = lists:unzip(initialize_chain(Miners, TestCase, Config, VarMap)), GenesisBlock = miner_ct_utils:get_genesis_block(Miners, Config), - RadioPorts = [ P || {_Miner, {_TP, P, _JRPCP}} <- MinersAndPorts ], + + ok = miner_ct_utils:load_genesis_block(GenesisBlock, Miners, Config), + %% the radio ports used to be fetched from miner lora as part of init_per_testcase + %% but the port is only opened now after a chain is up and has been consulted to + %% determine if validators are running POCs + %% So now we have to wait until the chain is up and miner lora has opened the port + true = miner_ct_utils:wait_for_lora_port(Miners, miner_lora, 30), + + RadioPorts = lists:map( + fun(Miner) -> + {ok, RandomPort} = ct_rpc:call(Miner, miner_lora, port, []), + ct:pal("~p is listening for packet forwarder on ~p", [Miner, RandomPort]), + RandomPort + end, + Miners), + +%% RadioPorts = [ P || {_Miner, {_TP, P, _JRPCP}} <- MinersAndPorts ], {ok, _FakeRadioPid} = miner_fake_radio_backplane:start_link(maps:get(?poc_version, VarMap), 45000, lists:zip(RadioPorts, Locations), Status), - ok = miner_ct_utils:load_genesis_block(GenesisBlock, Miners, Config), + 
miner_fake_radio_backplane ! go, %% wait till height 10 ok = miner_ct_utils:wait_for_gte(height, Miners, 10, all, 30), @@ -1171,7 +1186,8 @@ common_poc_vars(Config) -> ?poc_v4_target_prob_score_wt => 0.8, ?poc_v4_target_score_curve => 5, ?poc_target_hex_parent_res => 5, - ?poc_v5_target_prob_randomness_wt => 0.0}. + ?poc_v5_target_prob_randomness_wt => 0.0, + ?poc_witness_consideration_limit => 20}. do_common_partition_checks(TestCase, Config, VarMap) -> Miners = ?config(miners, Config), @@ -1364,7 +1380,7 @@ do_common_partition_lying_checks(TestCase, Config, VarMap) -> extra_vars(poc_v11) -> POCVars = maps:merge(extra_vars(poc_v10), miner_poc_test_utils:poc_v11_vars()), - RewardVars = #{reward_version => 5, rewards_txn_version => 2}, + RewardVars = #{reward_version => 5, rewards_txn_version => 2, poc_witness_consideration_limit => 20}, maps:merge(POCVars, RewardVars); extra_vars(poc_v10) -> maps:merge(extra_poc_vars(), diff --git a/test/miner_poc_grpc_SUITE.erl b/test/miner_poc_grpc_SUITE.erl new file mode 100644 index 000000000..8414f683c --- /dev/null +++ b/test/miner_poc_grpc_SUITE.erl @@ -0,0 +1,589 @@ +-module(miner_poc_grpc_SUITE). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("eunit/include/eunit.hrl"). +-include_lib("blockchain/include/blockchain_vars.hrl"). +-include_lib("blockchain/include/blockchain.hrl"). + +-export([ + groups/0, all/0, test_cases/0, init_per_group/2, end_per_group/2, init_per_testcase/2, end_per_testcase/2 +]). + +-export([ + poc_grpc_dist_v11_test/1, + poc_grpc_dist_v11_cn_test/1, + poc_grpc_dist_v11_partitioned_test/1, + poc_grpc_dist_v11_partitioned_lying_test/1 +]). + +-define(SFLOCS, [631210968910285823, 631210968909003263, 631210968912894463, 631210968907949567]). +-define(NYLOCS, [631243922668565503, 631243922671147007, 631243922895615999, 631243922665907711]). +-define(AUSTINLOCS1, [631781084745290239, 631781089167934463, 631781054839691775, 631781050465723903]). 
+-define(AUSTINLOCS2, [631781452049762303, 631781453390764543, 631781452924144639, 631781452838965759]). +-define(LALOCS, [631236297173835263, 631236292179769855, 631236329165333503, 631236328049271807]). +-define(CNLOCS1, [ + 631649369216118271, %% spare-tortilla-raccoon + 631649369235022335, %% kind-tangerine-octopus + 631649369177018879, %% damp-hemp-pangolin + 631649369175419391 %% fierce-lipstick-poodle + ]). + +-define(CNLOCS2, [ + 631649369213830655, %% raspy-parchment-pike + 631649369205533183, %% fresh-gingham-porpoise + 631649369207629311, %% innocent-irish-pheasant + 631649368709059071 %% glorious-eggshell-finch + ]). + +%%-------------------------------------------------------------------- +%% COMMON TEST CALLBACK FUNCTIONS +%%-------------------------------------------------------------------- + +groups() -> + [{poc_grpc_with_chain, + [], + test_cases() + }, + {poc_grpc_no_chain, + [], + test_cases() + }]. + +%%-------------------------------------------------------------------- +%% @public +%% @doc +%% Running tests for this suite +%% @end +%%-------------------------------------------------------------------- +all() -> + [{group, poc_grpc_with_chain}, {group, poc_grpc_no_chain}]. + +test_cases() -> + [ + poc_grpc_dist_v11_test, + poc_grpc_dist_v11_cn_test, + poc_grpc_dist_v11_partitioned_test, + poc_grpc_dist_v11_partitioned_lying_test + ]. + +init_per_group(poc_grpc_with_chain, Config) -> + [ + {split_miners_vals_and_gateways, true}, + {num_validators, 5}, + {gateways_run_chain, true} | Config]; +init_per_group(poc_grpc_no_chain, Config) -> + [ + {split_miners_vals_and_gateways, true}, + {num_validators, 5}, + {gateways_run_chain, false} | Config]. + +init_per_testcase(TestCase, Config) -> + miner_ct_utils:init_per_testcase(?MODULE, TestCase, Config). + +end_per_testcase(TestCase, Config) -> + gen_server:stop(miner_fake_radio_backplane), + miner_ct_utils:end_per_testcase(TestCase, Config). + +end_per_group(_, _Config) -> + ok. 
+%%-------------------------------------------------------------------- +%% TEST CASES +%%-------------------------------------------------------------------- +poc_grpc_dist_v11_test(Config) -> + CommonPOCVars = common_poc_vars(Config), + ExtraVars = extra_vars(grpc), + run_dist_with_params(poc_grpc_dist_v11_test, Config, maps:merge(CommonPOCVars, ExtraVars)). + +poc_grpc_dist_v11_cn_test(Config) -> + CommonPOCVars = common_poc_vars(Config), + ExtraVars = extra_vars(grpc), + run_dist_with_params(poc_grpc_dist_v11_cn_test, Config, maps:merge(CommonPOCVars, ExtraVars)). + +poc_grpc_dist_v11_partitioned_test(Config) -> + CommonPOCVars = common_poc_vars(Config), + ExtraVars = extra_vars(grpc), + run_dist_with_params(poc_grpc_dist_v11_partitioned_test, Config, maps:merge(CommonPOCVars, ExtraVars)). + +poc_grpc_dist_v11_partitioned_lying_test(Config) -> + CommonPOCVars = common_poc_vars(Config), + ExtraVars = extra_vars(grpc), + run_dist_with_params(poc_grpc_dist_v11_partitioned_lying_test, Config, maps:merge(CommonPOCVars, ExtraVars)). + +%% ------------------------------------------------------------------ +%% Internal Function Definitions +%% ------------------------------------------------------------------ + +run_dist_with_params(TestCase, Config, VarMap) -> + run_dist_with_params(TestCase, Config, VarMap, true). + +run_dist_with_params(TestCase, Config, VarMap, Status) -> + ok = setup_dist_test(TestCase, Config, VarMap, Status), + %% Execute the test + ok = exec_dist_test(TestCase, Config, VarMap, Status), + %% show the final receipt counter + Validators = ?config(validators, Config), + FinalReceiptMap = challenger_receipts_map(find_receipts(Validators)), + ct:pal("FinalReceiptMap: ~p", [FinalReceiptMap]), + ct:pal("FinalReceiptCounter: ~p", [receipt_counter(FinalReceiptMap)]), + %% The test endeth here + ok. 
+ +exec_dist_test(poc_grpc_dist_v11_partitioned_lying_test, Config, VarMap, _Status) -> + do_common_partition_lying_checks(poc_grpc_dist_v11_partitioned_lying_test, Config, VarMap); +exec_dist_test(poc_grpc_dist_v11_partitioned_test, Config, VarMap, _Status) -> + do_common_partition_checks(poc_grpc_dist_v11_partitioned_test, Config, VarMap); +exec_dist_test(_TestCase, Config, VarMap, Status) -> + Validators = ?config(validators, Config), + %% Print scores before we begin the test + InitialScores = gateway_scores(Config), + ct:pal("InitialScores: ~p", [InitialScores]), + %% check that every miner has issued a challenge + case Status of + %% expect failure and exit + false -> + ?assert(check_validators_are_creating_poc_keys(Validators)); + true -> + ?assert(check_validators_are_creating_poc_keys(Validators)), + %% Check that the receipts are growing + case maps:get(?poc_version, VarMap, 11) of + V when V >= 10 -> + %% There are no paths in v11 or v10 for that matter, so we'll consolidate + %% the checks for both poc-v10 and poc-v11 here + true = miner_ct_utils:wait_until( + fun() -> + %% Check if we have some receipts + C2 = maps:size(challenger_receipts_map(find_receipts(Validators))) > 0, + %% Check there are some poc rewards + RewardsMD = get_rewards_md(Config), + ct:pal("RewardsMD: ~p", [RewardsMD]), + C3 = check_non_empty_poc_rewards(take_poc_challengee_and_witness_rewards(RewardsMD)), + ct:pal("C2: ~p, C3: ~p", [C2, C3]), + C2 andalso C3 + end, + 25, 5000), + FinalRewards = get_rewards(Config), + ct:pal("FinalRewards: ~p", [FinalRewards]), + ok; + _ -> + ok + end + end, + ok. 
+ +setup_dist_test(TestCase, Config, VarMap, Status) -> + AllMiners = ?config(miners, Config), + Validators = ?config(validators, Config), + Gateways = ?config(gateways, Config), + RunChainOnGateways = proplists:get_value(gateways_run_chain, Config, true), + {_, Locations} = lists:unzip(initialize_chain(Validators, TestCase, Config, VarMap)), + + case RunChainOnGateways of + true -> + _ = miner_ct_utils:integrate_genesis_block(hd(Validators), Gateways); + false -> + ok + end, + + %% the radio ports used to be fetched from miner lora as part of init_per_testcase + %% but the port is only opened now after a chain is up and has been consulted to + %% determine if validators are running POCs + %% So now we have to wait until the chain is up and miner lora has opened the port + true = miner_ct_utils:wait_for_lora_port(Gateways, miner_lora_light, 30), + + RadioPorts = lists:map( + fun(Gateway) -> + {ok, RandomPort} = ct_rpc:call(Gateway, miner_lora_light, port, []), + ct:pal("~p is listening for packet forwarder on ~p", [Gateway, RandomPort]), + RandomPort + end, + Gateways), + {ok, _FakeRadioPid} = miner_fake_radio_backplane:start_link(maps:get(?poc_version, VarMap), 45000, + lists:zip(RadioPorts, Locations), Status), + miner_fake_radio_backplane ! go, + %% wait till height 2 + case RunChainOnGateways of + true -> + ok = miner_ct_utils:wait_for_gte(height, AllMiners, 2, all, 30); + false -> + ok = miner_ct_utils:wait_for_gte(height, Validators, 2, all, 30) + end, + ok. 
+ +gen_locations(poc_grpc_dist_v11_partitioned_lying_test, _, _) -> + {?AUSTINLOCS1 ++ ?LALOCS, lists:duplicate(4, hd(?AUSTINLOCS1)) ++ lists:duplicate(4, hd(?LALOCS))}; +gen_locations(poc_grpc_dist_v11_partitioned_test, _, _) -> + %% These are taken from the ledger + {?AUSTINLOCS1 ++ ?LALOCS, ?AUSTINLOCS1 ++ ?LALOCS}; +gen_locations(poc_grpc_dist_v11_cn_test, _, _) -> + %% Actual locations are the same as the claimed locations for the dist test + {?CNLOCS1 ++ ?CNLOCS2, ?CNLOCS1 ++ ?CNLOCS2}; +gen_locations(_TestCase, Addresses, VarMap) -> + LocationJitter = case maps:get(?poc_version, VarMap, 1) of + V when V > 3 -> + 100; + _ -> + 1000000 + end, + + Locs = lists:foldl( + fun(I, Acc) -> + [h3:from_geo({37.780586, -122.469470 + I/LocationJitter}, 13)|Acc] + end, + [], + lists:seq(1, length(Addresses)) + ), + {Locs, Locs}. + +initialize_chain(_AllMiners, TestCase, Config, VarMap) -> + AllAddresses = ?config(addresses, Config), + Validators = ?config(validators, Config), + ValidatorAddrs = ?config(validator_addrs, Config), + GatewayAddrs = ?config(gateway_addrs, Config), + + N = ?config(num_consensus_members, Config), + Curve = ?config(dkg_curve, Config), + Keys = libp2p_crypto:generate_keys(ecc_compact), + InitialVars = miner_ct_utils:make_vars(Keys, VarMap), + InitialPaymentTransactions = [blockchain_txn_coinbase_v1:new(Addr, 5000) || Addr <- AllAddresses], + AddValTxns = [blockchain_txn_gen_validator_v1:new(Addr, Addr, ?bones(10000)) || Addr <- ValidatorAddrs], + + {ActualLocations, ClaimedLocations} = gen_locations(TestCase, GatewayAddrs, VarMap), + ct:pal("GatewayAddrs: ~p, ActualLocations: ~p, ClaimedLocations: ~p",[GatewayAddrs, ActualLocations, ClaimedLocations]), + AddressesWithLocations = lists:zip(GatewayAddrs, ActualLocations), + AddressesWithClaimedLocations = lists:zip(GatewayAddrs, ClaimedLocations), + InitialGenGatewayTxns = [blockchain_txn_gen_gateway_v1:new(Addr, Addr, Loc, 0) || {Addr, Loc} <- AddressesWithLocations], + InitialTransactions = 
InitialVars ++ InitialPaymentTransactions ++ AddValTxns ++ InitialGenGatewayTxns, + {ok, DKGCompletedNodes} = miner_ct_utils:initial_dkg(Validators, InitialTransactions, ValidatorAddrs, N, Curve), + + %% integrate genesis block + _GenesisLoadResults = miner_ct_utils:integrate_genesis_block(hd(DKGCompletedNodes), Validators -- DKGCompletedNodes), + AddressesWithClaimedLocations. + + +find_receipts(Validators) -> + [V | _] = Validators, + Chain = ct_rpc:call(V, blockchain_worker, blockchain, []), + Blocks = ct_rpc:call(V, blockchain, blocks, [Chain]), + lists:flatten(lists:foldl(fun({_Hash, Block}, Acc) -> + Txns = blockchain_block:transactions(Block), + Height = blockchain_block:height(Block), + Receipts = lists:filter(fun(T) -> + blockchain_txn:type(T) == blockchain_txn_poc_receipts_v2 + end, + Txns), + TaggedReceipts = lists:map(fun(R) -> + {Height, R} + end, + Receipts), + TaggedReceipts ++ Acc + end, + [], + maps:to_list(Blocks))). + +challenger_receipts_map(Receipts) -> + ReceiptMap = lists:foldl( + fun({_Height, Receipt}=R, Acc) -> + {ok, Challenger} = erl_angry_purple_tiger:animal_name(libp2p_crypto:bin_to_b58(blockchain_txn_poc_receipts_v2:challenger(Receipt))), + case maps:get(Challenger, Acc, undefined) of + undefined -> + maps:put(Challenger, [R], Acc); + List -> + maps:put(Challenger, lists:keysort(1, [R | List]), Acc) + end + end, + #{}, + Receipts), + + ct:pal("ReceiptMap: ~p", [ReceiptMap]), + + ReceiptMap. + +check_validators_are_creating_poc_keys([Val |_] = _Validators) -> + Chain = ct_rpc:call(Val, blockchain_worker, blockchain, []), + Ledger = ct_rpc:call(Val, blockchain, ledger, [Chain]), + {ok, CurHeight} = ct_rpc:call(Val, blockchain_ledger_v1, current_height, [Ledger]), + {ok, Block} = ct_rpc:call(Val, blockchain_ledger_v1, get_block, [CurHeight, Ledger]), + case blockchain_block_v1:poc_keys(Block) of + [] -> false; + [_] -> true + end. 
+ +check_partitioned_lying_path_growth(_TestCase, Miners) -> + ReceiptMap = challenger_receipts_map(find_receipts(Miners)), + ct:pal("ReceiptMap: ~p", [ReceiptMap]), + not check_subsequent_path_growth(ReceiptMap). + +receipt_counter(ReceiptMap) -> + lists:foldl(fun({Name, ReceiptList}, Acc) -> + Counts = lists:map(fun({Height, ReceiptTxn}) -> + {Height, length(blockchain_txn_poc_receipts_v2:path(ReceiptTxn))} + end, + ReceiptList), + maps:put(Name, Counts, Acc) + end, + #{}, + maps:to_list(ReceiptMap)). + +gateway_scores(Config) -> + [V | _] = ?config(validators, Config), + Addresses = ?config(gateway_addrs, Config), + Chain = ct_rpc:call(V, blockchain_worker, blockchain, []), + Ledger = ct_rpc:call(V, blockchain, ledger, [Chain]), + lists:foldl(fun(Address, Acc) -> + {ok, S} = ct_rpc:call(V, blockchain_ledger_v1, gateway_score, [Address, Ledger]), + {ok, Name} = erl_angry_purple_tiger:animal_name(libp2p_crypto:bin_to_b58(Address)), + maps:put(Name, S, Acc) + end, + #{}, + Addresses). + +common_poc_vars(Config) -> + N = ?config(num_consensus_members, Config), + BlockTime = ?config(block_time, Config), + Interval = ?config(election_interval, Config), + BatchSize = ?config(batch_size, Config), + Curve = ?config(dkg_curve, Config), + %% Don't put the poc version here + %% Add it to the map in the tests above + #{?block_time => BlockTime, + ?election_interval => Interval, + ?num_consensus_members => N, + ?batch_size => BatchSize, + ?dkg_curve => Curve, + ?election_version => 6, %% TODO validators + ?poc_challenge_interval => 15, + ?poc_v4_exclusion_cells => 10, + ?poc_v4_parent_res => 11, + ?poc_v4_prob_bad_rssi => 0.01, + ?poc_v4_prob_count_wt => 0.3, + ?poc_v4_prob_good_rssi => 1.0, + ?poc_v4_prob_no_rssi => 0.5, + ?poc_v4_prob_rssi_wt => 0.3, + ?poc_v4_prob_time_wt => 0.3, + ?poc_v4_randomness_wt => 0.1, + ?poc_v4_target_challenge_age => 300, + ?poc_v4_target_exclusion_cells => 6000, + ?poc_v4_target_prob_edge_wt => 0.2, + ?poc_v4_target_prob_score_wt => 0.8, + 
?poc_v4_target_score_curve => 5, + ?poc_target_hex_parent_res => 5, + ?poc_v5_target_prob_randomness_wt => 0.0}. + +do_common_partition_checks(_TestCase, Config, VarMap) -> + Validators = ?config(validators, Config), + %% Print scores before we begin the test + InitialScores = gateway_scores(Config), + ct:pal("InitialScores: ~p", [InitialScores]), + true = miner_ct_utils:wait_until( + fun() -> + case maps:get(poc_version, VarMap, 1) of + V when V >= 10 -> + %% There is no path to check, so do both poc-v10 and poc-v11 checks here + %% Check that every miner has issued a challenge + C1 = check_validators_are_creating_poc_keys(Validators), + %% Check there are some poc rewards + RewardsMD = get_rewards_md(Config), + ct:pal("RewardsMD: ~p", [RewardsMD]), + C2 = check_non_empty_poc_rewards(take_poc_challengee_and_witness_rewards(RewardsMD)), + ct:pal("C1: ~p, C2: ~p", [C1, C2]), + C1 andalso C2; + _ -> + ok + end + end, 60, 5000), + %% Print scores after execution + FinalScores = gateway_scores(Config), + ct:pal("FinalScores: ~p", [FinalScores]), + FinalRewards = get_rewards(Config), + ct:pal("FinalRewards: ~p", [FinalRewards]), + ok. + +balances(Config) -> + [V | _] = ?config(validators, Config), + Addresses = ?config(validator_addrs, Config), + [miner_ct_utils:get_balance(V, Addr) || Addr <- Addresses]. + +take_poc_challengee_and_witness_rewards(RewardsMD) -> + %% only take poc_challengee and poc_witness rewards + POCRewards = lists:foldl( + fun({Ht, MDMap}, Acc) -> + [{Ht, maps:with([poc_challengee, poc_witness], MDMap)} | Acc] + end, + [], + RewardsMD), + ct:pal("POCRewards: ~p", [POCRewards]), + POCRewards. + +check_non_empty_poc_rewards(POCRewards) -> + lists:any( + fun({_Ht, #{poc_challengee := R1, poc_witness := R2}}) -> + maps:size(R1) > 0 andalso maps:size(R2) > 0 + end, + POCRewards). 
+ + +get_rewards_md(Config) -> + %% NOTE: It's possible that the calculations below may blow up + %% since we are folding the entire chain here and some subsequent + %% ledger_at call in rewards_metadata blows up. Investigate + + [V | _] = ?config(validators, Config), + Chain = ct_rpc:call(V, blockchain_worker, blockchain, []), + {ok, Head} = ct_rpc:call(V, blockchain, head_block, [Chain]), + + Filter = fun(T) -> blockchain_txn:type(T) == blockchain_txn_rewards_v2 end, + Fun = fun(Block, Acc) -> + case blockchain_utils:find_txn(Block, Filter) of + [T] -> + Start = blockchain_txn_rewards_v2:start_epoch(T), + End = blockchain_txn_rewards_v2:end_epoch(T), + MDRes = ct_rpc:call(V, blockchain_txn_rewards_v2, calculate_rewards_metadata, [ + Start, + End, + Chain + ]), + case MDRes of + {ok, MD} -> + [{blockchain_block:height(Block), MD} | Acc]; + _ -> + Acc + end; + _ -> + Acc + end + end, + Res = ct_rpc:call(V, blockchain, fold_chain, [Fun, [], Head, Chain]), + Res. + + +get_rewards(Config) -> + %% default to rewards_v1 + get_rewards(Config, blockchain_txn_rewards_v2). + +get_rewards(Config, RewardType) -> + [Val | _] = ?config(validators, Config), + Chain = ct_rpc:call(Val, blockchain_worker, blockchain, []), + Blocks = ct_rpc:call(Val, blockchain, blocks, [Chain]), + maps:fold(fun(_, Block, Acc) -> + case blockchain_block:transactions(Block) of + [] -> + Acc; + Ts -> + Rewards = lists:filter(fun(T) -> + blockchain_txn:type(T) == RewardType + end, + Ts), + lists:flatten([Rewards | Acc]) + end + end, + [], + Blocks). + +check_poc_rewards(RewardsTxns) -> + %% Get all rewards types + RewardTypes = lists:foldl(fun(RewardTxn, Acc) -> + Types = [blockchain_txn_reward_v1:type(R) || R <- blockchain_txn_rewards_v2:rewards(RewardTxn)], + lists:flatten([Types | Acc]) + end, + [], + RewardsTxns), + lists:any(fun(T) -> + T == poc_challengees orelse T == poc_witnesses + end, + RewardTypes). 
+ +do_common_partition_lying_checks(TestCase, Config, VarMap) -> + Validators = ?config(validators, Config), + %% Print scores before we begin the test + InitialScores = gateway_scores(Config), + ct:pal("InitialScores: ~p", [InitialScores]), + %% Print scores before we begin the test + InitialBalances = balances(Config), + ct:pal("InitialBalances: ~p", [InitialBalances]), + + true = miner_ct_utils:wait_until( + fun() -> + case maps:get(poc_version, VarMap, 11) of + V when V > 10 -> + %% Check that every miner has issued a challenge + C1 = check_validators_are_creating_poc_keys(Validators), + %% TODO: What to check when the partitioned nodes are lying about their locations + C1; + _ -> + %% Check that every miner has issued a challenge + C1 = check_validators_are_creating_poc_keys(Validators), + %% Since we have two static location partitioned networks, where + %% both are lying about their distances, the paths should + %% never get longer than 1 + C2 = check_partitioned_lying_path_growth(TestCase, Validators), + C1 andalso C2 + end + end, + 40, 5000), + %% Print scores after execution + FinalScores = gateway_scores(Config), + ct:pal("FinalScores: ~p", [FinalScores]), + %% Print rewards + Rewards = get_rewards(Config), + ct:pal("Rewards: ~p", [Rewards]), + %% Print balances after execution + FinalBalances = balances(Config), + ct:pal("FinalBalances: ~p", [FinalBalances]), + %% There should be no poc_witness or poc_challengees rewards + ?assert(not check_poc_rewards(Rewards)), + ok. 
+ +extra_vars(grpc) -> + GrpcVars = #{ + ?poc_challenge_rate => 1, + ?poc_challenger_type => validator, + ?poc_timeout => 4, + ?poc_receipts_absorb_timeout => 2 + }, + maps:merge(extra_vars(poc_v11), GrpcVars); +extra_vars(poc_v11) -> + POCVars = maps:merge(extra_vars(poc_v10), miner_poc_test_utils:poc_v11_vars()), + RewardVars = #{reward_version => 5, rewards_txn_version => 2}, + maps:merge(POCVars, RewardVars); +extra_vars(poc_v10) -> + maps:merge(extra_poc_vars(), + #{?poc_version => 10, + ?data_aggregation_version => 2, + ?consensus_percent => 0.06, + ?dc_percent => 0.325, + ?poc_challengees_percent => 0.18, + ?poc_challengers_percent => 0.0095, + ?poc_witnesses_percent => 0.0855, + ?securities_percent => 0.34, + ?reward_version => 5, + ?rewards_txn_version => 2, + ?poc_challenge_rate => 1, + ?poc_challenger_type => validator, + ?poc_timeout => 4, + ?poc_receipts_absorb_timeout => 2, + ?election_interval => 10, + ?block_time => 5000 + }); +extra_vars(poc_v8) -> + maps:merge(extra_poc_vars(), #{?poc_version => 8}); +extra_vars(_) -> + {error, poc_v8_and_above_only}. + +extra_poc_vars() -> + #{?poc_good_bucket_low => -132, + ?poc_good_bucket_high => -80, + ?poc_v5_target_prob_randomness_wt => 1.0, + ?poc_v4_target_prob_edge_wt => 0.0, + ?poc_v4_target_prob_score_wt => 0.0, + ?poc_v4_prob_rssi_wt => 0.0, + ?poc_v4_prob_time_wt => 0.0, + ?poc_v4_randomness_wt => 0.5, + ?poc_v4_prob_count_wt => 0.0, + ?poc_centrality_wt => 0.5, + ?poc_max_hop_cells => 2000}. + +check_subsequent_path_growth(ReceiptMap) -> + PathLengths = [ length(blockchain_txn_poc_receipts_v2:path(Txn)) || {_, Txn} <- lists:flatten(maps:values(ReceiptMap)) ], + ct:pal("PathLengths: ~p", [PathLengths]), + lists:any(fun(L) -> L > 1 end, PathLengths). 
+ diff --git a/test/miner_poc_test_utils.erl b/test/miner_poc_test_utils.erl index 8cfe23325..32bea7902 100644 --- a/test/miner_poc_test_utils.erl +++ b/test/miner_poc_test_utils.erl @@ -128,12 +128,16 @@ download_serialized_region(URL) -> PrivDir = filename:join([Dir, "priv"]), ok = filelib:ensure_dir(PrivDir ++ "/"), ok = ssl:start(), - {ok, {{_, 200, "OK"}, _, Body}} = httpc:request(URL), - FName = hd(string:tokens(hd(lists:reverse(string:tokens(URL, "/"))), "?")), - FPath = filename:join([PrivDir, FName]), - ok = file:write_file(FPath, Body), - {ok, Data} = file:read_file(FPath), - Data. + case httpc:request(URL) of + {ok, {{_, 200, "OK"}, _, Body}} -> + FName = hd(string:tokens(hd(lists:reverse(string:tokens(URL, "/"))), "?")), + FPath = filename:join([PrivDir, FName]), + ok = file:write_file(FPath, Body), + {ok, Data} = file:read_file(FPath), + Data; + _ -> + <<>> + end. make_params(RegionParams) -> lists:foldl( @@ -150,4 +154,4 @@ construct_param(P) -> BW = proplists:get_value(<<"bandwidth">>, P), MaxEIRP = proplists:get_value(<<"max_eirp">>, P), Spreading = blockchain_region_spreading_v1:new(proplists:get_value(<<"spreading">>, P)), - blockchain_region_param_v1:new(CF, BW, MaxEIRP, Spreading). + blockchain_region_param_v1:new(CF, BW, MaxEIRP, Spreading). \ No newline at end of file diff --git a/test/miner_txn_mgr_SUITE.erl b/test/miner_txn_mgr_SUITE.erl index c9a00a6af..704a088b7 100644 --- a/test/miner_txn_mgr_SUITE.erl +++ b/test/miner_txn_mgr_SUITE.erl @@ -633,4 +633,4 @@ nonce_updated_for_miner(Addr, ExpectedNonce, ConMiners)-> Nonce == ExpectedNonce end, ConMiners), [true] == lists:usort(HaveNoncesIncremented) - end, 200, 1000). + end, 200, 1000). \ No newline at end of file