diff --git a/Cargo.lock b/Cargo.lock
index 101abe51e3..8fe4c3912d 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -268,6 +268,7 @@ dependencies = [
  "slog",
  "slog-json",
  "slog-term",
+ "stx-genesis",
  "time 0.2.16",
  "tini",
  "url",
diff --git a/Cargo.toml b/Cargo.toml
index 3cd5cf3e57..b62376c2f7 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -89,6 +89,7 @@ features = ["std"]
 [dev-dependencies]
 assert-json-diff = "1.0.0"
 criterion = "0.3"
+stx_genesis = { package = "stx-genesis", path = "./stx-genesis/."}

 [features]
 default = ["developer-mode"]
diff --git a/bns-test/jest.config.js b/bns-test/jest.config.js
index 5aae18d763..665d2bcf92 100644
--- a/bns-test/jest.config.js
+++ b/bns-test/jest.config.js
@@ -11,6 +11,7 @@ module.exports = {
     },
   },
   moduleFileExtensions: ['js', 'ts', 'd.ts'],
+  testTimeout: 2000000
   // setupFiles: ['./tests/global-setup.ts'],
   // setupFilesAfterEnv: ['./tests/setup.ts'],
 };
diff --git a/bns-test/package.json b/bns-test/package.json
index cf9dda0eb7..ca1ad0d05a 100644
--- a/bns-test/package.json
+++ b/bns-test/package.json
@@ -16,7 +16,7 @@
     "lint": "tslint -p tsconfig.json 'src/**/*.ts' 'test/**/*.ts'",
     "lint-fix": "tslint --fix -p tsconfig.json 'src/**/*.ts' 'test/**/*.ts'",
     "test": "jest",
-    "test:watch": "jest --watch --coverage=false --runInBand"
+    "test:watch": "jest --watch --coverage=false"
   },
   "engines": {
     "node": ">=10"
diff --git a/bns-test/src/bns-client.ts b/bns-test/src/bns-client.ts
index 8ba63f4564..018d950958 100644
--- a/bns-test/src/bns-client.ts
+++ b/bns-test/src/bns-client.ts
@@ -108,7 +108,7 @@ export class BNSClient extends Client {
   }

   // (name-import (namespace (buff 20))
-  //              (name (buff 16))
+  //              (name (buff 48))
   //              (zonefile-hash (buff 20)))
   async nameImport(namespace: string,
     name: string,
@@ -168,7 +168,7 @@ export class BNSClient extends Client {
   }

   // (name-register (namespace (buff 20))
-  //                (name (buff 16))
+  //                (name (buff 48))
   //                (salt (buff 20))
   //                (zonefile-hash (buff 20)))
   async nameRegister(namespace: string,
@@ -190,7 +190,7 @@ export class BNSClient extends Client {
   }

   // (name-update (namespace (buff 20))
-  //              (name (buff 16))
+  //              (name (buff 48))
   //              (zonefile-hash (buff 20)))
   async nameUpdate(namespace: string,
     name: string,
@@ -210,7 +210,7 @@ export class BNSClient extends Client {
   }

   // (name-transfer (namespace (buff 20))
-  //                (name (buff 16))
+  //                (name (buff 48))
   //                (new-owner principal)
   //                (zonefile-hash (optional (buff 20))))
   async nameTransfer(namespace: string,
@@ -235,7 +235,7 @@ export class BNSClient extends Client {
   }

   // (name-revoke (namespace (buff 20))
-  //              (name (buff 16)))
+  //              (name (buff 48)))
   async nameRevoke(namespace: string,
     name: string,
     params: {
@@ -253,7 +253,7 @@ export class BNSClient extends Client {
   }

   // (name-renewal (namespace (buff 20))
-  //               (name (buff 16))
+  //               (name (buff 48))
   //               (stx-to-burn uint)
   //               (new-owner (optional principal))
   //               (zonefile-hash (optional (buff 20))))
@@ -281,7 +281,7 @@ export class BNSClient extends Client {
   }

   // (get-name-zonefile (namespace (buff 20))
-  //                    (name (buff 16)))
+  //                    (name (buff 48)))
   async getNameZonefile(namespace: string,
     name: string,
     params: {
@@ -299,7 +299,7 @@ export class BNSClient extends Client {
   }

   // (can-name-be-registered (namespace (buff 20))
-  //                         (name (buff 16))
+  //                         (name (buff 48))
   async canNameBeRegistered(namespace: string, name: string): Promise<Receipt> {
     const args = [`0x${this.toHexString(namespace)}`, `0x${this.toHexString(name)}`];
@@ -315,7 +315,7 @@ export class BNSClient extends Client {
   }

   // (get-name-price (namespace (buff 20))
-  //                 (name (buff 16))
+  //                 (name (buff 48))
   async getNamePrice(namespace: string, name: string): Promise<Receipt> {
     const args = [`0x${this.toHexString(namespace)}`, `0x${this.toHexString(name)}`];
diff --git a/bns-test/test/name_import.test.ts b/bns-test/test/name_import.test.ts
index 619871cb06..2a5c9e157a 100644
--- a/bns-test/test/name_import.test.ts
+++ b/bns-test/test/name_import.test.ts
@@ -103,7 +103,7 @@ describe("BNS Test Suite - NAME_IMPORT", () => {
       sender: cases[0].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('Returned: u12');
+    expect(receipt.result).include('Returned: u146');

     receipt = await bns.namespaceReveal(
       cases[0].namespace,
@@ -222,7 +222,7 @@ describe("BNS Test Suite - NAME_IMPORT", () => {

     expect(receipt.success).eq(true);

-    // Charlie trying to register 'alpha.blockstack' should fail
+    // Charlie trying to register 'alpha.blockstack' should succeed
     receipt = await bns.namePreorder(
       cases[0].namespace,
       "alpha",
@@ -231,7 +231,7 @@ describe("BNS Test Suite - NAME_IMPORT", () => {
       sender: charlie
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u29');
+    expect(receipt.result).include('u163');

     receipt = await bns.nameRegister(
       cases[0].namespace,
@@ -280,7 +280,7 @@ describe("BNS Test Suite - NAME_IMPORT", () => {
     expect(receipt.success).eq(false);

     // Resolving an imported name should fail after expiration
-    await bns.mineBlocks(100);
+    await bns.mineBlocks(5100);

     receipt = await bns.getNameZonefile(
       cases[0].namespace,
diff --git a/bns-test/test/name_preorder.test.ts b/bns-test/test/name_preorder.test.ts
index 35c207ab05..c7a8b5c7ef 100644
--- a/bns-test/test/name_preorder.test.ts
+++ b/bns-test/test/name_preorder.test.ts
@@ -120,7 +120,7 @@ describe("BNS Test Suite - NAME_PREORDER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u33');
+    expect(receipt.result).include('u167');

     // should fail if the same order is being re-submitted by Bob
     receipt = await bns.namePreorder(
@@ -142,11 +142,11 @@ describe("BNS Test Suite - NAME_PREORDER", () => {
       sender: alice
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u35');
+    expect(receipt.result).include('u169');


     // should succeed once claimability TTL expired
-    await mineBlocks(bns, 10);
+    await mineBlocks(bns, 154);
     receipt = await bns.namePreorder(
       cases[0].namespace,
       "bob",
@@ -155,7 +155,7 @@ describe("BNS Test Suite - NAME_PREORDER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u46');
+    expect(receipt.result).include('u324');
   });
  });
 });
\ No newline at end of file
diff --git a/bns-test/test/name_prices.test.ts b/bns-test/test/name_prices.test.ts
index fa7ee0fe9e..eea6ab185b 100644
--- a/bns-test/test/name_prices.test.ts
+++ b/bns-test/test/name_prices.test.ts
@@ -94,14 +94,13 @@ import {
     await bns.deployContract();
   });

-
   it("Testing name prices", async () => {
     // Given a launched namespace 'blockstack', owned by Alice
     var receipt = await bns.namespacePreorder(cases[0].namespace, cases[0].salt, cases[0].value, {
       sender: cases[0].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u12');
+    expect(receipt.result).include('u146');

     receipt = await bns.namespaceReveal(
diff --git a/bns-test/test/name_register.test.ts b/bns-test/test/name_register.test.ts
index 1828524525..b0931046a8 100644
--- a/bns-test/test/name_register.test.ts
+++ b/bns-test/test/name_register.test.ts
@@ -112,7 +112,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: cases[1].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u33');
+    expect(receipt.result).include('u167');

     receipt = await bns.namespaceReveal(
       cases[1].namespace,
@@ -132,7 +132,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       100, {
         sender: bob
       });
-    expect(receipt.result).include('u35');
+    expect(receipt.result).include('u169');
     expect(receipt.success).eq(true);

     receipt = await bns.nameRegister(
@@ -152,7 +152,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: cases[0].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u37');
+    expect(receipt.result).include('u171');

     receipt = await bns.namespaceReveal(
       cases[0].namespace,
@@ -194,7 +194,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: bob
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u42');
+    expect(receipt.result).include('u176');

     // should fail
     receipt = await bns.nameRegister(
@@ -216,7 +216,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u44');
+    expect(receipt.result).include('u178');

     // Bob registering the name 'Bob.blockstack' should fail
     receipt = await bns.nameRegister(
@@ -238,7 +238,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u46');
+    expect(receipt.result).include('u180');

     // Bob registering the name 'bob.blockstack'
     // should succeed
@@ -282,7 +282,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: charlie
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u50');
+    expect(receipt.result).include('u184');

     receipt = await bns.nameRegister(
       cases[0].namespace,
@@ -309,7 +309,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u53');
+    expect(receipt.result).include('u187');

     receipt = await bns.nameRegister(
       cases[0].namespace,
@@ -323,7 +323,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {

     // should succeed once 'bob.blockstack' is expired
-    await mineBlocks(bns, cases[0].renewalRule);
+    await mineBlocks(bns, cases[0].renewalRule + 5000);

     receipt = await bns.namePreorder(
       cases[0].namespace,
@@ -333,7 +333,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u65');
+    expect(receipt.result).include('u5199');

     receipt = await bns.getNameZonefile(
       cases[0].namespace,
@@ -370,7 +370,7 @@ describe("BNS Test Suite - NAME_REGISTER", () => {
       sender: charlie
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u69');
+    expect(receipt.result).include('u5203');

     receipt = await bns.nameRegister(
       cases[0].namespace,
diff --git a/bns-test/test/name_renewal.test.ts b/bns-test/test/name_renewal.test.ts
index 6f473c78f3..0613068063 100644
--- a/bns-test/test/name_renewal.test.ts
+++ b/bns-test/test/name_renewal.test.ts
@@ -275,7 +275,7 @@ describe("BNS Test Suite - NAME_RENEWAL", () => {

     // When Bob is renewing 'bob.blockstack' at block #56 (expired)
     // should fail renewing
-    await mineBlocks(bns, 16);
+    await mineBlocks(bns, 16 + 5000);

     receipt = await bns.getNameZonefile(
       cases[0].namespace,
@@ -307,7 +307,7 @@ describe("BNS Test Suite - NAME_RENEWAL", () => {
       sender: dave
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u56');
+    expect(receipt.result).include('u5190');
     block_height += 1;

     receipt = await bns.nameRegister(
diff --git a/bns-test/test/name_revoke.test.ts b/bns-test/test/name_revoke.test.ts
index bcb72156e0..55ffdbb168 100644
--- a/bns-test/test/name_revoke.test.ts
+++ b/bns-test/test/name_revoke.test.ts
@@ -221,7 +221,7 @@ describe("BNS Test Suite - NAME_REVOKE", () => {
       sender: bob
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u24');
+    expect(receipt.result).include('u158');

     receipt = await bns.nameRegister(
       cases[0].namespace,
@@ -267,7 +267,7 @@ describe("BNS Test Suite - NAME_REVOKE", () => {
       sender: bob
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u28');
+    expect(receipt.result).include('u162');

     receipt = await bns.nameRegister(
       cases[0].namespace,
@@ -326,7 +326,7 @@ describe("BNS Test Suite - NAME_REVOKE", () => {
       sender: alice
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u34');
+    expect(receipt.result).include('u168');

     receipt = await bns.nameRegister(
       cases[0].namespace,
diff --git a/bns-test/test/name_transfer.test.ts b/bns-test/test/name_transfer.test.ts
index bda287c66b..0ca0129747 100644
--- a/bns-test/test/name_transfer.test.ts
+++ b/bns-test/test/name_transfer.test.ts
@@ -142,7 +142,7 @@ describe("BNS Test Suite - NAME_TRANSFER", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u16');
+    expect(receipt.result).include('u150');
     block_height += 1;

     receipt = await bns.nameRegister(
@@ -164,7 +164,7 @@ describe("BNS Test Suite - NAME_TRANSFER", () => {
       sender: charlie
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u18');
+    expect(receipt.result).include('u152');
     block_height += 1;

     receipt = await bns.nameRegister(
@@ -280,7 +280,7 @@ describe("BNS Test Suite - NAME_TRANSFER", () => {
       "charlie", {
         sender: cases[0].nameOwner
       });
-    expect(receipt.result).include('0x00');
+    expect(receipt.result).include('(zonefile-hash 0x)');
     expect(receipt.success).eq(true);

     // Bob should not be able to update 'charlie.blockstack'
@@ -321,7 +321,7 @@ describe("BNS Test Suite - NAME_TRANSFER", () => {
       "bob", {
         sender: cases[0].nameOwner
       });
-    expect(receipt.result).include('0x00');
+    expect(receipt.result).include('(zonefile-hash 0x))');
     expect(receipt.success).eq(true);

     // Bob should be able to update its zonefile
diff --git a/bns-test/test/name_update.test.ts b/bns-test/test/name_update.test.ts
index 4c15d90945..128632770c 100644
--- a/bns-test/test/name_update.test.ts
+++ b/bns-test/test/name_update.test.ts
@@ -181,7 +181,7 @@ describe("BNS Test Suite - NAME_UPDATE", () => {
       sender: cases[0].nameOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u20');
+    expect(receipt.result).include('u154');

     receipt = await bns.nameRegister(
       cases[0].namespace,
diff --git a/bns-test/test/namespace_preorder.test.ts b/bns-test/test/namespace_preorder.test.ts
index eaf3ea5c4c..dc2f10aea3 100644
--- a/bns-test/test/namespace_preorder.test.ts
+++ b/bns-test/test/namespace_preorder.test.ts
@@ -162,7 +162,7 @@ describe("BNS Test Suite - NAMESPACE_PREORDER", () => {
       sender: cases[0].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u12');
+    expect(receipt.result).include('u146');
   });

   it("should succeed when Alice pre-orders 'id', 'stx-to-burn' = 9600 (balance ok)", async () => {
@@ -170,7 +170,7 @@ describe("BNS Test Suite - NAMESPACE_PREORDER", () => {
       sender: cases[1].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u12');
+    expect(receipt.result).include('u146');
   });

   // Given an existing pre-order for 'blockstack' registered by Alice
@@ -181,7 +181,7 @@ describe("BNS Test Suite - NAMESPACE_PREORDER", () => {
       sender: bob
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u12');
+    expect(receipt.result).include('u146');

     // When Alice submits a pre-order with the same salted hashed namespace
     // should fail if TTL is still valid
@@ -189,7 +189,7 @@ describe("BNS Test Suite - NAMESPACE_PREORDER", () => {
       sender: cases[0].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u13');
+    expect(receipt.result).include('u147');

     // Let's mine 5 blocks and check
     await mineBlocks(bns, 5);
@@ -199,13 +199,13 @@ describe("BNS Test Suite - NAMESPACE_PREORDER", () => {
     expect(receipt.success).eq(false);
     expect(receipt.error).include('1003');

-    // Let's mine 6 more blocks and check (TTL = 10 (< 5 + 6))
-    await mineBlocks(bns, 6);
+    // Let's mine 136 more blocks and check (TTL = 144)
+    await mineBlocks(bns, 136);
     receipt = await bns.namespacePreorder(cases[0].namespace, cases[0].salt, cases[0].value, {
       sender: bob
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u26'); // 20 blocks simulated initially + 11 blocks simulated + TTL
+    expect(receipt.result).include('u290'); // 20 blocks simulated initially + 11 blocks simulated + TTL
   });
 });
diff --git a/bns-test/test/namespace_ready.test.ts b/bns-test/test/namespace_ready.test.ts
index 416b98bd7a..d657257a1b 100644
--- a/bns-test/test/namespace_ready.test.ts
+++ b/bns-test/test/namespace_ready.test.ts
@@ -99,7 +99,7 @@ describe("BNS Test Suite - NAMESPACE_READY", () => {
       sender: cases[0].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u12');
+    expect(receipt.result).include('u146');

     receipt = await bns.namespaceReveal(
       cases[0].namespace,
@@ -132,7 +132,7 @@ describe("BNS Test Suite - NAMESPACE_READY", () => {
       sender: cases[1].namespaceOwner
     });
     expect(receipt.success).eq(true);
-    expect(receipt.result).include('u16');
+    expect(receipt.result).include('u150');

     receipt = await bns.namespaceReveal(
       cases[1].namespace,
@@ -147,7 +147,7 @@ describe("BNS Test Suite - NAMESPACE_READY", () => {

     // Launching the namespace
     // should fail if launchability TTL expired
-    await mineBlocks(bns, 11);
+    await mineBlocks(bns, 52595);
     receipt = await bns.namespaceReady(cases[1].namespace, {
       sender: cases[1].namespaceOwner
     });
diff --git a/bns-test/test/namespace_reveal.test.ts b/bns-test/test/namespace_reveal.test.ts
index 14f7a470b3..0ef00b388e 100644
--- a/bns-test/test/namespace_reveal.test.ts
+++ b/bns-test/test/namespace_reveal.test.ts
@@ -87,7 +87,7 @@ describe("BNS Test Suite - NAMESPACE_REVEAL", () => {
     beforeEach(async () => {
       let receipt = await bns.namespacePreorder(cases[1].namespace, cases[1].salt, cases[1].value, { sender: cases[1].namespaceOwner });
       expect(receipt.success).eq(true);
-      expect(receipt.result).include('u12');
+      expect(receipt.result).include('u146');
     });

     it("should fail", async () => {
@@ -102,7 +102,7 @@ describe("BNS Test Suite - NAMESPACE_REVEAL", () => {
     beforeEach(async () => {
       let receipt = await bns.namespacePreorder(cases[0].namespace, cases[0].salt, cases[0].value, { sender: cases[0].namespaceOwner });
       expect(receipt.success).eq(true);
-      expect(receipt.result).include('u12');
+      expect(receipt.result).include('u146');
     });

     it("should fail if the sender changed", async () => {
@@ -134,7 +134,7 @@ describe("BNS Test Suite - NAMESPACE_REVEAL", () => {
     beforeEach(async () => {
       let receipt = await bns.namespacePreorder(cases[0].namespace, cases[0].salt, cases[0].value, { sender: cases[0].namespaceOwner });
       expect(receipt.success).eq(true);
-      expect(receipt.result).include('u12');
+      expect(receipt.result).include('u146');
     });

@@ -155,7 +155,7 @@ describe("BNS Test Suite - NAMESPACE_REVEAL", () => {
     beforeEach(async () => {
       let receipt = await bns.namespacePreorder(cases[1].namespace, cases[1].salt, 96, { sender: bob });
       expect(receipt.success).eq(true);
-      expect(receipt.result).include('u12');
+      expect(receipt.result).include('u146');
     });

     it("should fail", async () => {
@@ -175,7 +175,7 @@ describe("BNS Test Suite - NAMESPACE_REVEAL", () => {
     beforeEach(async () => {
       let receipt = await bns.namespacePreorder(cases[1].namespace, cases[1].salt, cases[1].value, { sender: cases[1].namespaceOwner });
       expect(receipt.success).eq(true);
-      expect(receipt.result).include('u12');
+      expect(receipt.result).include('u146');
     });

     it("should succeed if the price-function, lifetime, namespace and salt are valid", async () => {
diff --git a/bns-test/test/races.test.ts b/bns-test/test/races.test.ts
index 399c832fe5..8aa8f5f4ac 100644
--- a/bns-test/test/races.test.ts
+++ b/bns-test/test/races.test.ts
@@ -189,7 +189,7 @@ describe("BNS Test Suite - RACES", () => {

     // After a NAMESPACE_LAUNCHABILITY_TTL+ blocks, the namespace should expire
     // As a consequence, the imported names should stop resolving
-    await bns.mineBlocks(40);
+    await bns.mineBlocks(52595);
     receipt = await bns.getNameZonefile(
       cases[0].namespace,
       "alpha", {
diff --git a/src/burnchains/burnchain.rs b/src/burnchains/burnchain.rs
index c3ac039d02..ae7dbcdc5a 100644
--- a/src/burnchains/burnchain.rs
+++ b/src/burnchains/burnchain.rs
@@ -109,6 +109,7 @@ impl BurnchainStateTransition {
     pub fn from_block_ops(
         sort_tx: &mut SortitionHandleTx,
+        burnchain: &Burnchain,
         parent_snapshot: &BlockSnapshot,
         block_ops: &Vec<BlockstackOperationType>,
         missed_commits: &Vec<MissedBlockCommit>,
@@ -164,68 +165,89 @@ impl BurnchainStateTransition {
         let mut windowed_block_commits = vec![block_commits];
         let mut windowed_missed_commits = vec![];

-        // build a map of intended sortition -> missed commit for the missed commits
-        // discovered in this block
-        let mut missed_commits_map: HashMap<_, Vec<_>> = HashMap::new();
-        for missed in missed_commits.iter() {
-            if let Some(commits_at_sortition) =
-                missed_commits_map.get_mut(&missed.intended_sortition)
-            {
-                commits_at_sortition.push(missed);
-            } else {
-                missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]);
+        if !burnchain.is_in_prepare_phase(parent_snapshot.block_height + 1)
+            && parent_snapshot.block_height + 1 <= burnchain.pox_constants.sunset_end
+        {
+            // PoX reward-phase is active!
+            // build a map of intended sortition -> missed commit for the missed commits
+            // discovered in this block.
+            let mut missed_commits_map: HashMap<_, Vec<_>> = HashMap::new();
+            for missed in missed_commits.iter() {
+                if let Some(commits_at_sortition) =
+                    missed_commits_map.get_mut(&missed.intended_sortition)
+                {
+                    commits_at_sortition.push(missed);
+                } else {
+                    missed_commits_map.insert(missed.intended_sortition.clone(), vec![missed]);
+                }
             }
-        }

-        for blocks_back in 0..(MINING_COMMITMENT_WINDOW - 1) {
-            if parent_snapshot.block_height < (blocks_back as u64) {
-                debug!("Mining commitment window shortened because block height is less than window size";
-                       "block_height" => %parent_snapshot.block_height,
-                       "window_size" => %MINING_COMMITMENT_WINDOW);
-                break;
-            }
-            let block_height = parent_snapshot.block_height - (blocks_back as u64);
-            let sortition_id = match sort_tx.get_block_snapshot_by_height(block_height)? {
-                Some(sn) => sn.sortition_id,
-                None => break,
-            };
-            windowed_block_commits.push(SortitionDB::get_block_commits_by_block(
-                sort_tx.tx(),
-                &sortition_id,
-            )?);
-            let mut missed_commits_at_height =
-                SortitionDB::get_missed_commits_by_intended(sort_tx.tx(), &sortition_id)?;
-            if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) {
-                missed_commits_at_height
-                    .extend(missed_commit_in_block.into_iter().map(|x| x.clone()));
+            for blocks_back in 0..(MINING_COMMITMENT_WINDOW - 1) {
+                if parent_snapshot.block_height < (blocks_back as u64) {
+                    debug!("Mining commitment window shortened because block height is less than window size";
+                           "block_height" => %parent_snapshot.block_height,
+                           "window_size" => %MINING_COMMITMENT_WINDOW);
+                    break;
+                }
+                let block_height = parent_snapshot.block_height - (blocks_back as u64);
+                let sortition_id = match sort_tx.get_block_snapshot_by_height(block_height)? {
+                    Some(sn) => sn.sortition_id,
+                    None => break,
+                };
+                windowed_block_commits.push(SortitionDB::get_block_commits_by_block(
+                    sort_tx.tx(),
+                    &sortition_id,
+                )?);
+                let mut missed_commits_at_height =
+                    SortitionDB::get_missed_commits_by_intended(sort_tx.tx(), &sortition_id)?;
+                if let Some(missed_commit_in_block) = missed_commits_map.remove(&sortition_id) {
+                    missed_commits_at_height
+                        .extend(missed_commit_in_block.into_iter().map(|x| x.clone()));
+                }
+
+                windowed_missed_commits.push(missed_commits_at_height);
             }
+        } else {
+            // PoX reward-phase is not active
+            debug!(
+                "Block {} is in a prepare phase or post-PoX sunset, so no windowing will take place",
+                parent_snapshot.block_height + 1
+            );

-            windowed_missed_commits.push(missed_commits_at_height);
+            assert_eq!(windowed_block_commits.len(), 1);
+            assert_eq!(windowed_missed_commits.len(), 0);
         }

         // reverse vecs so that windows are in ascending block height order
         windowed_block_commits.reverse();
         windowed_missed_commits.reverse();

-        // figure out if the PoX sunset finished during the window
+        // figure out if the PoX sunset finished during the window,
+        // and/or which sortitions must be PoB due to them falling in a prepare phase.
         let window_end_height = parent_snapshot.block_height + 1;
         let window_start_height = window_end_height + 1 - (windowed_block_commits.len() as u64);
-        let sunset_finished_at = if sunset_end <= window_start_height {
-            Some(0)
-        } else if sunset_end > window_end_height {
-            None
-        } else {
-            Some((sunset_end - window_start_height) as u8)
-        };
+        let mut burn_blocks = vec![false; windowed_block_commits.len()];
+
+        // set burn_blocks flags to accommodate prepare phases and PoX sunset
+        for (i, b) in burn_blocks.iter_mut().enumerate() {
+            if sunset_end <= window_start_height + (i as u64) {
+                // past PoX sunset, so must burn
+                *b = true;
+            } else if burnchain.is_in_prepare_phase(window_start_height + (i as u64)) {
+                // must burn
+                *b = true;
+            } else {
+                // must not burn
+                *b = false;
+            }
+        }

         // calculate the burn distribution from these operations.
-        // The resulting distribution will contain the user burns that match block commits, and
-        // will only contain block commits that consume one leader key (multiple block commits that
-        // consume the same key will be rejected)
+        // The resulting distribution will contain the user burns that match block commits
         let burn_dist = BurnSamplePoint::make_min_median_distribution(
             windowed_block_commits,
             windowed_missed_commits,
-            sunset_finished_at,
+            burn_blocks,
         );

         // find out which user burns and block commits we're going to take
@@ -315,20 +337,16 @@ impl BurnchainSigner {
         }
     }

-    pub fn to_testnet_address(&self) -> String {
+    pub fn to_address(&self, network_type: BitcoinNetworkType) -> String {
         let addr_type = match &self.hash_mode {
             AddressHashMode::SerializeP2PKH | AddressHashMode::SerializeP2WPKH => {
                 BitcoinAddressType::PublicKeyHash
             }
             _ => BitcoinAddressType::ScriptHash,
         };
-        BitcoinAddress::from_bytes(
-            BitcoinNetworkType::Testnet,
-            addr_type,
-            &self.to_address_bits(),
-        )
-        .unwrap()
-        .to_string()
+        BitcoinAddress::from_bytes(network_type, addr_type, &self.to_address_bits())
+            .unwrap()
+            .to_string()
     }

     pub fn to_address_bits(&self) -> Vec<u8> {
@@ -412,7 +430,7 @@ impl Burnchain {
             ),
             ("bitcoin", "regtest") => (
                 BurnchainParameters::bitcoin_regtest(),
-                PoxConstants::testnet_default(),
+                PoxConstants::regtest_default(),
             ),
             (_, _) => {
                 return Err(burnchain_error::UnsupportedBurnchain);
@@ -428,12 +446,17 @@ impl Burnchain {
             consensus_hash_lifetime: params.consensus_hash_lifetime,
             stable_confirmations: params.stable_confirmations,
             first_block_height: params.first_block_height,
+            initial_reward_start_block: params.initial_reward_start_block,
             first_block_hash: params.first_block_hash,
             first_block_timestamp: params.first_block_timestamp,
             pox_constants,
         })
     }

+    pub fn is_mainnet(&self) -> bool {
+        self.network_id == NETWORK_ID_MAINNET
+    }
+
     /// the expected sunset burn is:
     ///   total_commit * (progress through sunset phase) / (sunset phase duration)
     pub fn expected_sunset_burn(&self, burn_height: u64, total_commit: u64) -> u64 {
@@ -468,15 +491,10 @@ impl Burnchain {
             .expect("Overflowed u64 in calculating expected sunset_burn")
     }

-    pub fn is_reward_cycle_start(&self, block_height: u64) -> bool {
-        if block_height <= (self.first_block_height + 1) {
-            // not a reward cycle start if we're the first block after genesis.
-            false
-        } else {
-            let effective_height = block_height - self.first_block_height;
-            // first block of the new reward cycle
-            (effective_height % (self.pox_constants.reward_cycle_length as u64)) == 1
-        }
+    pub fn is_reward_cycle_start(&self, burn_height: u64) -> bool {
+        let effective_height = burn_height - self.first_block_height;
+        // first block of the new reward cycle
+        (effective_height % (self.pox_constants.reward_cycle_length as u64)) == 1
     }

     pub fn reward_cycle_to_block_height(&self, reward_cycle: u64) -> u64 {
@@ -530,6 +548,7 @@ impl Burnchain {
         )
         .unwrap();
         ret.first_block_height = first_block_height;
+        ret.initial_reward_start_block = first_block_height;
         ret.first_block_hash = first_block_hash.clone();
         ret
     }
@@ -859,68 +878,6 @@ impl Burnchain {
         checked_ops
     }

-    /// Verify that two or more block commits do not consume the same VRF key.
-    /// If a key is consumed more than once, then pick the block-commit with the highest burn (to
-    /// stop griefing attacks).  In case of ties, pick the block-commit that occurs earlier in the
-    /// block.
-    pub fn filter_block_commits_with_same_VRF_key(
-        checked_ops: Vec<BlockstackOperationType>,
-    ) -> Vec<BlockstackOperationType> {
-        debug!("Check Blockstack transactions: filter commits that consume the same VRF key");
-        assert!(Burnchain::ops_are_sorted(&checked_ops));
-
-        let mut ret = Vec::with_capacity(checked_ops.len());
-
-        let mut collisions: HashMap<(u64, u32), BlockstackOperationType> = HashMap::new();
-        for op in checked_ops.into_iter() {
-            match op {
-                BlockstackOperationType::LeaderBlockCommit(ref new_block_commit) => {
-                    let key_loc = (
-                        new_block_commit.key_block_ptr as u64,
-                        new_block_commit.key_vtxindex as u32,
-                    );
-                    if let Some(existing_block_commit) = collisions.get_mut(&key_loc) {
-                        if let BlockstackOperationType::LeaderBlockCommit(existing_block_commit) =
-                            existing_block_commit
-                        {
-                            warn!(
-                                "Block commit {} consumes the same VRF key as {}",
-                                &new_block_commit.block_header_hash,
-                                &existing_block_commit.block_header_hash
-                            );
-                            if new_block_commit.burn_fee > existing_block_commit.burn_fee {
-                                warn!("REJECTED({}) block-commit {} for {}: later competing commit {} for {} has a higher burn",
-                                      existing_block_commit.block_height, &existing_block_commit.txid, &existing_block_commit.block_header_hash, &new_block_commit.txid, &new_block_commit.block_header_hash);
-                                collisions.insert(key_loc, op);
-                            } else {
-                                warn!("REJECTED({}) block-commit {} for {}: keeping earlier commit {} for {} which has a higher burn",
-                                      new_block_commit.block_height, &new_block_commit.txid, &new_block_commit.block_header_hash,
-                                      &existing_block_commit.txid, &existing_block_commit.block_header_hash);
-                            }
-                        } else {
-                            unreachable!("Inserted non-block-commit");
-                        }
-                    } else {
-                        collisions.insert(key_loc, op);
-                    }
-                }
-                _ => {
-                    // preserve
-                    ret.push(op);
-                }
-            }
-        }
-
-        // fold back in
-        for (_, op) in collisions.into_iter() {
-            ret.push(op);
-        }
-
-        // preserve block order
-        ret.sort_by(|ref a, ref b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap());
-        ret
-    }
-
     /// Top-level entry point to check and process a block.
     pub fn process_block(
         burnchain: &Burnchain,
@@ -1537,7 +1494,8 @@ pub mod tests {
             working_dir: "/nope".to_string(),
             consensus_hash_lifetime: 24,
             stable_confirmations: 7,
-            first_block_height: first_block_height,
+            first_block_height,
+            initial_reward_start_block: first_block_height,
             first_block_timestamp: 0,
             first_block_hash: BurnchainHeaderHash::zero(),
         };
@@ -2349,156 +2307,6 @@ pub mod tests {
         }
     }

-    #[test]
-    fn test_filter_block_commits_with_same_VRF_key() {
-        let mut block_commits = vec![];
-
-        for i in 0..10 {
-            let op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp {
-                sunset_burn: 0,
-                commit_outs: vec![],
-                block_header_hash: BlockHeaderHash::from_bytes(&vec![
-                    i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0,
-                ])
-                .unwrap(),
-                new_seed: VRFSeed::from_bytes(&vec![
-                    i, i, i, i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0,
-                ])
-                .unwrap(),
-                parent_block_ptr: 3,
-                parent_vtxindex: 1,
-                key_block_ptr: 2, // make them all try to use the same VRF key
-                key_vtxindex: 1,
-                memo: vec![i],
-
-                burn_fee: (i + 1) as u64,
-                input: (Txid([0; 32]), 0),
-                apparent_sender: BurnchainSigner {
-                    public_keys: vec![StacksPublicKey::from_hex(
-                        "02113c274c05ed0b7f9d08f41ca674b22e42188408caaff82a350b024442de353c",
-                    )
-                    .unwrap()],
-                    num_sigs: 1,
-                    hash_mode: AddressHashMode::SerializeP2PKH,
-                },
-
-                txid: Txid::from_bytes(&vec![
-                    i, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, i,
-                ])
-                .unwrap(),
-                vtxindex: (i + 2) as u32,
-                block_height: 5,
-                burn_parent_modulus: (4 % BURN_BLOCK_MINED_AT_MODULUS) as u8,
-                burn_header_hash: BurnchainHeaderHash([0xff; 32]),
-            });
-            block_commits.push(op);
-        }
-
-        let noncolliding_op = BlockstackOperationType::LeaderBlockCommit(LeaderBlockCommitOp {
-            sunset_burn: 0,
-            commit_outs: vec![],
-            block_header_hash: BlockHeaderHash([0xbb; 32]),
-            new_seed: VRFSeed([0xcc; 32]),
-            parent_block_ptr: 3,
-            parent_vtxindex: 1,
-            key_block_ptr: 2,
-            key_vtxindex: 2,
-            memo: vec![0x00],
-
-            burn_fee: 256,
-            input: (Txid([0; 32]), 0),
-            apparent_sender: BurnchainSigner {
-                public_keys: vec![StacksPublicKey::from_hex(
-                    "02113c274c05ed0b7f9d08f41ca674b22e42188408caaff82a350b024442de353c",
-                )
-                .unwrap()],
-                num_sigs: 1,
-                hash_mode: AddressHashMode::SerializeP2PKH,
-            },
-
-            txid: Txid([0xdd; 32]),
-            vtxindex: 1,
-            block_height: 5,
-            burn_parent_modulus: (4 % BURN_BLOCK_MINED_AT_MODULUS) as u8,
-            burn_header_hash: BurnchainHeaderHash([0xff; 32]),
-        });
-
-        let mut csprng: ThreadRng = thread_rng();
-        let keypair: VRFKeypair = VRFKeypair::generate(&mut csprng);
-
-        let key_op = BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp {
-            consensus_hash: ConsensusHash([0x11; 20]),
-            public_key: VRFPublicKey::from_bytes(&keypair.public.to_bytes()).unwrap(),
-            memo: vec![0, 0, 0, 0, 0],
-            address: StacksAddress {
-                version: 1,
-                bytes: Hash160([0x33; 20]),
-            },
-
-            txid: Txid::from_bytes(&vec![
-                4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0,
-            ])
-            .unwrap(),
-            vtxindex: 100,
-            block_height: 5,
-            burn_header_hash: BurnchainHeaderHash([0xff; 32]),
-        });
-
-        // add a non-colliding one
-        block_commits.push(noncolliding_op.clone());
-        block_commits.push(key_op.clone());
-
-        let winner = block_commits[9].clone();
-
-        block_commits.sort_by(|ref a, ref b| a.vtxindex().partial_cmp(&b.vtxindex()).unwrap());
-
-        let ops = Burnchain::filter_block_commits_with_same_VRF_key(block_commits);
-        assert_eq!(ops.len(), 3);
-
-        // first op should be the non-colliding one
-        match (ops[0].clone(), noncolliding_op) {
-            (
-                BlockstackOperationType::LeaderBlockCommit(ref op1),
-                BlockstackOperationType::LeaderBlockCommit(ref op2),
-            ) => {
-                assert_eq!(op1, op2);
-            }
-            (_, _) => {
-                assert!(false);
-            }
-        }
-
-        // second op should be the colliding op with the higehst fee
-        match (ops[1].clone(), winner) {
-            (
-                BlockstackOperationType::LeaderBlockCommit(ref op1),
-                BlockstackOperationType::LeaderBlockCommit(ref op2),
-            ) => {
-                assert_eq!(op1, op2);
-            }
-            (_, _) => {
-                assert!(false);
-            }
-        }
-
-        // third op should be the leader key (untouched)
-        match (ops[2].clone(), key_op) {
-            (
-                BlockstackOperationType::LeaderKeyRegister(ref op1),
-                BlockstackOperationType::LeaderKeyRegister(ref op2),
-            ) => {
-                assert_eq!(op1, op2);
-            }
-            (_, _) => {
-                assert!(false);
-            }
-        }
-    }
-
     #[test]
     fn test_burn_snapshot_sequence() {
         let first_burn_hash = BurnchainHeaderHash::from_hex(
@@ -2518,7 +2326,8 @@ pub mod tests {
             stable_confirmations: 7,
             first_block_timestamp: 0,
             first_block_hash: first_burn_hash,
-            first_block_height: first_block_height,
+            first_block_height,
+            initial_reward_start_block: first_block_height,
         };

         let mut leader_private_keys = vec![];
diff --git a/src/burnchains/mod.rs b/src/burnchains/mod.rs
index eb0e0939bd..505a56ee5f 100644
--- a/src/burnchains/mod.rs
+++ b/src/burnchains/mod.rs
@@ -65,11 +65,6 @@ use util::hash::Hash160;

 use util::secp256k1::MessageSignature;

-const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 1894315;
-const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1606093490;
-const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str =
-    "000000000000003efa81a29f2ee638ca4d4928a073e68789bb06a4fc0b153653";
-
 #[derive(Serialize, Deserialize)]
 pub struct Txid(pub [u8; 32]);
 impl_array_newtype!(Txid, u8, 32);
@@ -107,6 +102,7 @@ pub struct BurnchainParameters {
     pub first_block_height: u64,
     pub first_block_hash: BurnchainHeaderHash,
     pub first_block_timestamp: u32,
+    pub initial_reward_start_block: u64,
 }

 impl BurnchainParameters {
@@ -126,9 +122,11 @@ impl BurnchainParameters {
             network_id: BITCOIN_NETWORK_ID_MAINNET,
             stable_confirmations: 7,
             consensus_hash_lifetime: 24,
-            first_block_height: 0,
-            first_block_hash: BurnchainHeaderHash::zero(),
-            first_block_timestamp: 0,
+            first_block_height: BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT,
+            first_block_hash: BurnchainHeaderHash::from_hex(BITCOIN_MAINNET_FIRST_BLOCK_HASH)
+                .unwrap(),
+            first_block_timestamp: BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP,
+            initial_reward_start_block: BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK,
         }
     }

@@ -143,6 +141,7 @@ impl BurnchainParameters {
             first_block_hash: BurnchainHeaderHash::from_hex(BITCOIN_TESTNET_FIRST_BLOCK_HASH)
                 .unwrap(),
             first_block_timestamp: BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP,
+            initial_reward_start_block: BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT - 10_000,
         }
     }

@@ -153,9 +152,11 @@ impl BurnchainParameters {
             network_id: BITCOIN_NETWORK_ID_REGTEST,
             stable_confirmations: 1,
             consensus_hash_lifetime: 24,
-            first_block_height: 0,
-            first_block_hash: BurnchainHeaderHash::zero(),
-            first_block_timestamp: 0,
+            first_block_height: BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT,
+            first_block_hash: BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH)
+                .unwrap(),
+            first_block_timestamp: BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP,
+            initial_reward_start_block: BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT,
         }
     }

@@ -307,6 +308,7 @@ pub struct Burnchain {
     pub first_block_hash: BurnchainHeaderHash,
     pub first_block_timestamp: u32,
     pub pox_constants: PoxConstants,
+    pub initial_reward_start_block: u64,
 }

 #[derive(Debug, PartialEq, Clone, Serialize, Deserialize)]
@@ -383,22 +385,34 @@ impl PoxConstants {
             80,
             25,
             5,
-            POX_SUNSET_START,
-            POX_SUNSET_END,
+            BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_START,
+            BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END,
         )
     }

     pub fn testnet_default() -> PoxConstants {
         PoxConstants::new(
-            150, // 120 reward slots; 30 prepare-phase slots
-            30,
-            20,
+            50, // 40 reward slots; 10 prepare-phase slots
+            10,
+            6,
             3333333333333333,
-            5,
-            POX_SUNSET_START,
-            POX_SUNSET_END,
+            1,
+            BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_START,
+            BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT + POX_SUNSET_END,
         ) // total liquid supply is 40000000000000000 µSTX
     }
+
+    pub fn regtest_default() -> PoxConstants {
+        PoxConstants::new(
+            5,
+            1,
+            1,
+            3333333333333333,
+            1,
+            BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_START,
+            BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT + POX_SUNSET_END,
+        )
+    }
 }

 /// Structure for encoding our view of the network
@@ -1270,7 +1284,7 @@ pub mod test {
     impl TestBurnchainNode {
         pub fn new() -> TestBurnchainNode {
             let first_block_height = 100;
-            let first_block_hash = FIRST_BURNCHAIN_BLOCK_HASH.clone();
+            let first_block_hash = BurnchainHeaderHash([0u8; 32]);
             let db = SortitionDB::connect_test(first_block_height, &first_block_hash).unwrap();
             TestBurnchainNode {
                 sortdb: db,
diff --git a/src/chainstate/burn/db/processing.rs b/src/chainstate/burn/db/processing.rs
index 78350d363c..bb2b845d1e 100644
--- a/src/chainstate/burn/db/processing.rs
+++ b/src/chainstate/burn/db/processing.rs
@@ -121,7 +121,7 @@ impl<'a> SortitionHandleTx<'a> {
         let this_block_hash = block_header.block_hash.clone();

         // make the burn distribution, and in doing so, identify the user burns that we'll keep
-        let state_transition = BurnchainStateTransition::from_block_ops(self, parent_snapshot, this_block_ops, missed_commits, burnchain.pox_constants.sunset_end)
+        let state_transition = BurnchainStateTransition::from_block_ops(self, burnchain, parent_snapshot, this_block_ops, missed_commits, burnchain.pox_constants.sunset_end)
             .map_err(|e| {
                 error!("TRANSACTION ABORTED when converting {} blockstack operations in block {} ({}) to a burn distribution: {:?}", this_block_ops.len(), this_block_height, &this_block_hash, e);
                 e
@@ -192,9 +192,10 @@ impl<'a> SortitionHandleTx<'a> {
         // was this snapshot the first with mining?
         //  compute the initial block rewards.
         let initialize_bonus = if snapshot.sortition && parent_snapshot.total_burn == 0 {
-            let blocks_without_winners = snapshot.block_height - self.context.first_block_height;
+            let blocks_without_winners =
+                snapshot.block_height - burnchain.initial_reward_start_block;
             let mut total_reward = 0;
-            for burn_block_height in self.context.first_block_height..snapshot.block_height {
+            for burn_block_height in burnchain.initial_reward_start_block..snapshot.block_height {
                 total_reward += StacksChainState::get_coinbase_reward(
                     burn_block_height,
                     self.context.first_block_height,
@@ -303,11 +304,7 @@ impl<'a> SortitionHandleTx<'a> {
         });

         // block-wide check: no duplicate keys registered
-        let ret_filtered = Burnchain::filter_block_VRF_dups(blockstack_txs);
-        assert!(Burnchain::ops_are_sorted(&ret_filtered));
-
-        // block-wide check: at most one block-commit can consume a VRF key
-        let block_ops = Burnchain::filter_block_commits_with_same_VRF_key(ret_filtered);
+        let block_ops = Burnchain::filter_block_VRF_dups(blockstack_txs);
         assert!(Burnchain::ops_are_sorted(&block_ops));

         // process them
@@ -370,3 +367,136 @@ impl<'a> SortitionHandleTx<'a> {
         Ok(new_snapshot)
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use burnchains::bitcoin::{address::BitcoinAddress, BitcoinNetworkType};
+    use burnchains::*;
+    use chainstate::burn::db::sortdb::{tests::test_append_snapshot, SortitionDB};
+    use chainstate::burn::operations::{
+        leader_block_commit::BURN_BLOCK_MINED_AT_MODULUS, LeaderBlockCommitOp, LeaderKeyRegisterOp,
+    };
+    use chainstate::burn::*;
+    use chainstate::stacks::{StacksAddress, StacksPublicKey};
+    use core::MICROSTACKS_PER_STACKS;
+    use util::{hash::hex_bytes, vrf::VRFPublicKey};
+
+    #[test]
+    fn test_initial_block_reward() {
+        let first_burn_hash = BurnchainHeaderHash([0; 32]);
+
+        let leader_key = LeaderKeyRegisterOp {
+            consensus_hash: ConsensusHash([0x22; 20]),
+            public_key: VRFPublicKey::from_hex(
+                "a366b51292bef4edd64063d9145c617fec373bceb0758e98cd72becd84d54c7a",
+            )
+            .unwrap(),
+            memo: vec![01, 02, 03, 04, 05],
+            address: StacksAddress::from_bitcoin_address(
+                &BitcoinAddress::from_scriptpubkey(
+                    BitcoinNetworkType::Testnet,
+                    &hex_bytes("76a9140be3e286a15ea85882761618e366586b5574100d88ac").unwrap(),
+                )
+                .unwrap(),
+            ),
+
+            txid: Txid::from_bytes_be(
+                &hex_bytes("1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562")
+                    .unwrap(),
+            )
+            .unwrap(),
+            vtxindex: 400,
+            block_height: 101,
+            burn_header_hash: BurnchainHeaderHash([0x01; 32]),
+        };
+
+        let block_commit = LeaderBlockCommitOp {
+            sunset_burn: 0,
+            block_header_hash: BlockHeaderHash([0x22; 32]),
+            new_seed: VRFSeed::from_hex(
+                "3333333333333333333333333333333333333333333333333333333333333333",
+            )
+            .unwrap(),
+            parent_block_ptr: 0,
+            parent_vtxindex: 0,
+            key_block_ptr: 101,
+            key_vtxindex: 400,
+            memo: vec![0x80],
+
+            commit_outs: vec![],
+            burn_fee: 12345,
+            input: (Txid([0; 32]), 0),
+            apparent_sender: BurnchainSigner {
+                public_keys: vec![StacksPublicKey::from_hex(
+                    "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0",
+                )
+                .unwrap()],
+                num_sigs: 1,
+                hash_mode: AddressHashMode::SerializeP2PKH,
+            },
+
+            txid: Txid::from_bytes_be(
+                &hex_bytes("3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27cf")
+                    .unwrap(),
+            )
+            .unwrap(),
+            vtxindex: 400,
+            block_height: 102,
+            burn_parent_modulus: (101 % BURN_BLOCK_MINED_AT_MODULUS) as u8,
+            burn_header_hash: BurnchainHeaderHash([0x03; 32]),
+        };
+
+        let mut burnchain = Burnchain::default_unittest(100, &first_burn_hash);
+        burnchain.initial_reward_start_block = 90;
+        let mut db = SortitionDB::connect_test(100, &first_burn_hash).unwrap();
+
+        let snapshot = test_append_snapshot(
+            &mut db,
+            BurnchainHeaderHash([0x01; 32]),
+            &vec![BlockstackOperationType::LeaderKeyRegister(leader_key)],
+        );
+
+        let next_block_header = BurnchainBlockHeader {
+            block_height: 102,
+            block_hash: BurnchainHeaderHash([0x03; 32]),
+            parent_block_hash: BurnchainHeaderHash([0x01; 32]),
+            num_txs: 1,
+            timestamp: 10,
+        };
+
+        {
+            let mut ic = SortitionHandleTx::begin(&mut db, &snapshot.sortition_id).unwrap();
+
+            let processed = ic
+                .process_block_ops(
+                    &burnchain,
+                    &snapshot,
+                    &next_block_header,
+                    vec![BlockstackOperationType::LeaderBlockCommit(block_commit)],
+                    None,
+                    PoxId::initial(),
+                    None,
+                    0,
+                )
+                .unwrap();
+
+            let reward_per_block = ic
+                .get_initial_mining_bonus_per_block(&processed.0.sortition_id)
+                .unwrap()
+                .unwrap();
+            let remaining = ic
+                .get_initial_mining_bonus_remaining(&processed.0.sortition_id)
+                .unwrap();
+            assert_eq!(
+                reward_per_block,
+                1000 * (MICROSTACKS_PER_STACKS as u128) * (102 - 90)
+                    / (INITIAL_MINING_BONUS_WINDOW as u128)
+            );
+            assert_eq!(
+                remaining,
+                reward_per_block * (INITIAL_MINING_BONUS_WINDOW as u128 - 1)
+            );
+        }
+    }
+}
diff --git a/src/chainstate/burn/db/sortdb.rs b/src/chainstate/burn/db/sortdb.rs
index 1228aa34e4..48e5cd66dd 100644
--- a/src/chainstate/burn/db/sortdb.rs
+++ b/src/chainstate/burn/db/sortdb.rs
@@ -86,7 +86,7 @@ use net::neighbors::MAX_NEIGHBOR_BLOCK_DELAY;

 use std::collections::HashMap;

-use core::{FIRST_BURNCHAIN_BLOCK_HASH, FIRST_STACKS_BLOCK_HASH, INITIAL_MINING_BONUS_WINDOW};
+use core::FIRST_STACKS_BLOCK_HASH;

 use vm::representations::{ClarityName, ContractName};
 use vm::types::Value;
@@ -1755,31 +1755,30 @@ impl<'a> SortitionHandleConn<'a> {
                 warn!("Missing parent"; "burn_header_hash" => %prepare_end_bhh);
                 BurnchainError::MissingParentBlock
             })?;
-        let my_height = SortitionDB::get_block_height(self.deref(), &prepare_end_sortid)?
+        let block_height = SortitionDB::get_block_height(self.deref(), &prepare_end_sortid)?
             .expect("CORRUPTION: SortitionID known, but no block height in SQL store");

         // if this block is the _end_ of a prepare phase,
-        if !(my_height % pox_consts.reward_cycle_length == 0) {
-            test_debug!(
-                "My height = {}, reward cycle length == {}",
-                my_height,
-                pox_consts.reward_cycle_length
+        let effective_height = block_height - self.context.first_block_height as u32;
+        let position_in_cycle = effective_height % pox_consts.reward_cycle_length;
+        if position_in_cycle != 0 {
+            debug!(
+                "effective_height = {}, reward cycle length == {}",
+                effective_height, pox_consts.reward_cycle_length
             );
             return Err(CoordinatorError::NotPrepareEndBlock);
         }

-        let prepare_end = my_height;
-
-        // if this block isn't greater than prepare_length, then this shouldn't be the end of a prepare block
-        if prepare_end < pox_consts.prepare_length {
-            test_debug!(
-                "prepare_end = {}, pox_consts.prepare_length = {}",
-                prepare_end,
-                pox_consts.prepare_length
+        if effective_height == 0 {
+            debug!(
+                "effective_height = {}, reward cycle length == {}",
+                effective_height, pox_consts.reward_cycle_length
             );
-            return Err(CoordinatorError::NotPrepareEndBlock);
+            return Ok(None);
         }

-        let prepare_begin = prepare_end - pox_consts.prepare_length;
+        let prepare_end = block_height;
+        let prepare_begin = prepare_end.saturating_sub(pox_consts.prepare_length);

         let mut candidate_anchors = HashMap::new();
         let mut memoized_candidates: HashMap<_, (Txid, u64)> = HashMap::new();
@@ -2341,7 +2340,7 @@ impl<'a> SortitionDBConn<'a> {
             let ancestor_hash =
                 SortitionDB::get_ancestor_snapshot(&self, height as u64, &chain_tip.sortition_id)?
                     .map(|sn| sn.burn_header_hash)
-                    .unwrap_or(FIRST_BURNCHAIN_BLOCK_HASH.clone());
+                    .unwrap_or(burnchain.first_block_hash.clone());
             last_burn_block_hashes.insert(height, ancestor_hash);
         }

@@ -3251,7 +3250,7 @@ impl<'a> SortitionHandleTx<'a> {
         Ok(root_hash)
     }

-    fn get_initial_mining_bonus_remaining(
+    pub fn get_initial_mining_bonus_remaining(
         &mut self,
         chain_tip: &SortitionId,
     ) -> Result<u128, db_error> {
@@ -3260,7 +3259,7 @@ impl<'a> SortitionHandleTx<'a> {
             .unwrap_or(Ok(0))
     }

-    fn get_initial_mining_bonus_per_block(
+    pub fn get_initial_mining_bonus_per_block(
         &mut self,
         chain_tip: &SortitionId,
     ) -> Result<Option<u128>, db_error> {
@@ -3916,7 +3915,7 @@ impl ChainstateDB for SortitionDB {
 }

 #[cfg(test)]
-mod tests {
+pub mod tests {
     use super::*;

     use util::db::Error as db_error;
@@ -3963,7 +3962,7 @@ pub mod tests {
         tx.commit().unwrap();
     }

-    fn test_append_snapshot(
+    pub fn test_append_snapshot(
         db: &mut SortitionDB,
         next_hash: BurnchainHeaderHash,
         block_ops: &Vec<BlockstackOperationType>,
diff --git a/src/chainstate/burn/distribution.rs b/src/chainstate/burn/distribution.rs
index 3c53d423fd..796537ebf8 100644
--- a/src/chainstate/burn/distribution.rs
+++ b/src/chainstate/burn/distribution.rs
@@ -52,19 +52,19 @@ pub struct BurnSamplePoint {
     pub user_burns: Vec<UserBurnSupportOp>,
 }

-#[derive(Clone)]
+#[derive(Debug, Clone)]
 enum LinkedCommitIdentifier {
     Missed(MissedBlockCommit),
     Valid(LeaderBlockCommitOp),
 }

-#[derive(Clone)]
+#[derive(Debug, Clone)]
 struct LinkedCommitmentScore {
     rel_block_height: u8,
     op: LinkedCommitIdentifier,
 }

-#[derive(PartialEq, Eq, Hash)]
+#[derive(Debug, PartialEq, Eq, Hash)]
 struct UserBurnIdentifier {
     rel_block_height: u8,
     key_vtxindex: u16,
@@ -93,6 +93,13 @@ impl LinkedCommitIdentifier {
             LinkedCommitIdentifier::Valid(ref op) => op.burn_fee,
         }
     }
+
+    fn txid(&self) -> &Txid {
+        match self {
+            LinkedCommitIdentifier::Missed(ref op) => &op.txid,
+            LinkedCommitIdentifier::Valid(ref op) => &op.txid,
+        }
+    }
 }

 impl BurnSamplePoint {
@@ -123,13 +130,17 @@ impl BurnSamplePoint {
     ///   block commits and user support burns.
     ///
     ///  All operations need to be supplied in an ordered Vec of Vecs containing
-    ///    the ops at each block height in MINING_COMMITMENT_WINDOW
+    ///    the ops at each block height in a mining commit window.  Normally, this window
+    ///    is the constant `MINING_COMMITMENT_WINDOW`, except during prepare-phases and post-PoX
+    ///    sunset.  In either of these two cases, the window is only one block.  The code does not
+    ///    consider which window is active; it merely deduces it by inspecting the length of the
+    ///    given `block_commits` argument.
     ///
     ///  If a burn refers to more than one commitment, its burn amount is *split* between those
     ///    commitments
     ///
-    ///  Burns are evaluated over MINING_COMMITMENT_WINDOW, where the effective burn for
-    ///    a commitment is := min(last_burn_amount, median over MINING_COMMITMENT_WINDOW)
+    ///  Burns are evaluated over the mining commitment window, where the effective burn for
+    ///    a commitment is := min(last_burn_amount, median over the window)
     ///
     ///  Returns the distribution, which consumes the given lists of operations.
     ///
@@ -142,18 +153,22 @@ impl BurnSamplePoint {
     ///    relative block heights start at 0 and increment towards the present.  There
     ///    will be no such commits for the current sortition, so this vec will have
     ///    `missed_commits.len() = block_commits.len() - 1`
-    ///  * `sunset_finished_at`: if set, this indicates that the PoX sunset finished before or
-    ///    during the mining window.  This value is the first index in the block_commits
-    ///    for which PoX is fully disabled (i.e., the block commit has a single burn output).
+    ///  * `burn_blocks`: this is a vector of booleans that indicate whether or not a block-commit
+    ///    occurred during a PoB-only sortition or a possibly-PoX sortition.  The former occurs
+    ///    during either a prepare phase or after PoX sunset, and must have only one (burn) output.
+    ///    The latter occurs everywhere else, and must have `OUTPUTS_PER_COMMIT` outputs after the
+    ///    `OP_RETURN` payload.  The length of this vector must be equal to the length of the
+    ///    `block_commits` vector.  `burn_blocks[i]` is `true` if the `ith` block-commit must be PoB.
pub fn make_min_median_distribution( mut block_commits: Vec>, mut missed_commits: Vec>, - sunset_finished_at: Option, + burn_blocks: Vec, ) -> Vec { // sanity check - assert!(MINING_COMMITMENT_WINDOW > 0); let window_size = block_commits.len() as u8; + assert!(window_size > 0); BurnSamplePoint::sanity_check_window(&block_commits, &missed_commits); + assert_eq!(burn_blocks.len(), block_commits.len()); // first, let's link all of the current block commits to the priors let mut commits_with_priors: Vec<_> = @@ -186,29 +201,41 @@ impl BurnSamplePoint { .map(|missed| (missed.txid.clone(), missed)) .collect(); - let sunset_finished = if let Some(sunset_finished_at) = sunset_finished_at { - sunset_finished_at <= rel_block_height - } else { - false - }; - let expected_index = LeaderBlockCommitOp::expected_chained_utxo(sunset_finished); + // find the UTXO index that each last linked_commit must have spent in order to be + // chained to the block-commit (or missed-commit) at this relative block height + let commit_is_burn = burn_blocks[rel_block_height as usize]; + let expected_index = LeaderBlockCommitOp::expected_chained_utxo(commit_is_burn); + for linked_commit in commits_with_priors.iter_mut() { let end = linked_commit.iter().rev().find_map(|o| o.as_ref()).unwrap(); // guaranteed to be at least 1 non-none entry - // check that the commit is using the right output index + // if end spent a UTXO at this height, then it must match the expected index if end.op.spent_output() != expected_index { + test_debug!("Block-commit {} did not spent a UTXO at rel_block_height {}, because it spent output {},{} (expected {})", + end.op.txid(), rel_block_height, end.op.spent_output(), end.op.spent_txid(), expected_index); continue; } - let referenced_op = - if let Some(referenced_commit) = cur_commits_map.remove(&end.op.spent_txid()) { - // found a chained utxo - Some(LinkedCommitIdentifier::Valid(referenced_commit)) - } else if let Some(missed_op) = cur_missed_map.remove(&end.op.spent_txid()) { - // found a missed commit - Some(LinkedCommitIdentifier::Missed(missed_op)) - } else { - None - }; + + // find out which block-commit we chained to + let referenced_op = if let Some(referenced_commit) = + cur_commits_map.remove(end.op.spent_txid()) + { + // found a chained utxo + Some(LinkedCommitIdentifier::Valid(referenced_commit)) + } else if let Some(missed_op) = cur_missed_map.remove(end.op.spent_txid()) { + // found a missed commit + Some(LinkedCommitIdentifier::Missed(missed_op)) + } else { + test_debug!( + "No chained UTXO to a valid or missing commit at relative block height {} from {}: ({},{})", + rel_block_height, + end.op.txid(), + end.op.spent_txid(), + end.op.spent_output() + ); + continue; + }; + // if we found a referenced op, connect it if let Some(referenced_op) = referenced_op { linked_commit[(window_size - 1 - rel_block_height) as usize] = @@ -285,7 +312,7 @@ impl BurnSamplePoint { _consumed_leader_keys: Vec, user_burns: Vec, ) -> Vec { - Self::make_min_median_distribution(vec![all_block_candidates], vec![], None) + Self::make_min_median_distribution(vec![all_block_candidates], vec![], vec![true]) } /// Calculate the ranges between 0 and 2**256 - 1 over which each point in the burn sample @@ -467,7 +494,7 @@ mod tests { LeaderBlockCommitOp { block_header_hash: BlockHeaderHash(block_header_hash), new_seed: VRFSeed([0; 32]), - parent_block_ptr: 0, + parent_block_ptr: (block_id - 1) as u32, parent_vtxindex: 0, key_block_ptr: vrf_ident, key_vtxindex: 0, @@ -540,13 +567,14 @@ mod tests { let mut result = 
BurnSamplePoint::make_min_median_distribution( commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], - Some(3), + vec![false, false, false, true, true, true], ); assert_eq!(result.len(), 2, "Should be two miners"); result.sort_by_key(|sample| sample.candidate.txid); + // block-commits are currently malformed -- the post-sunset commits spend the wrong UTXO. assert_eq!(result[0].burns, 1); assert_eq!(result[1].burns, 1); @@ -575,7 +603,7 @@ mod tests { let mut result = BurnSamplePoint::make_min_median_distribution( commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], - Some(3), + vec![false, false, false, true, true, true], ); assert_eq!(result.len(), 2, "Should be two miners"); @@ -646,7 +674,7 @@ mod tests { let mut result = BurnSamplePoint::make_min_median_distribution( commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], - None, + vec![false, false, false, false, false, false], ); assert_eq!(result.len(), 2, "Should be two miners"); @@ -710,7 +738,7 @@ mod tests { let mut result = BurnSamplePoint::make_min_median_distribution( commits.clone(), vec![vec![]; (MINING_COMMITMENT_WINDOW - 1) as usize], - None, + vec![false, false, false, false, false, false], ); assert_eq!(result.len(), 2, "Should be two miners"); @@ -769,7 +797,7 @@ mod tests { let mut result = BurnSamplePoint::make_min_median_distribution( commits.clone(), missed_commits.clone(), - None, + vec![false, false, false, false, false, false], ); assert_eq!(result.len(), 2, "Should be two miners"); diff --git a/src/chainstate/burn/operations/leader_block_commit.rs b/src/chainstate/burn/operations/leader_block_commit.rs index 110cb48767..92e73d8d84 100644 --- a/src/chainstate/burn/operations/leader_block_commit.rs +++ b/src/chainstate/burn/operations/leader_block_commit.rs @@ -148,9 +148,9 @@ impl LeaderBlockCommitOp { } as u8; } - pub fn expected_chained_utxo(sunset_finished: bool) -> u32 { - if sunset_finished { - 2 // if sunset has occurred, chained commits should spend the output after the burn commit + pub fn expected_chained_utxo(burn_only: bool) -> u32 { + if burn_only { + 2 // if sunset has occurred, or we're in the prepare phase, then chained commits should spend the output after the burn commit } else { // otherwise, it's the output after the last PoX output (OUTPUTS_PER_COMMIT as u32) + 1 @@ -394,6 +394,18 @@ impl LeaderBlockCommitOp { previous_is_burn && output_addr.is_burn() }) } + + pub fn spent_txid(&self) -> &Txid { + &self.input.0 + } + + pub fn spent_output(&self) -> u32 { + self.input.1 + } + + pub fn is_first_block(&self) -> bool { + self.parent_block_ptr == 0 && self.parent_vtxindex == 0 + } } impl StacksMessageCodec for LeaderBlockCommitOp { @@ -439,6 +451,16 @@ pub struct MissedBlockCommit { pub intended_sortition: SortitionId, } +impl MissedBlockCommit { + pub fn spent_txid(&self) -> &Txid { + &self.input.0 + } + + pub fn spent_output(&self) -> u32 { + self.input.1 + } +} + impl RewardSetInfo { /// Takes an Option and produces the commit_outs /// for a corresponding LeaderBlockCommitOp. 
If RewardSetInfo is none, @@ -533,7 +555,20 @@ impl LeaderBlockCommitOp { let expect_pox_descendant = if self.all_outputs_burn() { false } else { - if self.commit_outs.len() != reward_set_info.recipients.len() { + let mut check_recipients: Vec<_> = reward_set_info + .recipients + .iter() + .map(|(addr, _)| addr.clone()) + .collect(); + + if check_recipients.len() == 1 { + // If the number of recipients in the set was odd, we need to pad + // with a burn address + check_recipients + .push(StacksAddress::burn_address(burnchain.is_mainnet())) + } + + if self.commit_outs.len() != check_recipients.len() { warn!( "Invalid block commit: expected {} PoX transfers, but commit has {}", reward_set_info.recipients.len(), @@ -544,11 +579,6 @@ impl LeaderBlockCommitOp { // sort check_recipients and commit_outs so that we can perform an // iterative equality check - let mut check_recipients: Vec<_> = reward_set_info - .recipients - .iter() - .map(|(addr, _)| addr.clone()) - .collect(); check_recipients.sort(); let mut commit_outs = self.commit_outs.clone(); commit_outs.sort(); @@ -1433,7 +1463,8 @@ mod tests { working_dir: "/nope".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, - first_block_height: first_block_height, + first_block_height, + initial_reward_start_block: first_block_height, first_block_timestamp: 0, first_block_hash: first_burn_hash.clone(), }; @@ -1557,19 +1588,6 @@ mod tests { vec![], ]; - let consumed_leader_keys = vec![ - // 122 - vec![], - // 123 - vec![], - // 124 - vec![], - // 125 - vec![leader_key_1.clone()], - // 126 - vec![], - ]; - let tip_index_root = { let mut prev_snapshot = SortitionDB::get_first_block_snapshot(db.conn()).unwrap(); for i in 0..block_header_hashes.len() { @@ -2055,6 +2073,56 @@ mod tests { }, res: Ok(()), }, + CheckFixture { + // accept -- also consumes leader_key_1 + op: LeaderBlockCommitOp { + sunset_burn: 0, + block_header_hash: BlockHeaderHash::from_bytes( + &hex_bytes( + "2222222222222222222222222222222222222222222222222222222222222222", + ) + .unwrap(), + ) + .unwrap(), + new_seed: VRFSeed::from_bytes( + &hex_bytes( + "3333333333333333333333333333333333333333333333333333333333333333", + ) + .unwrap(), + ) + .unwrap(), + parent_block_ptr: 0, + parent_vtxindex: 0, + key_block_ptr: 124, + key_vtxindex: 456, + memo: vec![0x80], + commit_outs: vec![], + + burn_fee: 12345, + input: (Txid([0; 32]), 0), + apparent_sender: BurnchainSigner { + public_keys: vec![StacksPublicKey::from_hex( + "02d8015134d9db8178ac93acbc43170a2f20febba5087a5b0437058765ad5133d0", + ) + .unwrap()], + num_sigs: 1, + hash_mode: AddressHashMode::SerializeP2PKH, + }, + + txid: Txid::from_bytes_be( + &hex_bytes( + "3c07a0a93360bc85047bbaadd49e30c8af770f73a37e10fec400174d2e5f27cf", + ) + .unwrap(), + ) + .unwrap(), + vtxindex: 444, + block_height: 126, + burn_parent_modulus: (125 % BURN_BLOCK_MINED_AT_MODULUS) as u8, + burn_header_hash: block_126_hash.clone(), + }, + res: Ok(()), + }, ]; for (ix, fixture) in fixtures.iter().enumerate() { diff --git a/src/chainstate/burn/operations/leader_key_register.rs b/src/chainstate/burn/operations/leader_key_register.rs index f9bc7ae23a..14a9e630d9 100644 --- a/src/chainstate/burn/operations/leader_key_register.rs +++ b/src/chainstate/burn/operations/leader_key_register.rs @@ -109,7 +109,7 @@ impl LeaderKeyRegisterOp { 0 2 3 23 55 80 |------|--|---------------|-----------------------|---------------------------| magic op consensus hash proving public key memo - + (ignored) (ignored) Note that `data` is missing the first 3 bytes --
the magic and op have been stripped */ @@ -205,6 +205,7 @@ impl StacksMessageCodec for LeaderKeyRegisterOp { 0 2 3 23 55 80 |------|--|---------------|-----------------------|---------------------------| magic op consensus hash proving public key memo + (ignored) (ignored) */ fn consensus_serialize(&self, fd: &mut W) -> Result<(), net_error> { write_next(fd, &(Opcodes::LeaderKeyRegister as u8))?; @@ -234,7 +235,11 @@ impl LeaderKeyRegisterOp { LeaderKeyRegisterOp::parse_from_tx(block_header.block_height, &block_header.block_hash, tx) } - pub fn check(&self, burnchain: &Burnchain, tx: &mut SortitionHandleTx) -> Result<(), op_error> { + pub fn check( + &self, + _burnchain: &Burnchain, + tx: &mut SortitionHandleTx, + ) -> Result<(), op_error> { ///////////////////////////////////////////////////////////////// // Keys must be unique -- no one can register the same key twice ///////////////////////////////////////////////////////////////// @@ -250,23 +255,6 @@ impl LeaderKeyRegisterOp { return Err(op_error::LeaderKeyAlreadyRegistered); } - ///////////////////////////////////////////////////////////////// - // Consensus hash must be recent and valid - ///////////////////////////////////////////////////////////////// - - let consensus_hash_recent = tx.is_fresh_consensus_hash( - burnchain.consensus_hash_lifetime.into(), - &self.consensus_hash, - )?; - - if !consensus_hash_recent { - warn!( - "Invalid leader key registration: invalid consensus hash {}", - &self.consensus_hash - ); - return Err(op_error::LeaderKeyBadConsensusHash); - } - Ok(()) } } @@ -511,7 +499,8 @@ pub mod tests { working_dir: "/nope".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, - first_block_height: first_block_height, + first_block_height, + initial_reward_start_block: first_block_height, first_block_hash: first_burn_hash.clone(), first_block_timestamp: 0, }; @@ -692,43 +681,6 @@ pub mod tests { }, res: Err(op_error::LeaderKeyAlreadyRegistered), }, - CheckFixture { - // reject -- invalid consensus hash - op: LeaderKeyRegisterOp { - consensus_hash: ConsensusHash::from_bytes( - &hex_bytes("1000000000000000000000000000000000000000").unwrap(), - ) - .unwrap(), - public_key: VRFPublicKey::from_bytes( - &hex_bytes( - "bb519494643f79f1dea0350e6fb9a1da88dfdb6137117fc2523824a8aa44fe1c", - ) - .unwrap(), - ) - .unwrap(), - memo: vec![01, 02, 03, 04, 05], - address: StacksAddress::from_bitcoin_address( - &BitcoinAddress::from_scriptpubkey( - BitcoinNetworkType::Testnet, - &hex_bytes("76a9140be3e286a15ea85882761618e366586b5574100d88ac") - .unwrap(), - ) - .unwrap(), - ), - - txid: Txid::from_bytes_be( - &hex_bytes( - "1bfa831b5fc56c858198acb8e77e5863c1e9d8ac26d49ddb914e24d8d4083562", - ) - .unwrap(), - ) - .unwrap(), - vtxindex: 456, - block_height: 123, - burn_header_hash: block_123_hash.clone(), - }, - res: Err(op_error::LeaderKeyBadConsensusHash), - }, CheckFixture { // accept op: LeaderKeyRegisterOp { diff --git a/src/chainstate/burn/operations/mod.rs b/src/chainstate/burn/operations/mod.rs index 10c343e2fe..3912e861c1 100644 --- a/src/chainstate/burn/operations/mod.rs +++ b/src/chainstate/burn/operations/mod.rs @@ -78,7 +78,6 @@ pub enum Error { // all the things that can go wrong with leader key register LeaderKeyAlreadyRegistered, - LeaderKeyBadConsensusHash, // all the things that can go wrong with user burn supports UserBurnSupportBadConsensusHash, @@ -125,10 +124,6 @@ impl fmt::Display for Error { Error::LeaderKeyAlreadyRegistered => { write!(f, "Leader key has already been registered") } - 
Error::LeaderKeyBadConsensusHash => { - write!(f, "Leader key has an invalid consensus hash") - } - Error::UserBurnSupportBadConsensusHash => { write!(f, "User burn support has an invalid consensus hash") } diff --git a/src/chainstate/burn/operations/user_burn_support.rs b/src/chainstate/burn/operations/user_burn_support.rs index 3a3780b873..ecb02ea475 100644 --- a/src/chainstate/burn/operations/user_burn_support.rs +++ b/src/chainstate/burn/operations/user_burn_support.rs @@ -510,7 +510,8 @@ mod tests { working_dir: "/nope".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, - first_block_height: first_block_height, + first_block_height, + initial_reward_start_block: first_block_height, first_block_timestamp: 0, first_block_hash: first_burn_hash.clone(), }; diff --git a/src/chainstate/burn/sortition.rs b/src/chainstate/burn/sortition.rs index c7fced73da..fd27287dbd 100644 --- a/src/chainstate/burn/sortition.rs +++ b/src/chainstate/burn/sortition.rs @@ -470,7 +470,8 @@ mod test { consensus_hash_lifetime: 24, stable_confirmations: 7, first_block_timestamp: 0, - first_block_height: first_block_height, + first_block_height, + initial_reward_start_block: first_block_height, first_block_hash: first_burn_hash.clone(), }; diff --git a/src/chainstate/coordinator/mod.rs b/src/chainstate/coordinator/mod.rs index ee18789414..2c6469fc2b 100644 --- a/src/chainstate/coordinator/mod.rs +++ b/src/chainstate/coordinator/mod.rs @@ -251,6 +251,7 @@ impl<'a, T: BlockEventDispatcher> attachments_tx: SyncSender>, dispatcher: &mut T, comms: CoordinatorReceivers, + atlas_config: AtlasConfig, ) where T: BlockEventDispatcher, { @@ -281,7 +282,7 @@ impl<'a, T: BlockEventDispatcher> dispatcher: Some(dispatcher), notifier: arc_notices, reward_set_provider: OnChainRewardSetProvider(), - atlas_config: AtlasConfig::default(), + atlas_config, }; loop { @@ -580,14 +581,15 @@ impl<'a, T: BlockEventDispatcher, N: CoordinatorNotices, U: RewardSetProvider> &mut self, burn_header: &BurnchainBlockHeader, ) -> Result, Error> { - let sortition_tip = self + let sortition_tip_id = self .canonical_sortition_tip .as_ref() .expect("FATAL: Processing anchor block, but no known sortition tip"); + get_reward_cycle_info( burn_header.block_height, &burn_header.parent_block_hash, - sortition_tip, + sortition_tip_id, &self.burnchain, &mut self.chain_state_db, &self.sortition_db, diff --git a/src/chainstate/coordinator/tests.rs b/src/chainstate/coordinator/tests.rs index 29176b7a77..432f88a65d 100644 --- a/src/chainstate/coordinator/tests.rs +++ b/src/chainstate/coordinator/tests.rs @@ -446,7 +446,7 @@ fn make_genesis_block_with_recipients( let proof = VRF::prove(vrf_key, sortition_tip.sortition_hash.as_bytes()); - let mut builder = StacksBlockBuilder::make_block_builder( + let mut builder = StacksBlockBuilder::make_regtest_block_builder( &parent_stacks_header, proof.clone(), 0, @@ -462,11 +462,15 @@ fn make_genesis_block_with_recipients( builder.epoch_finish(epoch_tx); let commit_outs = if let Some(recipients) = recipients { - recipients + let mut commit_outs = recipients .recipients .iter() .map(|(a, _)| a.clone()) - .collect() + .collect::>(); + if commit_outs.len() == 1 { + commit_outs.push(StacksAddress::burn_address(false)) + } + commit_outs } else { vec![] }; @@ -504,7 +508,9 @@ fn make_genesis_block_with_recipients( fn make_stacks_block( sort_db: &SortitionDB, state: &mut StacksChainState, + burnchain: &Burnchain, parent_block: &BlockHeaderHash, + parent_height: u64, miner: &StacksPrivateKey, my_burn: u64, vrf_key: 
&VRFPrivateKey, @@ -513,7 +519,9 @@ fn make_stacks_block( make_stacks_block_with_recipients( sort_db, state, + burnchain, parent_block, + parent_height, miner, my_burn, vrf_key, @@ -528,7 +536,9 @@ fn make_stacks_block( fn make_stacks_block_with_recipients( sort_db: &SortitionDB, state: &mut StacksChainState, + burnchain: &Burnchain, parent_block: &BlockHeaderHash, + parent_height: u64, miner: &StacksPrivateKey, my_burn: u64, vrf_key: &VRFPrivateKey, @@ -538,7 +548,9 @@ fn make_stacks_block_with_recipients( make_stacks_block_with_recipients_and_sunset_burn( sort_db, state, + burnchain, parent_block, + parent_height, miner, my_burn, vrf_key, @@ -555,7 +567,9 @@ fn make_stacks_block_with_recipients( fn make_stacks_block_with_recipients_and_sunset_burn( sort_db: &SortitionDB, state: &mut StacksChainState, + burnchain: &Burnchain, parent_block: &BlockHeaderHash, + parent_height: u64, miner: &StacksPrivateKey, my_burn: u64, vrf_key: &VRFPrivateKey, @@ -567,7 +581,9 @@ fn make_stacks_block_with_recipients_and_sunset_burn( make_stacks_block_with_input( sort_db, state, + burnchain, parent_block, + parent_height, miner, my_burn, vrf_key, @@ -585,7 +601,9 @@ fn make_stacks_block_with_recipients_and_sunset_burn( fn make_stacks_block_with_input( sort_db: &SortitionDB, state: &mut StacksChainState, + burnchain: &Burnchain, parent_block: &BlockHeaderHash, + parent_height: u64, miner: &StacksPrivateKey, my_burn: u64, vrf_key: &VRFPrivateKey, @@ -640,7 +658,7 @@ fn make_stacks_block_with_input( let iconn = sort_db.index_conn(); - let mut builder = StacksBlockBuilder::make_block_builder( + let mut builder = StacksBlockBuilder::make_regtest_block_builder( &parent_stacks_header, proof.clone(), total_burn, @@ -654,12 +672,18 @@ fn make_stacks_block_with_input( builder.epoch_finish(epoch_tx); let commit_outs = if let Some(recipients) = recipients { - recipients + let mut commit_outs = recipients .recipients .iter() .map(|(a, _)| a.clone()) - .collect() - } else if post_sunset_burn { + .collect::>(); + if commit_outs.len() == 1 { + // Padding with burn address if required + commit_outs.push(StacksAddress::burn_address(false)) + } + commit_outs + } else if post_sunset_burn || burnchain.is_in_prepare_phase(parent_height + 1) { + test_debug!("block-commit in {} will burn", parent_height + 1); vec![StacksAddress::burn_address(false)] } else { vec![] @@ -741,7 +765,7 @@ fn missed_block_commits() { // process sequential blocks, and their sortitions... 
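// Editor's note: a minimal sketch (not code from this patch) of the commit-output rule the
// block-construction helpers above implement, assuming OUTPUTS_PER_COMMIT == 2 and using a
// placeholder generic `Addr` in place of StacksAddress. A commit either pays the PoX
// recipients (padding a lone recipient with the burn address so exactly two outputs are
// present), burns outright (post-sunset or during the prepare phase), or carries no PoX outputs.
fn commit_outs_sketch<Addr>(recipients: Option<Vec<Addr>>, burn_addr: Addr, burn_only: bool) -> Vec<Addr> {
    match recipients {
        Some(mut outs) => {
            if outs.len() == 1 {
                // a reward set with an odd number of addresses leaves one commit
                // with a single recipient; pad it with the burn address
                outs.push(burn_addr);
            }
            outs
        }
        // post-sunset or prepare-phase commits burn to a single output
        None if burn_only => vec![burn_addr],
        None => vec![],
    }
}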
let mut stacks_blocks: Vec<(SortitionId, StacksBlock)> = vec![]; - let mut last_txid: Option<Txid> = None; + let mut last_input: Option<(Txid, u32)> = None; let b = get_burnchain(path, None); for ix in 0..vrf_keys.len() { @@ -771,12 +795,15 @@ .test_get_next_block_recipients(&b, reward_cycle_info.as_ref()) .unwrap(); + let b = get_burnchain(path, pox_consts.clone()); let mut ops = vec![]; - if ix % 6 == 4 { + if ix % (MINING_COMMITMENT_WINDOW as usize) == 4 { let (mut bad_op, _) = make_stacks_block_with_input( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height - 2, miner, 10000, vrf_key, @@ -784,9 +811,17 @@ next_block_recipients.as_ref(), 0, false, - (last_txid.as_ref().unwrap().clone(), 3), + last_input.as_ref().unwrap().clone(), ); - last_txid = Some(bad_op.txid()); + // NOTE: intended for block block_height - 2 + last_input = Some(( + bad_op.txid(), + if b.is_in_prepare_phase(next_mock_header.block_height - 2 + 1) { + 2 + } else { + (OUTPUTS_PER_COMMIT as u32) + 1 + }, + )); bad_op.set_block_height(next_mock_header.block_height); if let BlockstackOperationType::LeaderBlockCommit(ref mut op) = bad_op { op.burn_parent_modulus = @@ -795,6 +830,11 @@ } else { panic!("Should be leader block commit"); } + test_debug!( + "bad_op meant for block {}: {:?}", + burnchain_tip.block_height - 2 + 1, + &bad_op + ); ops.push(bad_op); } @@ -813,7 +853,9 @@ make_stacks_block_with_input( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, @@ -821,7 +863,7 @@ next_block_recipients.as_ref(), 0, false, - (last_txid.as_ref().unwrap().clone(), 3), + last_input.as_ref().unwrap().clone(), ) }; @@ -832,7 +874,7 @@ let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - if ix % 6 == 3 { + if ix % (MINING_COMMITMENT_WINDOW as usize) == 3 { // produce an empty block! produce_burn_block( &mut burnchain, @@ -842,7 +884,14 @@ ); } else { // produce a block with one good op, - last_txid = Some(expected_winner.clone()); + last_input = Some(( + expected_winner, + if b.is_in_prepare_phase(next_mock_header.block_height) { + 2 + } else { + (OUTPUTS_PER_COMMIT as u32) + 1 + }, + )); produce_burn_block_do_not_set_height( &mut burnchain, &burnchain_tip.block_hash, @@ -853,12 +902,10 @@ // handle the sortition coord.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, pox_consts.clone()); - let tip = SortitionDB::get_canonical_burn_chain_tip(sort_db.conn()).unwrap(); let burn_distribution = get_burn_distribution(sort_db.conn(), &tip.sortition_id); eprintln!("{}", ix); - if ix % 6 == 3 { + if ix % (MINING_COMMITMENT_WINDOW as usize) == 3 { assert!( !tip.sortition, "Sortition should not have occurred because the only block commit was invalid" ); stacks_blocks.push(stacks_blocks[ix - 1].clone()); } else { // how many commits do we expect to see counted in the current window?
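// Editor's note: a hedged restatement (not patch code) of the window arithmetic asserted
// below, assuming MINING_COMMITMENT_WINDOW == 6, two empty burn blocks before mining
// starts, and one deliberately missed commit per window (the block at ix % window == 3
// is produced empty).
fn expected_window_commits_sketch(ix: usize, window: usize) -> usize {
    if ix >= window {
        // a full window always contains exactly one missed commit
        window - 1
    } else if ix >= 3 {
        // the first miss has happened, so one commit is absent from the partial window
        ix
    } else {
        // no misses yet: every block so far carried a commit
        ix + 1
    }
}
// In the reward phase the asserted distribution is min(last_burn, median_burn); in the
// prepare phase no windowing applies, and the raw last burn is asserted instead.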
- let expected_window_commits = if ix >= 6 { - 5 + let expected_window_commits = if ix >= (MINING_COMMITMENT_WINDOW as usize) { + (MINING_COMMITMENT_WINDOW - 1) as usize } else { if ix >= 3 { ix @@ -877,7 +924,7 @@ fn missed_block_commits() { } }; // there were 2 burn blocks before we started mining - let expected_window_size = cmp::min(6, ix + 3); + let expected_window_size = cmp::min(MINING_COMMITMENT_WINDOW as usize, ix + 3); let min_burn = 1; let median_burn = if expected_window_commits > expected_window_size / 2 { @@ -889,14 +936,29 @@ fn missed_block_commits() { } else { 1 }; - let last_burn = if ix % 6 == 3 { 0 } else { 10000 }; + let last_burn = if ix % (MINING_COMMITMENT_WINDOW as usize) == 3 { + 0 + } else { + 10000 + }; - assert_eq!( - burn_distribution[0].burns, - cmp::min(last_burn, median_burn), - "Burn distribution should match at ix = {}", - ix - ); + if b.is_in_prepare_phase(next_mock_header.block_height) { + // in prepare phase -- no smoothing takes place + assert_eq!( + burn_distribution[0].burns, last_burn, + "Burn distribution should not do windowing at ix = {} block_height = {}", + ix, next_mock_header.block_height + ) + } else { + // in reward phase -- apply min median + assert_eq!( + burn_distribution[0].burns, + cmp::min(last_burn, median_burn), + "Burn distribution should match at ix = {} block_height = {}", + ix, + next_mock_header.block_height + ); + } assert_eq!(&tip.winning_block_txid, &expected_winner); @@ -915,8 +977,8 @@ fn missed_block_commits() { let stacks_tip = SortitionDB::get_canonical_stacks_chain_tip_hash(sort_db.conn()).unwrap(); let mut chainstate = get_chainstate(path); - // 1 block of every 6 is missed - let missed_blocks = vrf_keys.len() / 6; + // 1 block of every $MINING_COMMITMENT_WINDOW is missed + let missed_blocks = vrf_keys.len() / (MINING_COMMITMENT_WINDOW as usize); let expected_height = vrf_keys.len() - missed_blocks; assert_eq!( chainstate @@ -939,7 +1001,7 @@ fn missed_block_commits() { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111", + "111111111111", "PoX ID should reflect the 5 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } } @@ -997,6 +1059,10 @@ fn test_simple_setup() { for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { let mut burnchain = get_burnchain_db(path, None); let mut chainstate = get_chainstate(path); + let b = get_burnchain(path, None); + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let burnchain_blinded = get_burnchain_db(path_blinded, None); + let (op, block) = if ix == 0 { make_genesis_block( &sort_db, @@ -1011,15 +1077,16 @@ fn test_simple_setup() { make_stacks_block( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, ix as u32, ) }; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - let burnchain_blinded = get_burnchain_db(path_blinded, None); + produce_burn_block( &mut burnchain, &burnchain_tip.block_hash, @@ -1030,7 +1097,6 @@ fn test_simple_setup() { coord.handle_new_burnchain_block().unwrap(); coord_blind.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, None); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { // the "blinded" sortition db and the one that's processed all the blocks @@ -1095,7 +1161,7 @@ fn test_simple_setup() { let ic = sort_db.index_handle_at_tip(); 
let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111", + "111111111111", "PoX ID should reflect the 10 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } @@ -1104,7 +1170,7 @@ fn test_simple_setup() { let pox_id = ic.get_pox_id().unwrap(); assert_eq!( &pox_id.to_string(), - "10000000000", + "110000000000", "PoX ID should reflect the initial 'known' reward cycle at genesis" ); } @@ -1132,10 +1198,11 @@ fn test_simple_setup() { pox_id_string.push('1'); } + println!("=> {}", pox_id_string); assert_eq!( pox_id_at_tip.to_string(), // right-pad pox_id_string to 11 characters - format!("{:0<11}", pox_id_string) + format!("1{:0<11}", pox_id_string) ); } } @@ -1250,6 +1317,7 @@ fn test_sortition_with_reward_set() { } } + let b = get_burnchain(path, None); let (good_op, mut block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -1265,7 +1333,9 @@ fn test_sortition_with_reward_set() { make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, @@ -1285,7 +1355,9 @@ fn test_sortition_with_reward_set() { let (all_burn_op, all_burn_block) = make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner_burner, 10000, vrf_burner, @@ -1310,19 +1382,21 @@ fn test_sortition_with_reward_set() { .map(|ix| (p2pkh_from(&StacksPrivateKey::new()), ix as u16)) .collect() }; - let bad_block_recipipients = Some(RewardSetInfo { + let bad_block_recipients = Some(RewardSetInfo { anchor_block: BlockHeaderHash([0; 32]), recipients, }); let (bad_outs_op, _) = make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner_wrong_out, 10000, vrf_burner, (ix + WRONG_OUTS_OFFSET) as u32, - bad_block_recipipients.as_ref(), + bad_block_recipients.as_ref(), ); ops.push(bad_outs_op); } @@ -1337,7 +1411,6 @@ fn test_sortition_with_reward_set() { // handle the sortition coord.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, None); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; @@ -1386,7 +1459,7 @@ fn test_sortition_with_reward_set() { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111", + "111111111111", "PoX ID should reflect the 10 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } } @@ -1501,6 +1574,7 @@ fn test_sortition_with_burner_reward_set() { } } + let b = get_burnchain(path, None); let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -1516,7 +1590,9 @@ fn test_sortition_with_burner_reward_set() { make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, @@ -1538,19 +1614,21 @@ fn test_sortition_with_burner_reward_set() { .map(|ix| (p2pkh_from(&StacksPrivateKey::new()), ix as u16)) .collect() }; - let bad_block_recipipients = Some(RewardSetInfo { + let bad_block_recipients = Some(RewardSetInfo { anchor_block: BlockHeaderHash([0; 32]), recipients, }); let (bad_outs_op, _) = make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner_wrong_out, 10000, vrf_burner, (ix + WRONG_OUTS_OFFSET) as u32, - bad_block_recipipients.as_ref(), + 
bad_block_recipients.as_ref(), ); ops.push(bad_outs_op); } @@ -1565,7 +1643,6 @@ fn test_sortition_with_burner_reward_set() { // handle the sortition coord.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, None); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { started_first_reward_cycle = true; @@ -1613,7 +1690,7 @@ fn test_sortition_with_burner_reward_set() { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111", + "111111111111", "PoX ID should reflect the 10 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } } @@ -1718,6 +1795,8 @@ fn test_pox_btc_ops() { } } + let b = get_burnchain(path, None); + let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -1733,7 +1812,9 @@ fn test_pox_btc_ops() { make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 1000, vrf_key, @@ -1813,7 +1894,6 @@ fn test_pox_btc_ops() { // handle the sortition coord.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, pox_consts.clone()); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { @@ -1870,7 +1950,7 @@ fn test_pox_btc_ops() { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111", + "111111111111", "PoX ID should reflect the 5 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } } @@ -1971,6 +2051,7 @@ fn test_stx_transfer_btc_ops() { } } + let b = get_burnchain(path, pox_consts.clone()); let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -1986,7 +2067,9 @@ fn test_stx_transfer_btc_ops() { make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 1000, vrf_key, @@ -2107,7 +2190,6 @@ fn test_stx_transfer_btc_ops() { // handle the sortition coord.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, pox_consts.clone()); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { @@ -2164,7 +2246,7 @@ fn test_stx_transfer_btc_ops() { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111", + "111111111111", "PoX ID should reflect the 5 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } } @@ -2278,6 +2360,7 @@ fn test_initial_coinbase_reward_distributions() { stacks_blocks[ix / 2 - 1].1.header.block_hash() }; + let b = get_burnchain(path, pox_consts.clone()); let (good_op, block) = if ix == 0 { make_genesis_block_with_recipients( &sort_db, @@ -2293,7 +2376,9 @@ fn test_initial_coinbase_reward_distributions() { make_stacks_block_with_recipients( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 1000, vrf_key, @@ -2531,6 +2616,7 @@ fn test_sortition_with_sunset() { let sunset_burn = burnchain_conf.expected_sunset_burn(next_mock_header.block_height, 10000); let rest_commit = 10000 - sunset_burn; + let b = get_burnchain(path, pox_consts.clone()); let (good_op, block) = if ix == 0 { 
make_genesis_block_with_recipients( @@ -2547,7 +2633,9 @@ fn test_sortition_with_sunset() { make_stacks_block_with_recipients_and_sunset_burn( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, rest_commit, vrf_key, @@ -2566,7 +2654,9 @@ fn test_sortition_with_sunset() { let (bad_outs_op, _) = make_stacks_block_with_recipients_and_sunset_burn( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_wrong_out, @@ -2588,7 +2678,6 @@ fn test_sortition_with_sunset() { // handle the sortition coord.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, pox_consts.clone()); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { if new_burnchain_tip.block_height < sunset_ht { @@ -2645,7 +2734,7 @@ fn test_sortition_with_sunset() { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); assert_eq!(&pox_id.to_string(), - "11111111111111111", + "111111111111111111", "PoX ID should reflect the 10 reward cycles _with_ a known anchor block, plus the 'initial' known reward cycle at genesis"); } } @@ -2717,6 +2806,10 @@ fn test_pox_processable_block_in_different_pox_forks() { for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { let mut burnchain = get_burnchain_db(path, None); let mut chainstate = get_chainstate(path); + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let burnchain_blinded = get_burnchain_db(path_blinded, None); + let b = get_burnchain(path, None); + eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { make_genesis_block( @@ -2737,15 +2830,15 @@ fn test_pox_processable_block_in_different_pox_forks() { make_stacks_block( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, ix as u32, ) }; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - let burnchain_blinded = get_burnchain_db(path_blinded, None); produce_burn_block( &mut burnchain, &burnchain_tip.block_hash, @@ -2756,7 +2849,6 @@ fn test_pox_processable_block_in_different_pox_forks() { coord.handle_new_burnchain_block().unwrap(); coord_blind.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, None); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { eprintln!( @@ -2956,6 +3048,10 @@ fn test_pox_no_anchor_selected() { for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { let mut burnchain = get_burnchain_db(path, None); let mut chainstate = get_chainstate(path); + let b = get_burnchain(path, None); + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let burnchain_blinded = get_burnchain_db(path_blinded, None); + eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { make_genesis_block( @@ -2978,15 +3074,16 @@ fn test_pox_no_anchor_selected() { make_stacks_block( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, ix as u32, ) }; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - let burnchain_blinded = get_burnchain_db(path_blinded, None); + produce_burn_block( &mut burnchain, &burnchain_tip.block_hash, @@ -3072,13 +3169,13 @@ fn test_pox_no_anchor_selected() { { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "111"); + assert_eq!(&pox_id.to_string(), "1111"); } { let 
ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "101"); + assert_eq!(&pox_id.to_string(), "1101"); } for (sort_id, block) in stacks_blocks.iter() { @@ -3094,7 +3191,7 @@ fn test_pox_no_anchor_selected() { { let ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "111"); + assert_eq!(&pox_id.to_string(), "1111"); } let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); @@ -3157,6 +3254,10 @@ fn test_pox_fork_out_of_order() { for (ix, (vrf_key, miner)) in vrf_keys.iter().zip(committers.iter()).enumerate() { let mut burnchain = get_burnchain_db(path, None); let mut chainstate = get_chainstate(path); + let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); + let burnchain_blinded = get_burnchain_db(path_blinded, None); + let b = get_burnchain(path, None); + eprintln!("Making block {}", ix); let (op, block) = if ix == 0 { make_genesis_block( @@ -3181,15 +3282,15 @@ fn test_pox_fork_out_of_order() { make_stacks_block( &sort_db, &mut chainstate, + &b, &parent, + burnchain_tip.block_height, miner, 10000, vrf_key, ix as u32, ) }; - let burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); - let burnchain_blinded = get_burnchain_db(path_blinded, None); produce_burn_block( &mut burnchain, &burnchain_tip.block_hash, @@ -3200,7 +3301,6 @@ fn test_pox_fork_out_of_order() { coord.handle_new_burnchain_block().unwrap(); coord_blind.handle_new_burnchain_block().unwrap(); - let b = get_burnchain(path, None); let new_burnchain_tip = burnchain.get_canonical_chain_tip().unwrap(); if b.is_reward_cycle_start(new_burnchain_tip.block_height) { eprintln!( @@ -3267,13 +3367,13 @@ fn test_pox_fork_out_of_order() { { let ic = sort_db.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "1111"); + assert_eq!(&pox_id.to_string(), "11111"); } { let ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "1000"); + assert_eq!(&pox_id.to_string(), "11000"); } // now, we reveal to the blinded coordinator, but out of order. 
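// Editor's note: an illustrative sketch (an assumption, not patch code) of the PoX ID
// strings asserted throughout these tests. With `initial_reward_start_block`, bit 0 is
// treated as the 'initial' known reward cycle at genesis and is always set, which is why
// every expected PoX ID in this patch gained a leading '1'; each later bit records whether
// that reward cycle's anchor block is known to the coordinator.
fn pox_id_sketch(known_anchor_blocks: &[bool]) -> String {
    std::iter::once('1')
        .chain(known_anchor_blocks.iter().map(|&known| if known { '1' } else { '0' }))
        .collect()
}
// e.g. pox_id_sketch(&[true, true, true, false]) == "11110", matching the blinded
// coordinator's view after the first out-of-order reveal below.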
@@ -3296,7 +3396,7 @@ fn test_pox_fork_out_of_order() { { let ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "1110"); + assert_eq!(&pox_id.to_string(), "11110"); } let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); @@ -3327,7 +3427,7 @@ fn test_pox_fork_out_of_order() { { let ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "1110"); + assert_eq!(&pox_id.to_string(), "11110"); } let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); @@ -3388,7 +3488,7 @@ fn test_pox_fork_out_of_order() { { let ic = sort_db_blind.index_handle_at_tip(); let pox_id = ic.get_pox_id().unwrap(); - assert_eq!(&pox_id.to_string(), "1111"); + assert_eq!(&pox_id.to_string(), "11111"); } let block_height = eval_at_chain_tip(path_blinded, &sort_db_blind, "block-height"); diff --git a/src/chainstate/stacks/boot/bns.clar b/src/chainstate/stacks/boot/bns.clar index 2143cb8e19..df91b7472a 100644 --- a/src/chainstate/stacks/boot/bns.clar +++ b/src/chainstate/stacks/boot/bns.clar @@ -42,10 +42,10 @@ (define-constant ERR_PRINCIPAL_ALREADY_ASSOCIATED 3001) (define-constant ERR_INSUFFICIENT_FUNDS 4001) -(define-constant NAMESPACE_PREORDER_CLAIMABILITY_TTL u10) -(define-constant NAMESPACE_LAUNCHABILITY_TTL u10) -(define-constant NAME_PREORDER_CLAIMABILITY_TTL u10) -(define-constant NAME_GRACE_PERIOD_DURATION u5) +(define-constant NAMESPACE_PREORDER_CLAIMABILITY_TTL u144) +(define-constant NAMESPACE_LAUNCHABILITY_TTL u52595) +(define-constant NAME_PREORDER_CLAIMABILITY_TTL u144) +(define-constant NAME_GRACE_PERIOD_DURATION u5000) (define-data-var attachment-index uint u0) @@ -58,7 +58,7 @@ ;;;; Data (define-map namespaces - { namespace: (buff 20) } + (buff 20) { namespace-import: principal, revealed-at: uint, launched-at: (optional uint), @@ -76,16 +76,16 @@ { hashed-salted-namespace: (buff 20), buyer: principal } { created-at: uint, claimed: bool, stx-burned: uint }) -(define-non-fungible-token names { name: (buff 32), namespace: (buff 20) }) +(define-non-fungible-token names { name: (buff 48), namespace: (buff 20) }) ;; Rule 1-1 -> 1 principal, 1 name -(define-map owner-name { owner: principal } { name: (buff 32), namespace: (buff 20) }) +(define-map owner-name principal { name: (buff 48), namespace: (buff 20) }) ;; Only applies to non-revoked, non-expired names. ;; A principal can own many expired names (but they will be transferred away once someone re-registers them), ;; and can own many revoked names (but they do not resolve and cannot be transferred or updated). (define-map name-properties - { name: (buff 32), namespace: (buff 20) } + { name: (buff 48), namespace: (buff 20) } { registered-at: (optional uint), imported-at: (optional uint), revoked-at: (optional uint), @@ -171,13 +171,13 @@ (is-digit char) (is-special-char char))) -(define-private (has-vowels-chars (name (buff 32))) +(define-private (has-vowels-chars (name (buff 48))) (> (len (filter is-vowel name)) u0)) -(define-private (has-nonalpha-chars (name (buff 32))) +(define-private (has-nonalpha-chars (name (buff 48))) (> (len (filter is-nonalpha name)) u0)) -(define-private (has-invalid-chars (name (buff 32))) +(define-private (has-invalid-chars (name (buff 48))) (< (len (filter is-char-valid name)) (len name))) (define-private (name-lease-started-at? 
(namespace-launched-at (optional uint)) @@ -216,7 +216,7 @@ ;; Note: the following method is used in name-import and name-register. The latter ensures that the name ;; can be registered, the former does not. -(define-private (mint-or-transfer-name? (namespace (buff 20)) (name (buff 32)) (beneficiary principal)) +(define-private (mint-or-transfer-name? (namespace (buff 20)) (name (buff 48)) (beneficiary principal)) (let ( (current-owner (nft-get-owner? names (tuple (name name) (namespace namespace))))) ;; The principal can register a name @@ -233,13 +233,13 @@ beneficiary) (err ERR_NAME_COULD_NOT_BE_MINTED)) (map-set owner-name - { owner: beneficiary } + beneficiary { name: name, namespace: namespace }) (ok true)) (update-name-ownership? namespace name (unwrap-panic current-owner) beneficiary)))) (define-private (update-name-ownership? (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (from principal) (to principal)) (if (is-eq from to) @@ -248,14 +248,14 @@ (unwrap! (nft-transfer? names { name: name, namespace: namespace } from to) (err ERR_NAME_COULD_NOT_BE_TRANSFERED)) - (map-delete owner-name { owner: from }) + (map-delete owner-name from) (map-set owner-name - { owner: to } + to { name: name, namespace: namespace }) (ok true)))) (define-private (update-zonefile-and-props (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (registered-at (optional uint)) (imported-at (optional uint)) (revoked-at (optional uint)) @@ -285,7 +285,7 @@ zonefile-hash: zonefile-hash }))) (define-private (is-namespace-available (namespace (buff 20))) - (match (map-get? namespaces { namespace: namespace }) namespace-props + (match (map-get? namespaces namespace) namespace-props (begin ;; Is the namespace launched? (if (is-some (get launched-at namespace-props)) @@ -293,7 +293,7 @@ (> block-height (+ (get revealed-at namespace-props) NAMESPACE_LAUNCHABILITY_TTL)))) ;; Is the namespace expired? true)) -(define-private (compute-name-price (name (buff 32)) +(define-private (compute-name-price (name (buff 48)) (price-function (tuple (buckets (list 16 uint)) (base uint) (coeff uint) @@ -422,7 +422,7 @@ ;; The namespace will be set as "revealed" but not "launched", its price function, its renewal rules, its version, ;; and its import principal will be written to the `namespaces` table. (map-set namespaces - { namespace: namespace } + namespace { namespace-import: namespace-import, revealed-at: block-height, launched-at: none, @@ -434,12 +434,12 @@ ;; Once a namespace is revealed, the user has the option to populate it with a set of names. Each imported name is given ;; both an owner and some off-chain state. This step is optional; namespace creators are not required to import names. (define-public (name-import (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (beneficiary principal) (zonefile-hash (buff 20))) (let ( (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND)))) ;; The name must only have valid chars (asserts! @@ -476,7 +476,7 @@ (define-public (namespace-ready (namespace (buff 20))) (let ( (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND)))) ;; The sender principal must match the namespace's import principal (asserts!
@@ -492,7 +492,7 @@ (err ERR_NAMESPACE_PREORDER_LAUNCHABILITY_EXPIRED)) (let ((namespace-props-updated (merge namespace-props { launched-at: (some block-height) }))) ;; The namespace will be set to "launched" - (map-set namespaces { namespace: namespace } namespace-props-updated) + (map-set namespaces namespace namespace-props-updated) ;; Emit an event (print { namespace: namespace, status: "ready", properties: namespace-props-updated }) (ok true)))) @@ -531,13 +531,13 @@ ;; This is the second transaction to be sent. It reveals the salt and the name to all BNS nodes, ;; and assigns the name an initial public key hash and zone file hash (define-public (name-register (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (salt (buff 20)) (zonefile-hash (buff 20))) (let ( (hashed-salted-fqn (hash160 (concat (concat (concat name 0x2e) namespace) salt))) (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND))) (preorder (unwrap! (map-get? name-preorders { hashed-salted-fqn: hashed-salted-fqn, buyer: tx-sender }) @@ -579,7 +579,7 @@ ;; if you wanted to change the name's zone file contents. ;; For example, you would do this if you want to deploy your own Gaia hub and want other people to read from it. (define-public (name-update (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (zonefile-hash (buff 20))) (let ( (data (try! (check-name-ops-preconditions namespace name)))) @@ -601,7 +601,7 @@ ;; When transferring a name, you have the option to also clear the name's zone file hash (i.e. set it to null). ;; This is useful for when you send the name to someone else, so the recipient's name does not resolve to your zone file. (define-public (name-transfer (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (new-owner principal) (zonefile-hash (optional (buff 20)))) (let ( @@ -623,7 +623,7 @@ (get imported-at (get name-props data)) none (if (is-none zonefile-hash) - 0x00 + 0x (unwrap-panic zonefile-hash)) "name-transfer") (ok true))) @@ -634,7 +634,7 @@ ;; The name's zone file hash is set to null to prevent it from resolving. ;; You should only do this if your private key is compromised, or if you want to render your name unusable for whatever reason. (define-public (name-revoke (namespace (buff 20)) - (name (buff 32))) + (name (buff 48))) (let ( (data (try! (check-name-ops-preconditions namespace name)))) ;; Clear the zonefile @@ -644,7 +644,7 @@ (get registered-at (get name-props data)) (get imported-at (get name-props data)) (some block-height) - 0x00 + 0x "name-revoke") (ok true))) @@ -657,13 +657,13 @@ ;; You may, however, send a NAME_RENEWAL during this grace period to preserve your name. ;; If your name is in a namespace where names do not expire, then you never need to use this transaction. (define-public (name-renewal (namespace (buff 20)) - (name (buff 32)) + (name (buff 48)) (stx-to-burn uint) (new-owner (optional principal)) (zonefile-hash (optional (buff 20)))) (let ( (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND))) (owner (unwrap! (nft-get-owner? names { name: name, namespace: namespace }) @@ -729,20 +729,20 @@ (ok (unwrap-panic (element-at NAMESPACE_PRICE_TIERS (min u7 (- namespace-len u1))))))) -(define-read-only (get-name-price (namespace (buff 20)) (name (buff 32))) +(define-read-only (get-name-price (namespace (buff 20)) (name (buff 48))) (let ( (namespace-props (unwrap! - (map-get? 
namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND)))) (ok (compute-name-price name (get price-function namespace-props))))) -(define-read-only (check-name-ops-preconditions (namespace (buff 20)) (name (buff 32))) +(define-read-only (check-name-ops-preconditions (namespace (buff 20)) (name (buff 48))) (let ( (owner (unwrap! (nft-get-owner? names { name: name, namespace: namespace }) (err ERR_NAME_NOT_FOUND))) ;; The name must exist (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND))) (name-props (unwrap! (map-get? name-properties { name: name, namespace: namespace }) @@ -772,10 +772,10 @@ (define-read-only (can-namespace-be-registered (namespace (buff 20))) (ok (is-namespace-available namespace))) -(define-read-only (is-name-lease-expired (namespace (buff 20)) (name (buff 32))) +(define-read-only (is-name-lease-expired (namespace (buff 20)) (name (buff 48))) (let ( (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND))) (name-props (unwrap! (map-get? name-properties { name: name, namespace: namespace }) @@ -786,10 +786,10 @@ (ok false) (ok (> block-height (+ lifetime lease-started-at)))))) -(define-read-only (is-name-in-grace-period (namespace (buff 20)) (name (buff 32))) +(define-read-only (is-name-in-grace-period (namespace (buff 20)) (name (buff 48))) (let ( (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND))) (name-props (unwrap! (map-get? name-properties { name: name, namespace: namespace }) @@ -803,7 +803,7 @@ (<= block-height (+ (+ lifetime lease-started-at) NAME_GRACE_PERIOD_DURATION))))))) (define-read-only (can-receive-name (owner principal)) - (let ((current-owned-name (map-get? owner-name { owner: owner }))) + (let ((current-owned-name (map-get? owner-name owner))) (if (is-none current-owned-name) (ok true) (let ( @@ -822,10 +822,10 @@ (asserts! (is-some (get revoked-at name-props)) (ok false)) (ok true)))))))) -(define-read-only (can-name-be-registered (namespace (buff 20)) (name (buff 32))) +(define-read-only (can-name-be-registered (namespace (buff 20)) (name (buff 48))) (let ( (wrapped-name-props (map-get? name-properties { name: name, namespace: namespace })) - (namespace-props (unwrap! (map-get? namespaces { namespace: namespace }) (ok false)))) + (namespace-props (unwrap! (map-get? namespaces namespace) (ok false)))) ;; The name must only have valid chars (asserts! (not (has-invalid-chars name)) @@ -842,7 +842,7 @@ ;; Is lease expired? (is-name-lease-expired namespace name)))) -(define-read-only (name-resolve (namespace (buff 20)) (name (buff 32))) +(define-read-only (name-resolve (namespace (buff 20)) (name (buff 48))) (let ( (owner (unwrap! (nft-get-owner? names { name: name, namespace: namespace }) @@ -851,7 +851,7 @@ (map-get? name-properties { name: name, namespace: namespace }) (err ERR_NAME_NOT_FOUND))) (namespace-props (unwrap! - (map-get? namespaces { namespace: namespace }) + (map-get? namespaces namespace) (err ERR_NAMESPACE_NOT_FOUND)))) ;; The name must not be in grace period (asserts! @@ -866,4 +866,11 @@ (is-none (get revoked-at name-props)) (err ERR_NAME_REVOKED)) ;; Get the zonefile - (ok { zonefile-hash: (get zonefile-hash name-props), owner: owner }))) + (let ( + (lease-started-at (try! (name-lease-started-at? 
(get launched-at namespace-props) (get revealed-at namespace-props) name-props)))) + (ok { + zonefile-hash: (get zonefile-hash name-props), + owner: owner, + lease-started-at: lease-started-at, + lease-ending-at: (if (is-eq (get lifetime namespace-props) u0) none (some (+ lease-started-at (get lifetime namespace-props)))) + })))) diff --git a/src/chainstate/stacks/boot/contract_tests.rs b/src/chainstate/stacks/boot/contract_tests.rs index c49b3b01b7..91f067e87e 100644 --- a/src/chainstate/stacks/boot/contract_tests.rs +++ b/src/chainstate/stacks/boot/contract_tests.rs @@ -34,8 +34,9 @@ use vm::tests::{execute, is_committed, is_err_code, symbols_from_values}; use address::AddressHashMode; use core::{ - FIRST_BURNCHAIN_BLOCK_HASH, FIRST_BURNCHAIN_BLOCK_HEIGHT, FIRST_BURNCHAIN_BLOCK_TIMESTAMP, - FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, POX_REWARD_CYCLE_LENGTH, + BITCOIN_REGTEST_FIRST_BLOCK_HASH, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, + BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, + POX_REWARD_CYCLE_LENGTH, }; use vm::types::Value::Response; @@ -110,6 +111,7 @@ impl From<&StacksPrivateKey> for Value { struct ClarityTestSim { marf: MarfedKV, height: u64, + fork: u64, } struct TestSimHeadersDB { @@ -122,7 +124,7 @@ impl ClarityTestSim { { let mut store = marf.begin( &StacksBlockId::sentinel(), - &StacksBlockId(test_sim_height_to_hash(0)), + &StacksBlockId(test_sim_height_to_hash(0, 0)), ); store @@ -141,7 +143,11 @@ impl ClarityTestSim { store.test_commit(); } - ClarityTestSim { marf, height: 0 } + ClarityTestSim { + marf, + height: 0, + fork: 0, + } } pub fn execute_next_block(&mut self, f: F) -> R @@ -149,8 +155,8 @@ impl ClarityTestSim { F: FnOnce(&mut OwnedEnvironment) -> R, { let mut store = self.marf.begin( - &StacksBlockId(test_sim_height_to_hash(self.height)), - &StacksBlockId(test_sim_height_to_hash(self.height + 1)), + &StacksBlockId(test_sim_height_to_hash(self.height, self.fork)), + &StacksBlockId(test_sim_height_to_hash(self.height + 1, self.fork)), ); let r = { @@ -167,11 +173,37 @@ impl ClarityTestSim { r } + + pub fn execute_block_as_fork(&mut self, parent_height: u64, f: F) -> R + where + F: FnOnce(&mut OwnedEnvironment) -> R, + { + let mut store = self.marf.begin( + &StacksBlockId(test_sim_height_to_hash(parent_height, self.fork)), + &StacksBlockId(test_sim_height_to_hash(parent_height + 1, self.fork + 1)), + ); + + let r = { + let headers_db = TestSimHeadersDB { + height: parent_height + 1, + }; + let mut owned_env = + OwnedEnvironment::new(store.as_clarity_db(&headers_db, &NULL_BURN_STATE_DB)); + f(&mut owned_env) + }; + + store.test_commit(); + self.height = parent_height + 1; + self.fork += 1; + + r + } } -fn test_sim_height_to_hash(burn_height: u64) -> [u8; 32] { +fn test_sim_height_to_hash(burn_height: u64, fork: u64) -> [u8; 32] { let mut out = [0; 32]; out[0..8].copy_from_slice(&burn_height.to_le_bytes()); + out[8..16].copy_from_slice(&fork.to_le_bytes()); out } @@ -191,7 +223,7 @@ impl HeadersDB for TestSimHeadersDB { id_bhh: &StacksBlockId, ) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { - Some(FIRST_BURNCHAIN_BLOCK_HASH) + Some(BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap()) } else { self.get_burn_block_height_for_block(id_bhh)?; Some(BurnchainHeaderHash(id_bhh.0.clone())) @@ -216,18 +248,18 @@ impl HeadersDB for TestSimHeadersDB { fn get_burn_block_time_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { - 
Some(FIRST_BURNCHAIN_BLOCK_TIMESTAMP) + Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) } else { let burn_block_height = self.get_burn_block_height_for_block(id_bhh)? as u64; Some( - FIRST_BURNCHAIN_BLOCK_TIMESTAMP + burn_block_height - - FIRST_BURNCHAIN_BLOCK_HEIGHT as u64, + BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64 + burn_block_height + - BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u64, ) } } fn get_burn_block_height_for_block(&self, id_bhh: &StacksBlockId) -> Option { if *id_bhh == *FIRST_INDEX_BLOCK_HASH { - Some(FIRST_BURNCHAIN_BLOCK_HEIGHT) + Some(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32) } else { let input_height = test_sim_hash_to_height(&id_bhh.0)?; if input_height > self.height { @@ -235,7 +267,7 @@ impl HeadersDB for TestSimHeadersDB { None } else { Some( - (FIRST_BURNCHAIN_BLOCK_HEIGHT as u64 + input_height) + (BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32 + input_height as u32) .try_into() .unwrap(), ) @@ -1171,6 +1203,8 @@ fn test_vote_fail() { ); }); + let fork_start = sim.height; + for _ in 0..1000 { sim.execute_next_block(|env| { env.execute_transaction( @@ -1220,6 +1254,30 @@ fn test_vote_fail() { }) ); }); + + // let's fork, and overcome the veto + sim.execute_block_as_fork(fork_start, |_| {}); + for _ in 0..1100 { + sim.execute_next_block(|_| {}); + } + + sim.execute_next_block(|env| { + // Assert confirmation passes because there are no vetos + assert_eq!( + env.execute_transaction( + (&USER_KEYS[0]).into(), + COST_VOTING_CONTRACT.clone(), + "confirm-miners", + &symbols_from_values(vec![Value::UInt(0)]) + ) + .unwrap() + .0, + Value::Response(ResponseData { + committed: true, + data: Value::Bool(true).into(), + }) + ); + }); } #[test] diff --git a/src/chainstate/stacks/boot/cost-voting.clar b/src/chainstate/stacks/boot/cost-voting.clar index a9ccc846cd..0854268c62 100644 --- a/src/chainstate/stacks/boot/cost-voting.clar +++ b/src/chainstate/stacks/boot/cost-voting.clar @@ -122,7 +122,7 @@ cost-function-name: cost-function-name, function-contract: function-contract, function-name: function-name, - expiration-block-height: (+ burn-block-height VOTE_LENGTH) }) + expiration-block-height: (+ block-height VOTE_LENGTH) }) (map-insert proposal-votes { proposal-id: (var-get proposal-count) } { votes: u0 }) (var-set proposal-count (+ (var-get proposal-count) u1)) (ok (- (var-get proposal-count) u1)))) @@ -135,14 +135,13 @@ (cur-votes (default-to u0 (get votes (map-get? proposal-votes { proposal-id: proposal-id })))) (cur-principal-votes (default-to u0 (get votes (map-get? principal-proposal-votes { address: tx-sender, - proposal-id: proposal-id })))) - ) + proposal-id: proposal-id }))))) ;; a vote must have a positive amount (asserts! (> amount u0) (err ERR_AMOUNT_NOT_POSITIVE)) ;; the vote must occur before the expiration - (asserts! (< burn-block-height expiration-block-height) (err ERR_PROPOSAL_EXPIRED)) + (asserts! (< block-height expiration-block-height) (err ERR_PROPOSAL_EXPIRED)) ;; the proposal must not already be voter confirmed (asserts! (is-none (map-get? vote-confirmed-proposals { proposal-id: proposal-id })) @@ -154,8 +153,7 @@ (map-set proposal-votes { proposal-id: proposal-id } { votes: (+ amount cur-votes) }) (map-set principal-proposal-votes { address: tx-sender, proposal-id: proposal-id} { votes: (+ amount cur-principal-votes)}) - (ok true)) -) + (ok true))) ;; Withdraw votes (define-public (withdraw-votes (proposal-id uint) (amount uint)) @@ -164,8 +162,7 @@ (cur-principal-votes (default-to u0 (get votes (map-get? 
principal-proposal-votes { address: tx-sender, proposal-id: proposal-id })))) - (sender tx-sender) - ) + (sender tx-sender)) (asserts! (> amount u0) (err ERR_AMOUNT_NOT_POSITIVE)) (asserts! (>= cur-principal-votes amount) (err ERR_INSUFFICIENT_FUNDS)) @@ -177,8 +174,7 @@ (map-set proposal-votes { proposal-id: proposal-id } { votes: (- cur-votes amount) }) (map-set principal-proposal-votes { address: tx-sender, proposal-id: proposal-id } { votes: (- cur-principal-votes amount) }) - (ok true)) -) + (ok true))) ;; Miner veto (define-public (veto (proposal-id uint)) @@ -190,14 +186,13 @@ (vetoed (default-to false (get vetoed (map-get? exercised-veto { proposal-id: proposal-id, veto-height: block-height })))) (last-miner (unwrap! (get-block-info? miner-address (- block-height u1)) - (err ERR_FETCHING_BLOCK_INFO))) - ) + (err ERR_FETCHING_BLOCK_INFO)))) ;; a miner can only veto once per block (asserts! (not vetoed) (err ERR_ALREADY_VETOED)) - ;; vetoes must be case within the veto period - (asserts! (< burn-block-height expiration-block-height) (err ERR_VETO_PERIOD_OVER)) + ;; vetoes must be cast within the veto period + (asserts! (< block-height expiration-block-height) (err ERR_VETO_PERIOD_OVER)) ;; a miner can only veto if they mined the previous block (asserts! (is-eq contract-caller last-miner) (err ERR_NOT_LAST_MINER)) @@ -209,8 +204,7 @@ (map-set proposal-vetos { proposal-id: proposal-id } { vetos: (+ u1 cur-vetos) }) (map-set exercised-veto { proposal-id: proposal-id, veto-height: block-height } { vetoed: true }) - (ok true)) -) + (ok true))) ;; Confirm proposal has reached required vote count (define-public (confirm-votes (proposal-id uint)) @@ -218,23 +212,19 @@ (votes (default-to u0 (get votes (map-get? proposal-votes { proposal-id: proposal-id })))) (proposal (unwrap! (map-get? proposals { proposal-id: proposal-id }) (err ERR_NO_SUCH_PROPOSAL))) (confirmed-count (var-get confirmed-proposal-count)) - ) - (let ( - (expiration-block-height (get expiration-block-height proposal)) - ) + (expiration-block-height (get expiration-block-height proposal))) ;; confirmation fails if invoked after proposal has expired - (asserts! (< burn-block-height expiration-block-height) (err ERR_PROPOSAL_EXPIRED)) + (asserts! (< block-height expiration-block-height) (err ERR_PROPOSAL_EXPIRED)) ;; confirmation fails if the required threshold of votes is not met (asserts! (>= (/ (* votes u100) stx-liquid-supply) REQUIRED_PERCENT_STX_VOTE) (err ERR_INSUFFICIENT_VOTES)) (map-insert vote-confirmed-proposals { proposal-id: proposal-id } - { expiration-block-height: (+ VETO_LENGTH burn-block-height) }) + { expiration-block-height: (+ VETO_LENGTH block-height) }) (ok true))) -) ;; Confirm proposal hasn't been vetoed (define-public (confirm-miners (proposal-id uint)) @@ -252,7 +242,7 @@ (map-set confirmed-count-at-block block-height (+ u1 confirmed-this-block)) ;; miner confirmation will fail if invoked before the expiration - (asserts! (>= burn-block-height expiration-block-height) (err ERR_VETO_PERIOD_NOT_OVER)) + (asserts! (>= block-height expiration-block-height) (err ERR_VETO_PERIOD_NOT_OVER)) ;; miner confirmation will fail if there are enough vetos (asserts! 
(< vetos REQUIRED_VETOES) (err ERR_PROPOSAL_VETOED)) diff --git a/src/chainstate/stacks/boot/mod.rs b/src/chainstate/stacks/boot/mod.rs index 06659bcca9..ae75d3f00b 100644 --- a/src/chainstate/stacks/boot/mod.rs +++ b/src/chainstate/stacks/boot/mod.rs @@ -1019,6 +1019,7 @@ pub mod test { let num_blocks = 10; let mut expected_liquid_ustx = 1024 * POX_THRESHOLD_STEPS_USTX * (keys.len() as u128); + let mut prior_liquid_ustx = expected_liquid_ustx; let mut missed_initial_blocks = 0; for tenure_id in 0..num_blocks { @@ -1047,7 +1048,7 @@ let block_txs = vec![coinbase_tx]; - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -1070,13 +1071,15 @@ peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); let liquid_ustx = get_liquid_ustx(&mut peer); - assert_eq!(liquid_ustx, expected_liquid_ustx); + // get_liquid_ustx is "off by one", i.e., it loads the parent's liquid uSTX + assert_eq!(liquid_ustx, prior_liquid_ustx); if tenure_id >= MINER_REWARD_MATURITY as usize { let block_reward = 1_000 * MICROSTACKS_PER_STACKS as u128; - let expected_bonus = (missed_initial_blocks as u128) * block_reward + let expected_bonus = (missed_initial_blocks as u128 * block_reward) / (INITIAL_MINING_BONUS_WINDOW as u128); // add mature coinbases + prior_liquid_ustx = expected_liquid_ustx; expected_liquid_ustx += block_reward + expected_bonus; } } @@ -1169,7 +1172,7 @@ let block_txs = vec![coinbase_tx]; - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -1263,7 +1266,7 @@ } - let block_builder = StacksBlockBuilder::make_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -1317,6 +1320,7 @@ pub mod test { let num_blocks = 10; let mut expected_liquid_ustx = 1024 * POX_THRESHOLD_STEPS_USTX * (keys.len() as u128); + let mut prior_liquid_ustx = expected_liquid_ustx; let mut missed_initial_blocks = 0; let alice = keys.pop().unwrap(); @@ -1355,7 +1359,7 @@ let block_txs = vec![coinbase_tx, burn_tx]; - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -1377,10 +1381,12 @@ peer.next_burnchain_block(burn_ops.clone()); peer.process_stacks_epoch_at_tip(&stacks_block, &microblocks); - expected_liquid_ustx -= 1; - let liquid_ustx = get_liquid_ustx(&mut peer); - assert_eq!(liquid_ustx, expected_liquid_ustx); + let liquid_ustx = get_liquid_ustx(&mut peer); + // get_liquid_ustx is "off by one", i.e., it loads the parent's liquid uSTX + assert_eq!(liquid_ustx, prior_liquid_ustx); + + expected_liquid_ustx -= 1; + prior_liquid_ustx = expected_liquid_ustx; if tenure_id >= MINER_REWARD_MATURITY as usize { let block_reward = 1_000 * MICROSTACKS_PER_STACKS as u128; @@ -1460,7 +1466,7 @@ block_txs.push(alice_lockup); } - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof,
tip.total_burn, @@ -1671,6 +1677,7 @@ pub mod test { } let block_builder = StacksBlockBuilder::make_block_builder( + false, &parent_tip, vrf_proof, tip.total_burn, @@ -1814,7 +1821,7 @@ pub mod test { if cur_reward_cycle >= lockup_reward_cycle { // this will grow as more miner rewards are unlocked, so be wary - if tenure_id >= (MINER_REWARD_MATURITY + 1) as usize { + if tenure_id >= (MINER_REWARD_MATURITY + 2) as usize { // miner rewards increased liquid supply, so less than 25% is locked. // minimum participation decreases. assert!(total_liquid_ustx > 4 * 1024 * POX_THRESHOLD_STEPS_USTX); @@ -1924,7 +1931,7 @@ pub mod test { block_txs.push(alice_stack); } - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -2192,7 +2199,7 @@ pub mod test { block_txs.push(bob_lockup); } - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -2403,7 +2410,7 @@ pub mod test { "(define-data-var test-run bool false) (define-data-var test-result int -1) (let ((result - (contract-call? '{}.pox stack-stx u256000000 (tuple (version 0x00) (hashbytes 0xae1593226f85e49a7eaff5b633ff687695438cc9)) burn-block-height u12))) + (contract-call? '{}.pox stack-stx u10240000000000 (tuple (version 0x00) (hashbytes 0xae1593226f85e49a7eaff5b633ff687695438cc9)) burn-block-height u12))) (var-set test-result (match result ok_value -1 err_value err_value)) (var-set test-run true)) @@ -2440,7 +2447,7 @@ pub mod test { block_txs.push(charlie_test_tx); } - let block_builder = StacksBlockBuilder::make_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); (anchored_block, vec![]) }); @@ -2603,7 +2610,7 @@ pub mod test { block_txs.push(alice_lockup); } - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -2702,7 +2709,7 @@ pub mod test { if cur_reward_cycle >= alice_reward_cycle { // this will grow as more miner rewards are unlocked, so be wary - if tenure_id >= (MINER_REWARD_MATURITY + 1) as usize { + if tenure_id >= (MINER_REWARD_MATURITY + 2) as usize { // miner rewards increased liquid supply, so less than 25% is locked. // minimum participation decreases. assert!(total_liquid_ustx > 4 * 1024 * POX_THRESHOLD_STEPS_USTX); @@ -2895,7 +2902,7 @@ pub mod test { block_txs.push(charlie_stack); } - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -3016,7 +3023,7 @@ pub mod test { eprintln!("\ntenure: {}\nreward cycle: {}\nmin-uSTX: {}\naddrs: {:?}\ntotal_liquid_ustx: {}\ntotal-stacked: {}\n", tenure_id, cur_reward_cycle, min_ustx, &reward_addrs, total_liquid_ustx, total_stacked); // this will grow as more miner rewards are unlocked, so be wary - if tenure_id >= (MINER_REWARD_MATURITY + 1) as usize { + if tenure_id >= (MINER_REWARD_MATURITY + 2) as usize { // miner rewards increased liquid supply, so less than 25% is locked. 
// minimum participation decreases. assert!(total_liquid_ustx > 4 * 1024 * POX_THRESHOLD_STEPS_USTX); @@ -3470,7 +3477,7 @@ pub mod test { block_txs.push(alice_withdraw_tx); } - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -3816,7 +3823,7 @@ pub mod test { // Note: this behavior is a bug in the miner and block processor: see issue #? let charlie_stack = make_bare_contract(&charlie, 2, 0, "charlie-try-stack", &format!( - "(asserts! (not (is-eq (print (contract-call? '{}.pox stack-stx u1 {{ version: 0x01, hashbytes: 0x1111111111111111111111111111111111111111 }} burn-block-height u1)) (err 17))) (err 1))", + "(asserts! (not (is-eq (print (contract-call? '{}.pox stack-stx u10240000000000 {{ version: 0x01, hashbytes: 0x1111111111111111111111111111111111111111 }} burn-block-height u1)) (err 17))) (err 1))", boot_code_addr())); block_txs.push(charlie_stack); @@ -3843,7 +3850,7 @@ pub mod test { block_txs.push(charlie_reject); } - let block_builder = StacksBlockBuilder::make_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); + let block_builder = StacksBlockBuilder::make_regtest_block_builder(&parent_tip, vrf_proof, tip.total_burn, microblock_pubkeyhash).unwrap(); let (anchored_block, _size, _cost) = StacksBlockBuilder::make_anchored_block_from_txs(block_builder, chainstate, &sortdb.index_conn(), block_txs).unwrap(); if tenure_id == 2 { @@ -3960,7 +3967,7 @@ pub mod test { if cur_reward_cycle >= alice_reward_cycle { // this will grow as more miner rewards are unlocked, so be wary - if tenure_id >= (MINER_REWARD_MATURITY + 1) as usize { + if tenure_id >= (MINER_REWARD_MATURITY + 2) as usize { // miner rewards increased liquid supply, so less than 25% is locked. // minimum participation decreases. 
assert!(total_liquid_ustx > 4 * 1024 * POX_THRESHOLD_STEPS_USTX); diff --git a/src/chainstate/stacks/db/blocks.rs b/src/chainstate/stacks/db/blocks.rs index e09179c276..a6cc65ea45 100644 --- a/src/chainstate/stacks/db/blocks.rs +++ b/src/chainstate/stacks/db/blocks.rs @@ -3368,7 +3368,10 @@ impl StacksChainState { """ */ - let effective_ht = burn_block_height - first_burn_block_height; + // this is saturating subtraction for the initial reward calculation + // where we are computing the coinbase reward for blocks that occur *before* + // the `first_burn_block_height` + let effective_ht = burn_block_height.saturating_sub(first_burn_block_height); let blocks_per_year = 52596; let stx_reward = if effective_ht < blocks_per_year * 4 { 1000 diff --git a/src/chainstate/stacks/db/mod.rs b/src/chainstate/stacks/db/mod.rs index b9cc3a889b..1adc97340b 100644 --- a/src/chainstate/stacks/db/mod.rs +++ b/src/chainstate/stacks/db/mod.rs @@ -71,6 +71,7 @@ use chainstate::burn::db::sortdb::*; use chainstate::stacks::boot::*; +use net::atlas::BNS_CHARS_REGEX; use net::Error as net_error; use vm::analysis::analysis_db::AnalysisDatabase; @@ -630,6 +631,29 @@ pub struct ChainstateAccountLockup { pub block_height: u64, } +#[derive(Debug, Clone)] +pub struct ChainstateBNSNamespace { + pub namespace_id: String, + pub importer: String, + pub revealed_at: u64, + pub launched_at: u64, + pub buckets: String, + pub base: u64, + pub coeff: u64, + pub nonalpha_discount: u64, + pub no_vowel_discount: u64, + pub lifetime: u64, +} + +#[derive(Debug, Clone)] +pub struct ChainstateBNSName { + pub fully_qualified_name: String, + pub owner: String, + pub registered_at: u64, + pub expired_at: u64, + pub zonefile_hash: String, +} + impl ChainstateAccountLockup { pub fn new(address: StacksAddress, amount: u64, block_height: u64) -> ChainstateAccountLockup { ChainstateAccountLockup { @@ -650,6 +674,10 @@ pub struct ChainStateBootData { Option Box>>>, pub get_bulk_initial_balances: Option Box>>>, + pub get_bulk_initial_namespaces: + Option Box>>>, + pub get_bulk_initial_names: + Option Box>>>, } impl ChainStateBootData { @@ -666,6 +694,8 @@ impl ChainStateBootData { post_flight_callback, get_bulk_initial_lockups: None, get_bulk_initial_balances: None, + get_bulk_initial_namespaces: None, + get_bulk_initial_names: None, } } } @@ -891,6 +921,7 @@ impl StacksChainState { } let mut allocation_events: Vec = vec![]; + info!( "Initializing chain with {} config balances", boot_data.initial_balances.len() @@ -911,10 +942,11 @@ impl StacksChainState { allocation_events.push(mint_event); } - if let Some(get_balances) = boot_data.get_bulk_initial_balances.take() { - info!("Initializing chain with balances"); - let mut balances_count = 0; - clarity_tx.connection().as_transaction(|clarity| { + clarity_tx.connection().as_transaction(|clarity| { + // Balances + if let Some(get_balances) = boot_data.get_bulk_initial_balances.take() { + info!("Initializing chain with balances"); + let mut balances_count = 0; let initial_balances = get_balances(); for balance in initial_balances { balances_count = balances_count + 1; @@ -937,8 +969,206 @@ impl StacksChainState { allocation_events.push(mint_event); } info!("Committing {} balances to genesis tx", balances_count); - }); - } + } + + // Lockups + if let Some(get_schedules) = boot_data.get_bulk_initial_lockups.take() { + info!("Initializing chain with lockups"); + let mut lockups_per_block: BTreeMap> = BTreeMap::new(); + let initial_lockups = get_schedules(); + for schedule in initial_lockups { + let 
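Two details in the hunks above are worth pinning down. `saturating_sub` clamps at zero instead of underflowing, so a burn height before `first_burn_block_height` yields an effective height of 0 rather than a debug-build panic (for example, `5u64.saturating_sub(10) == 0`). And the genesis lockups are batched by unlock height before being written, so each `lockups` map key is inserted exactly once. A standalone sketch of both, with made-up heights and amounts:

```rust
use std::collections::BTreeMap;

// Minimal sketch of the per-block batching used when seeding lockups:
// group values by unlock height so each key is written in one insert.
fn main() {
    let schedules = [(100u64, 500u64), (200, 250), (100, 750)];

    let mut lockups_per_block: BTreeMap<u64, Vec<u64>> = BTreeMap::new();
    for (block_height, amount) in schedules {
        lockups_per_block.entry(block_height).or_default().push(amount);
    }

    // Two schedules landed on height 100, one on height 200.
    assert_eq!(lockups_per_block[&100], vec![500, 750]);
    assert_eq!(lockups_per_block[&200], vec![250]);

    // The related blocks.rs fix: saturating subtraction clamps pre-genesis
    // burn heights to 0 instead of underflowing.
    assert_eq!(5u64.saturating_sub(10), 0);
}
```

The diff spells the grouping out with an explicit `Occupied`/`Vacant` match; `entry(..).or_default()` is the equivalent shorthand.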
stx_address = + StacksChainState::parse_genesis_address(&schedule.address, mainnet); + let value = Value::Tuple( + TupleData::from_data(vec![ + ("recipient".into(), Value::Principal(stx_address)), + ("amount".into(), Value::UInt(schedule.amount.into())), + ]) + .unwrap(), + ); + match lockups_per_block.entry(schedule.block_height) { + Entry::Occupied(schedules) => { + schedules.into_mut().push(value); + } + Entry::Vacant(entry) => { + let schedules = vec![value]; + entry.insert(schedules); + } + }; + } + + let lockup_contract_id = boot_code_id("lockup"); + clarity + .with_clarity_db(|db| { + for (block_height, schedule) in lockups_per_block.into_iter() { + let key = Value::UInt(block_height.into()); + let value = Value::list_from(schedule).unwrap(); + db.insert_entry(&lockup_contract_id, "lockups", key, value)?; + } + Ok(()) + }) + .unwrap(); + } + + // BNS Namespace + let bns_contract_id = boot_code_id("bns"); + if let Some(get_namespaces) = boot_data.get_bulk_initial_namespaces.take() { + info!("Initializing chain with namespaces"); + clarity + .with_clarity_db(|db| { + let initial_namespaces = get_namespaces(); + for entry in initial_namespaces { + let namespace = { + if !BNS_CHARS_REGEX.is_match(&entry.namespace_id) { + panic!("Invalid namespace characters"); + } + let buffer = entry.namespace_id.as_bytes(); + Value::buff_from(buffer.to_vec()).expect("Invalid namespace") + }; + + let importer = { + let address = StacksChainState::parse_genesis_address( + &entry.importer, + mainnet, + ); + Value::Principal(address) + }; + + let revealed_at = Value::UInt(entry.revealed_at.into()); + let launched_at = Value::UInt(entry.launched_at.into()); + let lifetime = Value::UInt(entry.lifetime.into()); + let price_function = { + let base = Value::UInt(entry.base.into()); + let coeff = Value::UInt(entry.coeff.into()); + let nonalpha_discount = + Value::UInt(entry.nonalpha_discount.into()); + let no_vowel_discount = + Value::UInt(entry.no_vowel_discount.into()); + let buckets: Vec<_> = entry + .buckets + .split(";") + .map(|e| Value::UInt(e.parse::().unwrap().into())) + .collect(); + assert_eq!(buckets.len(), 16); + + TupleData::from_data(vec![ + ("buckets".into(), Value::list_from(buckets).unwrap()), + ("base".into(), base), + ("coeff".into(), coeff), + ("nonalpha-discount".into(), nonalpha_discount), + ("no-vowel-discount".into(), no_vowel_discount), + ]) + .unwrap() + }; + + let namespace_props = Value::Tuple( + TupleData::from_data(vec![ + ("revealed-at".into(), revealed_at), + ("launched-at".into(), Value::some(launched_at).unwrap()), + ("lifetime".into(), lifetime), + ("namespace-import".into(), importer), + ("price-function".into(), Value::Tuple(price_function)), + ]) + .unwrap(), + ); + + db.insert_entry( + &bns_contract_id, + "namespaces", + namespace, + namespace_props, + )?; + } + Ok(()) + }) + .unwrap(); + } + + // BNS Names + if let Some(get_names) = boot_data.get_bulk_initial_names.take() { + info!("Initializing chain with names"); + clarity + .with_clarity_db(|db| { + let initial_names = get_names(); + for entry in initial_names { + let components: Vec<_> = + entry.fully_qualified_name.split(".").collect(); + assert_eq!(components.len(), 2); + + let namespace = { + let namespace_str = components[1]; + if !BNS_CHARS_REGEX.is_match(&namespace_str) { + panic!("Invalid namespace characters"); + } + let buffer = namespace_str.as_bytes(); + Value::buff_from(buffer.to_vec()).expect("Invalid namespace") + }; + + let name = { + let name_str = components[0].to_string(); + if 
!BNS_CHARS_REGEX.is_match(&name_str) { + panic!("Invalid name characters"); + } + let buffer = name_str.as_bytes(); + Value::buff_from(buffer.to_vec()).expect("Invalid name") + }; + + let fqn = Value::Tuple( + TupleData::from_data(vec![ + ("namespace".into(), namespace), + ("name".into(), name), + ]) + .unwrap(), + ); + + let owner_address = + StacksChainState::parse_genesis_address(&entry.owner, mainnet); + + let zonefile_hash = { + if entry.zonefile_hash.len() == 0 { + Value::buff_from(vec![]).unwrap() + } else { + let buffer = Hash160::from_hex(&entry.zonefile_hash) + .expect("Invalid zonefile_hash"); + Value::buff_from(buffer.to_bytes().to_vec()).unwrap() + } + }; + + db.set_nft_owner(&bns_contract_id, "names", &fqn, &owner_address)?; + + let registered_at = Value::UInt(entry.registered_at.into()); + let name_props = Value::Tuple( + TupleData::from_data(vec![ + ( + "registered-at".into(), + Value::some(registered_at).unwrap(), + ), + ("imported-at".into(), Value::none()), + ("revoked-at".into(), Value::none()), + ("zonefile-hash".into(), zonefile_hash), + ]) + .unwrap(), + ); + + db.insert_entry( + &bns_contract_id, + "name-properties", + fqn.clone(), + name_props, + )?; + + db.insert_entry( + &bns_contract_id, + "owner-name", + Value::Principal(owner_address), + fqn, + )?; + } + Ok(()) + }) + .unwrap(); + } + }); let allocations_tx = StacksTransaction::new( tx_version.clone(), @@ -957,50 +1187,6 @@ impl StacksChainState { ); receipts.push(allocations_receipt); - if let Some(get_schedules) = boot_data.get_bulk_initial_lockups.take() { - info!("Initializing chain with lockups"); - let mut lockups_per_block: BTreeMap> = BTreeMap::new(); - let initial_lockups = get_schedules(); - for schedule in initial_lockups { - let stx_address = - StacksChainState::parse_genesis_address(&schedule.address, mainnet); - let value = Value::Tuple( - TupleData::from_data(vec![ - ("recipient".into(), Value::Principal(stx_address)), - ("amount".into(), Value::UInt(schedule.amount.into())), - ]) - .unwrap(), - ); - match lockups_per_block.entry(schedule.block_height) { - Entry::Occupied(schedules) => { - schedules.into_mut().push(value); - } - Entry::Vacant(entry) => { - let schedules = vec![value]; - entry.insert(schedules); - } - }; - } - - let lockup_contract_id = boot_code_id("lockup"); - clarity_tx.connection().as_transaction(|clarity| { - clarity - .with_clarity_db(|db| { - for (block_height, schedule) in lockups_per_block.into_iter() { - let key = Value::UInt(block_height.into()); - db.insert_entry( - &lockup_contract_id, - "lockups", - key, - Value::list_from(schedule).unwrap(), - )?; - } - Ok(()) - }) - .unwrap(); - }); - } - if let Some(callback) = boot_data.post_flight_callback.take() { callback(&mut clarity_tx); } @@ -1733,6 +1919,7 @@ pub mod test { use chainstate::stacks::*; use std::fs; + use stx_genesis::GenesisData; use vm::database::NULL_BURN_STATE_DB; pub fn instantiate_chainstate( @@ -1770,6 +1957,8 @@ pub mod test { first_burnchain_block_timestamp: 0, get_bulk_initial_lockups: None, get_bulk_initial_balances: None, + get_bulk_initial_names: None, + get_bulk_initial_namespaces: None, }; StacksChainState::open_and_exec( @@ -1815,4 +2004,189 @@ pub mod test { assert!(contract_res.is_some()); } } + + #[test] + fn test_chainstate_sampled_genesis_consistency() { + // Test root hash for the test chainstate data set + let mut boot_data = ChainStateBootData { + initial_balances: vec![], + first_burnchain_block_hash: BurnchainHeaderHash::zero(), + first_burnchain_block_height: 0, + 
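The name-import loop above enforces the same shape for every genesis name: exactly two dot-separated components, both drawn from the `BNS_CHARS_REGEX` character set, before anything is written to the Clarity maps. A standalone approximation of that validation path, using the same regex pattern but otherwise hypothetical helper names (it returns `None` where the chainstate code panics, since genesis data is trusted there):

```rust
use regex::Regex;

// Hypothetical helper mirroring the genesis-import checks: split
// "name.namespace", validate both parts against the BNS character set.
fn parse_fqn(fully_qualified_name: &str) -> Option<(String, String)> {
    let chars = Regex::new("^([a-z0-9]|[-_])*$").unwrap();
    let components: Vec<&str> = fully_qualified_name.split('.').collect();
    if components.len() != 2 {
        return None; // exactly "name.namespace", nothing more
    }
    let (name, namespace) = (components[0], components[1]);
    if !chars.is_match(name) || !chars.is_match(namespace) {
        return None; // invalid characters
    }
    Some((name.to_string(), namespace.to_string()))
}

fn main() {
    assert_eq!(
        parse_fqn("muneeb.id"),
        Some(("muneeb".to_string(), "id".to_string()))
    );
    assert_eq!(parse_fqn("UPPER.id"), None); // uppercase is rejected
    assert_eq!(parse_fqn("a.b.c"), None); // subdomains are not imported here
}
```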
first_burnchain_block_timestamp: 0, + post_flight_callback: None, + get_bulk_initial_lockups: Some(Box::new(|| { + Box::new(GenesisData::new(true).read_lockups().map(|item| { + ChainstateAccountLockup { + address: item.address, + amount: item.amount, + block_height: item.block_height, + } + })) + })), + get_bulk_initial_balances: Some(Box::new(|| { + Box::new(GenesisData::new(true).read_balances().map(|item| { + ChainstateAccountBalance { + address: item.address, + amount: item.amount, + } + })) + })), + get_bulk_initial_namespaces: Some(Box::new(|| { + Box::new(GenesisData::new(true).read_namespaces().map(|item| { + ChainstateBNSNamespace { + namespace_id: item.namespace_id, + importer: item.importer, + revealed_at: item.reveal_block as u64, + launched_at: item.ready_block as u64, + buckets: item.buckets, + base: item.base as u64, + coeff: item.coeff as u64, + nonalpha_discount: item.nonalpha_discount as u64, + no_vowel_discount: item.no_vowel_discount as u64, + lifetime: item.lifetime as u64, + } + })) + })), + get_bulk_initial_names: Some(Box::new(|| { + Box::new( + GenesisData::new(true) + .read_names() + .map(|item| ChainstateBNSName { + fully_qualified_name: item.fully_qualified_name, + owner: item.owner, + registered_at: item.registered_at as u64, + expired_at: item.expire_block as u64, + zonefile_hash: item.zonefile_hash, + }), + ) + })), + }; + + let path = chainstate_path("genesis-consistency-chainstate-test"); + match fs::metadata(&path) { + Ok(_) => { + fs::remove_dir_all(&path).unwrap(); + } + Err(_) => {} + }; + + let mut chainstate = StacksChainState::open_and_exec( + false, + 0x80000000, + &path, + Some(&mut boot_data), + ExecutionCost::max_value(), + ) + .unwrap() + .0; + + let genesis_root_hash = chainstate.clarity_state.with_marf(|marf| { + let index_block_hash = StacksBlockHeader::make_index_block_hash( + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + ); + marf.get_root_hash_at(&index_block_hash).unwrap() + }); + + // If the genesis data changed, then this test will fail. 
+ // Just update the expected value + assert_eq!( + format!("{}", genesis_root_hash), + "dd2213e2a0f506ec519672752f033ce2070fa279a579d983bcf2edefb35ce131" + ); + } + + #[test] + #[ignore] + fn test_chainstate_full_genesis_consistency() { + // Test root hash for the final chainstate data set + // TODO: update the fields (first_burnchain_block_hash, first_burnchain_block_height, first_burnchain_block_timestamp) + // once https://github.com/blockstack/stacks-blockchain/pull/2173 merges + let mut boot_data = ChainStateBootData { + initial_balances: vec![], + first_burnchain_block_hash: BurnchainHeaderHash::zero(), + first_burnchain_block_height: 0, + first_burnchain_block_timestamp: 0, + post_flight_callback: None, + get_bulk_initial_lockups: Some(Box::new(|| { + Box::new(GenesisData::new(false).read_lockups().map(|item| { + ChainstateAccountLockup { + address: item.address, + amount: item.amount, + block_height: item.block_height, + } + })) + })), + get_bulk_initial_balances: Some(Box::new(|| { + Box::new(GenesisData::new(false).read_balances().map(|item| { + ChainstateAccountBalance { + address: item.address, + amount: item.amount, + } + })) + })), + get_bulk_initial_namespaces: Some(Box::new(|| { + Box::new(GenesisData::new(true).read_namespaces().map(|item| { + ChainstateBNSNamespace { + namespace_id: item.namespace_id, + importer: item.importer, + revealed_at: item.reveal_block as u64, + launched_at: item.ready_block as u64, + buckets: item.buckets, + base: item.base as u64, + coeff: item.coeff as u64, + nonalpha_discount: item.nonalpha_discount as u64, + no_vowel_discount: item.no_vowel_discount as u64, + lifetime: item.lifetime as u64, + } + })) + })), + get_bulk_initial_names: Some(Box::new(|| { + Box::new( + GenesisData::new(true) + .read_names() + .map(|item| ChainstateBNSName { + fully_qualified_name: item.fully_qualified_name, + owner: item.owner, + registered_at: item.registered_at as u64, + expired_at: item.expire_block as u64, + zonefile_hash: item.zonefile_hash, + }), + ) + })), + }; + + let path = chainstate_path("genesis-consistency-chainstate"); + match fs::metadata(&path) { + Ok(_) => { + fs::remove_dir_all(&path).unwrap(); + } + Err(_) => {} + }; + + let mut chainstate = StacksChainState::open_and_exec( + true, + 0x000000001, + &path, + Some(&mut boot_data), + ExecutionCost::max_value(), + ) + .unwrap() + .0; + + let genesis_root_hash = chainstate.clarity_state.with_marf(|marf| { + let index_block_hash = StacksBlockHeader::make_index_block_hash( + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &FIRST_STACKS_BLOCK_HASH, + ); + marf.get_root_hash_at(&index_block_hash).unwrap() + }); + + // If the genesis data changed, then this test will fail. 
+ // Just update the expected value + assert_eq!( + format!("{}", genesis_root_hash), + "30f4472782b844e508bfebd8912f271270c1fd04393cd18e884f42dbb1a133f1" + ); + } } diff --git a/src/chainstate/stacks/db/unconfirmed.rs b/src/chainstate/stacks/db/unconfirmed.rs index 1ca0b951a0..7d53cd52af 100644 --- a/src/chainstate/stacks/db/unconfirmed.rs +++ b/src/chainstate/stacks/db/unconfirmed.rs @@ -509,7 +509,7 @@ mod test { } }; - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, @@ -738,7 +738,7 @@ mod test { } }; - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, diff --git a/src/chainstate/stacks/miner.rs b/src/chainstate/stacks/miner.rs index 278f77fc38..e342a32128 100644 --- a/src/chainstate/stacks/miner.rs +++ b/src/chainstate/stacks/miner.rs @@ -1098,18 +1098,33 @@ impl StacksBlockBuilder { /// Create a block builder for mining pub fn make_block_builder( + mainnet: bool, stacks_parent_header: &StacksHeaderInfo, proof: VRFProof, total_burn: u64, pubkey_hash: Hash160, ) -> Result { let builder = if stacks_parent_header.consensus_hash == FIRST_BURNCHAIN_CONSENSUS_HASH { + let (first_block_hash_hex, first_block_height, first_block_ts) = if mainnet { + ( + BITCOIN_MAINNET_FIRST_BLOCK_HASH, + BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT, + BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP, + ) + } else { + ( + BITCOIN_TESTNET_FIRST_BLOCK_HASH, + BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT, + BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP, + ) + }; + let first_block_hash = BurnchainHeaderHash::from_hex(first_block_hash_hex).unwrap(); StacksBlockBuilder::first_pubkey_hash( 0, &FIRST_BURNCHAIN_CONSENSUS_HASH, - &FIRST_BURNCHAIN_BLOCK_HASH, - FIRST_BURNCHAIN_BLOCK_HEIGHT, - FIRST_BURNCHAIN_BLOCK_TIMESTAMP, + &first_block_hash, + first_block_height as u32, + first_block_ts as u64, &proof, pubkey_hash, ) @@ -1135,6 +1150,46 @@ impl StacksBlockBuilder { Ok(builder) } + /// Create a block builder for regtest mining + pub fn make_regtest_block_builder( + stacks_parent_header: &StacksHeaderInfo, + proof: VRFProof, + total_burn: u64, + pubkey_hash: Hash160, + ) -> Result { + let builder = if stacks_parent_header.consensus_hash == FIRST_BURNCHAIN_CONSENSUS_HASH { + let first_block_hash = + BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); + StacksBlockBuilder::first_pubkey_hash( + 0, + &FIRST_BURNCHAIN_CONSENSUS_HASH, + &first_block_hash, + BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32, + BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64, + &proof, + pubkey_hash, + ) + } else { + // building off an existing stacks block + let new_work = StacksWorkScore { + burn: total_burn, + work: stacks_parent_header + .block_height + .checked_add(1) + .expect("FATAL: block height overflow"), + }; + + StacksBlockBuilder::from_parent_pubkey_hash( + 0, + stacks_parent_header, + &new_work, + &proof, + pubkey_hash, + ) + }; + Ok(builder) + } + /// Given access to the mempool, mine an anchored block with no more than the given execution cost. /// returns the assembled block, and the consumed execution budget. 
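With `make_block_builder` now taking a `mainnet` flag and `make_regtest_block_builder` split out, the genesis block parameters become a per-network lookup. A condensed sketch of that selection; the enum and function are illustrative, but the hashes, heights, and timestamps are the constants this diff adds to `src/core/mod.rs`:

```rust
// Sketch of the constant selection the builder change introduces: pick the
// first burnchain block parameters by network instead of one global set.
#[derive(Debug, Clone, Copy)]
enum Network {
    Mainnet,
    Testnet,
    Regtest,
}

fn first_block_params(network: Network) -> (&'static str, u64, u32) {
    match network {
        Network::Mainnet => (
            "0000000000000000000351332d33cb67e4baa9bbe7808dc7c46517eb0b396e4b",
            661389,
            1607986994,
        ),
        Network::Testnet => (
            "000000000000003efa81a29f2ee638ca4d4928a073e68789bb06a4fc0b153653",
            1894315,
            1606093490,
        ),
        Network::Regtest => (
            "0000000000000000000000000000000000000000000000000000000000000000",
            0,
            0,
        ),
    }
}

fn main() {
    for network in [Network::Mainnet, Network::Testnet, Network::Regtest] {
        let (hash, height, ts) = first_block_params(network);
        println!("{:?}: height {} ts {} hash {}", network, height, ts, hash);
    }
}
```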
pub fn build_anchored_block( @@ -1170,6 +1225,7 @@ impl StacksBlockBuilder { let (mut chainstate, _) = chainstate_handle.reopen_limited(execution_budget)?; // used for processing a block up to the given limit let mut builder = StacksBlockBuilder::make_block_builder( + chainstate.mainnet, parent_stacks_header, proof, total_burn, @@ -6164,7 +6220,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - stx_transfer, + &stx_transfer, ) .unwrap(); } @@ -6302,7 +6358,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - stx_transfer, + &stx_transfer, ) .unwrap(); } @@ -6324,7 +6380,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - stx_transfer, + &stx_transfer, ) .unwrap(); } @@ -6506,7 +6562,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - stx_transfer, + &stx_transfer, ) .unwrap(); @@ -6524,7 +6580,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - contract_tx, + &contract_tx, ) .unwrap(); @@ -6541,7 +6597,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - stx_transfer, + &stx_transfer, ) .unwrap(); @@ -6691,7 +6747,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - contract_tx, + &contract_tx, ) .unwrap(); } @@ -6849,7 +6905,7 @@ pub mod test { chainstate, &parent_consensus_hash, &parent_header_hash, - contract_tx, + &contract_tx, ) .unwrap(); } diff --git a/src/core/mempool.rs b/src/core/mempool.rs index b033c0d5f4..4fcd3745d7 100644 --- a/src/core/mempool.rs +++ b/src/core/mempool.rs @@ -905,7 +905,7 @@ impl MemPoolDB { chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, - tx: StacksTransaction, + tx: &StacksTransaction, do_admission_checks: bool, ) -> Result<(), MemPoolRejection> { test_debug!( @@ -960,7 +960,7 @@ impl MemPoolDB { mempool_tx .admitter .set_block(&block_hash, (*consensus_hash).clone()); - mempool_tx.admitter.will_admit_tx(chainstate, &tx, len)?; + mempool_tx.admitter.will_admit_tx(chainstate, tx, len)?; } MemPoolDB::try_add_tx( @@ -988,7 +988,7 @@ impl MemPoolDB { chainstate: &mut StacksChainState, consensus_hash: &ConsensusHash, block_hash: &BlockHeaderHash, - tx: StacksTransaction, + tx: &StacksTransaction, ) -> Result<(), MemPoolRejection> { let mut mempool_tx = self.tx_begin().map_err(MemPoolRejection::DBError)?; MemPoolDB::tx_submit( @@ -1020,7 +1020,7 @@ impl MemPoolDB { chainstate, consensus_hash, block_hash, - tx, + &tx, false, )?; mempool_tx.commit().map_err(MemPoolRejection::DBError)?; diff --git a/src/core/mod.rs b/src/core/mod.rs index b8a4200d2b..f29b56e0a7 100644 --- a/src/core/mod.rs +++ b/src/core/mod.rs @@ -37,6 +37,8 @@ pub const NETWORK_ID_TESTNET: u32 = 0xff000000; // default port pub const NETWORK_P2P_PORT: u16 = 6265; +// sliding burnchain window over which a miner's past block-commit payouts will be used to weight +// its current block-commit in a sortition pub const MINING_COMMITMENT_WINDOW: u8 = 6; /// The number of blocks which will share the block bonus @@ -50,9 +52,23 @@ pub const INITIAL_MINING_BONUS_WINDOW: u16 = 10_000; // first burnchain block hash // TODO: update once we know the true first burnchain block pub const FIRST_BURNCHAIN_CONSENSUS_HASH: ConsensusHash = ConsensusHash([0u8; 20]); -pub const FIRST_BURNCHAIN_BLOCK_HASH: BurnchainHeaderHash = BurnchainHeaderHash([0u8; 32]); -pub const FIRST_BURNCHAIN_BLOCK_HEIGHT: u32 = 0; -pub const FIRST_BURNCHAIN_BLOCK_TIMESTAMP: u64 = 0; + +// TODO: TO BE SET BY 
STACKS_V1_MINER_THRESHOLD +pub const BITCOIN_MAINNET_FIRST_BLOCK_HEIGHT: u64 = 661389; +pub const BITCOIN_MAINNET_FIRST_BLOCK_TIMESTAMP: u32 = 1607986994; +pub const BITCOIN_MAINNET_FIRST_BLOCK_HASH: &str = + "0000000000000000000351332d33cb67e4baa9bbe7808dc7c46517eb0b396e4b"; +pub const BITCOIN_MAINNET_INITIAL_REWARD_START_BLOCK: u64 = 651389; + +pub const BITCOIN_TESTNET_FIRST_BLOCK_HEIGHT: u64 = 1894315; +pub const BITCOIN_TESTNET_FIRST_BLOCK_TIMESTAMP: u32 = 1606093490; +pub const BITCOIN_TESTNET_FIRST_BLOCK_HASH: &str = + "000000000000003efa81a29f2ee638ca4d4928a073e68789bb06a4fc0b153653"; + +pub const BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT: u64 = 0; +pub const BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP: u32 = 0; +pub const BITCOIN_REGTEST_FIRST_BLOCK_HASH: &str = + "0000000000000000000000000000000000000000000000000000000000000000"; pub const FIRST_STACKS_BLOCK_HASH: BlockHeaderHash = BlockHeaderHash([0u8; 32]); pub const EMPTY_MICROBLOCK_PARENT_HASH: BlockHeaderHash = BlockHeaderHash([0u8; 32]); @@ -64,7 +80,7 @@ pub const CHAINSTATE_VERSION: &'static str = "23.0.0.0"; pub const MICROSTACKS_PER_STACKS: u32 = 1_000_000; -pub const POX_SUNSET_START: u64 = (FIRST_BURNCHAIN_BLOCK_HEIGHT as u64) + 100_000; +pub const POX_SUNSET_START: u64 = 100_000; pub const POX_SUNSET_END: u64 = POX_SUNSET_START + 400_000; pub const POX_PREPARE_WINDOW_LENGTH: u32 = 100; diff --git a/src/lib.rs b/src/lib.rs index f9ef371685..e56fc125de 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -72,6 +72,9 @@ pub mod vm; #[macro_use] pub mod chainstate; +#[cfg(test)] +extern crate stx_genesis; + pub mod address; pub mod burnchains; pub mod core; diff --git a/src/main.rs b/src/main.rs index c171ce91f7..240ff0dff7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -455,7 +455,7 @@ fn main() { &old_burnchaindb_path, first_burnchain_block_height, &first_burnchain_block_hash, - FIRST_BURNCHAIN_BLOCK_TIMESTAMP, + BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP.into(), true, ) .unwrap(); @@ -468,6 +468,8 @@ fn main() { first_burnchain_block_timestamp: 0, get_bulk_initial_lockups: None, get_bulk_initial_balances: None, + get_bulk_initial_namespaces: None, + get_bulk_initial_names: None, }; let (mut new_chainstate, _) = StacksChainState::open_and_exec( diff --git a/src/net/atlas/db.rs b/src/net/atlas/db.rs index bf9c1ca1a3..1050c67d23 100644 --- a/src/net/atlas/db.rs +++ b/src/net/atlas/db.rs @@ -26,7 +26,7 @@ use chainstate::burn::{BlockHeaderHash, ConsensusHash}; use chainstate::stacks::StacksBlockId; use net::StacksMessageCodec; -use super::{Attachment, AttachmentInstance}; +use super::{AtlasConfig, Attachment, AttachmentInstance}; pub const ATLASDB_VERSION: &'static str = "23.0.0.0"; @@ -35,8 +35,11 @@ const ATLASDB_SETUP: &'static [&'static str] = &[ CREATE TABLE attachments( hash TEXT UNIQUE PRIMARY KEY, content BLOB NOT NULL, - was_instantiated INTEGER NOT NULL - );"#, + was_instantiated INTEGER NOT NULL, + created_at INTEGER NOT NULL + ); + CREATE INDEX index_was_instantiated ON attachments(was_instantiated); + "#, r#" CREATE TABLE attachment_instances( id INTEGER PRIMARY KEY AUTOINCREMENT, @@ -89,6 +92,7 @@ impl FromRow for AttachmentInstance { #[derive(Debug)] pub struct AtlasDB { + pub atlas_config: AtlasConfig, pub conn: Connection, pub readwrite: bool, } @@ -113,9 +117,32 @@ impl AtlasDB { Ok(()) } + pub fn should_keep_attachment( + &self, + contract_id: &QualifiedContractIdentifier, + attachment: &Attachment, + ) -> bool { + if !self.atlas_config.contracts.contains(contract_id) { + info!( + "Atlas: will discard posted attachment - {} not in
supported contracts", + contract_id + ); + return false; + } + if attachment.content.len() as u32 > self.atlas_config.attachments_max_size { + info!("Atlas: will discard posted attachment - attachment too large"); + return false; + } + true + } + // Open the burn database at the given path. Open read-only or read/write. // If opened for read/write and it doesn't exist, instantiate it. - pub fn connect(path: &String, readwrite: bool) -> Result { + pub fn connect( + atlas_config: AtlasConfig, + path: &String, + readwrite: bool, + ) -> Result { let mut create_flag = false; let open_flags = if fs::metadata(path).is_err() { // need to create @@ -137,8 +164,9 @@ impl AtlasDB { Connection::open_with_flags(path, open_flags).map_err(|e| db_error::SqliteError(e))?; let mut db = AtlasDB { - conn: conn, - readwrite: readwrite, + atlas_config, + conn, + readwrite, }; if create_flag { db.instantiate()?; @@ -148,11 +176,11 @@ impl AtlasDB { // Open an atlas database in memory (used for testing) #[cfg(test)] - pub fn connect_memory() -> Result { + pub fn connect_memory(atlas_config: AtlasConfig) -> Result { let conn = Connection::open_in_memory().map_err(|e| db_error::SqliteError(e))?; - let mut db = AtlasDB { - conn: conn, + atlas_config, + conn, readwrite: true, }; @@ -231,13 +259,26 @@ impl AtlasDB { Ok(res) } - pub fn insert_new_attachment(&mut self, attachment: &Attachment) -> Result<(), db_error> { + pub fn insert_uninstantiated_attachment( + &mut self, + attachment: &Attachment, + ) -> Result<(), db_error> { + // Insert the new attachment + let uninstantiated_attachments = self.count_uninstantiated_attachments()?; + if uninstantiated_attachments >= self.atlas_config.max_uninstantiated_attachments { + let to_delete = + 1 + uninstantiated_attachments - self.atlas_config.max_uninstantiated_attachments; + self.evict_k_oldest_uninstantiated_attachments(to_delete)?; + } + let tx = self.tx_begin()?; + let now = util::get_epoch_time_secs() as i64; let res = tx.execute( - "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated) VALUES (?, ?, 0)", + "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 0, ?)", &[ &attachment.hash() as &dyn ToSql, &attachment.content as &dyn ToSql, + &now as &dyn ToSql, ], ); res.map_err(db_error::SqliteError)?; @@ -245,16 +286,49 @@ impl AtlasDB { Ok(()) } + pub fn evict_k_oldest_uninstantiated_attachments(&mut self, k: u32) -> Result<(), db_error> { + let tx = self.tx_begin()?; + let res = tx.execute( + "DELETE FROM attachments WHERE hash IN (SELECT hash FROM attachments WHERE was_instantiated = 0 ORDER BY created_at ASC LIMIT ?)", + &[&k as &dyn ToSql], + ); + res.map_err(db_error::SqliteError)?; + tx.commit().map_err(db_error::SqliteError)?; + Ok(()) + } + + pub fn evict_expired_uninstantiated_attachments(&mut self) -> Result<(), db_error> { + let now = util::get_epoch_time_secs() as i64; + let cut_off = now - self.atlas_config.uninstantiated_attachments_expire_after as i64; + let tx = self.tx_begin()?; + let res = tx.execute( + "DELETE FROM attachments WHERE was_instantiated = 0 AND created_at < ?", + &[&cut_off as &dyn ToSql], + ); + res.map_err(db_error::SqliteError)?; + tx.commit().map_err(db_error::SqliteError)?; + Ok(()) + } + + pub fn count_uninstantiated_attachments(&self) -> Result { + let qry = "SELECT COUNT(rowid) FROM attachments + WHERE was_instantiated = 0"; + let count = query_count(&self.conn, qry, NO_PARAMS)? 
as u32; + Ok(count) + } + pub fn insert_instantiated_attachment( &mut self, attachment: &Attachment, ) -> Result<(), db_error> { + let now = util::get_epoch_time_secs() as i64; let tx = self.tx_begin()?; tx.execute( - "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated) VALUES (?, ?, 1)", + "INSERT OR REPLACE INTO attachments (hash, content, was_instantiated, created_at) VALUES (?, ?, 1, ?)", &[ &attachment.hash() as &dyn ToSql, &attachment.content as &dyn ToSql, + &now as &dyn ToSql, ], ) .map_err(db_error::SqliteError)?; @@ -267,7 +341,7 @@ impl AtlasDB { Ok(()) } - pub fn find_new_attachment( + pub fn find_uninstantiated_attachment( &mut self, content_hash: &Hash160, ) -> Result, db_error> { @@ -290,19 +364,19 @@ impl AtlasDB { Ok(rows) } - pub fn find_instantiated_attachment( + pub fn find_attachment( &mut self, content_hash: &Hash160, ) -> Result, db_error> { let hex_content_hash = to_hex(&content_hash.0[..]); - let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 1" + let qry = "SELECT content, hash FROM attachments WHERE hash = ?1 AND was_instantiated = 0" .to_string(); let args = [&hex_content_hash as &dyn ToSql]; let row = query_row::(&self.conn, &qry, &args)?; Ok(row) } - pub fn insert_new_attachment_instance( + pub fn insert_uninstantiated_attachment_instance( &mut self, attachment: &AttachmentInstance, is_available: bool, diff --git a/src/net/atlas/download.rs b/src/net/atlas/download.rs index 97829af68c..bce3a86ecd 100644 --- a/src/net/atlas/download.rs +++ b/src/net/atlas/download.rs @@ -98,6 +98,9 @@ impl AttachmentsDownloader { .resolve_attachment(&attachment.hash()) } + // Every once in a while, we delete uninstantiated attachments + network.atlasdb.evict_expired_uninstantiated_attachments()?; + // Update reliability reports for (peer_url, report) in context.peers.drain() { self.reliability_reports.insert(peer_url, report); @@ -133,7 +136,7 @@ impl AttachmentsDownloader { if attachment_instance.content_hash == Hash160::empty() { // todo(ludo) insert or update ? 
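The eviction logic added to `AtlasDB` above combines a capacity bound (drop the k oldest uninstantiated rows) with a TTL bound (drop rows older than a cut-off). A compressed, runnable model of both paths against the new schema, using the `rusqlite` crate the node already depends on; the SQL mirrors the diff, while the driver code around it is illustrative:

```rust
use rusqlite::{params, Connection};

// Model of the two AtlasDB eviction paths over the new created_at column.
fn main() -> rusqlite::Result<()> {
    let conn = Connection::open_in_memory()?;
    conn.execute_batch(
        "CREATE TABLE attachments(
            hash TEXT UNIQUE PRIMARY KEY,
            content BLOB NOT NULL,
            was_instantiated INTEGER NOT NULL,
            created_at INTEGER NOT NULL
        );",
    )?;
    for (i, created_at) in [(0, 100i64), (1, 200), (2, 300)] {
        conn.execute(
            "INSERT INTO attachments VALUES (?, ?, 0, ?)",
            params![format!("hash-{}", i), vec![0u8], created_at],
        )?;
    }

    // Capacity path: evict the k oldest uninstantiated attachments.
    conn.execute(
        "DELETE FROM attachments WHERE hash IN
         (SELECT hash FROM attachments WHERE was_instantiated = 0
          ORDER BY created_at ASC LIMIT ?)",
        params![1],
    )?;

    // TTL path: evict uninstantiated attachments older than the cut-off.
    conn.execute(
        "DELETE FROM attachments WHERE was_instantiated = 0 AND created_at < ?",
        params![250i64],
    )?;

    let left: u32 = conn.query_row(
        "SELECT COUNT(rowid) FROM attachments WHERE was_instantiated = 0",
        [],
        |row| row.get(0),
    )?;
    assert_eq!(left, 1); // only the newest row (created_at = 300) survives
    Ok(())
}
```

Indexing `was_instantiated` (as the new `CREATE INDEX` does) keeps both `DELETE`s and the count query from scanning instantiated rows.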
atlasdb - .insert_new_attachment_instance(&attachment_instance, true) + .insert_uninstantiated_attachment_instance(&attachment_instance, true) .map_err(|e| net_error::DBError(e))?; debug!("Atlas: inserting and pairing new attachment instance with empty hash"); resolved_attachments.push(attachment_instance); @@ -141,11 +144,9 @@ impl AttachmentsDownloader { } // Do we already have a matching validated attachment - if let Ok(Some(_entry)) = - atlasdb.find_instantiated_attachment(&attachment_instance.content_hash) - { + if let Ok(Some(_entry)) = atlasdb.find_attachment(&attachment_instance.content_hash) { atlasdb - .insert_new_attachment_instance(&attachment_instance, true) + .insert_uninstantiated_attachment_instance(&attachment_instance, true) .map_err(|e| net_error::DBError(e))?; debug!( "Atlas: inserting and pairing new attachment instance to existing attachment" @@ -156,13 +157,13 @@ impl AttachmentsDownloader { // Do we already have a matching inboxed attachment if let Ok(Some(attachment)) = - atlasdb.find_new_attachment(&attachment_instance.content_hash) + atlasdb.find_uninstantiated_attachment(&attachment_instance.content_hash) { atlasdb .insert_instantiated_attachment(&attachment) .map_err(|e| net_error::DBError(e))?; atlasdb - .insert_new_attachment_instance(&attachment_instance, true) + .insert_uninstantiated_attachment_instance(&attachment_instance, true) .map_err(|e| net_error::DBError(e))?; debug!("Atlas: inserting and pairing new attachment instance to inboxed attachment, now validated"); resolved_attachments.push(attachment_instance); @@ -173,7 +174,7 @@ impl AttachmentsDownloader { // Let's append it to the batch being constructed in this routine. attachments_batch.track_attachment(&attachment_instance); atlasdb - .insert_new_attachment_instance(&attachment_instance, false) + .insert_uninstantiated_attachment_instance(&attachment_instance, false) .map_err(|e| net_error::DBError(e))?; } diff --git a/src/net/atlas/mod.rs b/src/net/atlas/mod.rs index 533411c328..826a891027 100644 --- a/src/net/atlas/mod.rs +++ b/src/net/atlas/mod.rs @@ -13,26 +13,23 @@ use net::StacksMessageCodec; use util::hash::{to_hex, Hash160, MerkleHashFunc}; use vm::types::{QualifiedContractIdentifier, SequenceData, TupleData, Value}; +use regex::Regex; use std::collections::{HashMap, HashSet}; use std::convert::TryFrom; use std::hash::{Hash, Hasher}; -pub const BNS_NAMESPACE_MIN_LEN: usize = 1; -pub const BNS_NAMESPACE_MAX_LEN: usize = 20; -pub const BNS_NAME_MIN_LEN: usize = 1; -pub const BNS_NAME_MAX_LEN: usize = 32; pub const MAX_ATTACHMENT_INV_PAGES_PER_REQUEST: usize = 8; lazy_static! 
{ - pub static ref BNS_NAME_REGEX: String = format!( - r#"([a-z0-9]|[-_]){{{},{}}}\.([a-z0-9]|[-_]){{{},{}}}(\.([a-z0-9]|[-_]){{{},{}}})?"#, - BNS_NAMESPACE_MIN_LEN, BNS_NAMESPACE_MAX_LEN, BNS_NAME_MIN_LEN, BNS_NAME_MAX_LEN, 1, 128 - ); + pub static ref BNS_CHARS_REGEX: Regex = Regex::new("^([a-z0-9]|[-_])*$").unwrap(); } +#[derive(Debug, Clone)] pub struct AtlasConfig { pub contracts: HashSet, pub attachments_max_size: u32, + pub max_uninstantiated_attachments: u32, + pub uninstantiated_attachments_expire_after: u32, } impl AtlasConfig { @@ -42,6 +39,8 @@ impl AtlasConfig { AtlasConfig { contracts, attachments_max_size: 1_048_576, + max_uninstantiated_attachments: 10_000, + uninstantiated_attachments_expire_after: 3_600, } } } diff --git a/src/net/atlas/tests.rs b/src/net/atlas/tests.rs index 9c5be1edcc..b38a442e49 100644 --- a/src/net/atlas/tests.rs +++ b/src/net/atlas/tests.rs @@ -2,8 +2,9 @@ use super::download::{ AttachmentRequest, AttachmentsBatch, AttachmentsBatchStateContext, AttachmentsInventoryRequest, BatchedRequestsResult, ReliabilityReport, }; -use super::{Attachment, AttachmentInstance}; +use super::{AtlasConfig, AtlasDB, Attachment, AttachmentInstance}; use chainstate::burn::{BlockHeaderHash, ConsensusHash}; +use chainstate::stacks::boot::boot_code_id; use chainstate::stacks::db::StacksChainState; use chainstate::stacks::{StacksBlockHeader, StacksBlockId}; use net::connection::ConnectionOptions; @@ -11,13 +12,14 @@ use net::{ AttachmentPage, GetAttachmentsInvResponse, HttpResponseMetadata, HttpResponseType, HttpVersion, PeerHost, Requestable, }; +use std::collections::{BinaryHeap, HashMap, HashSet}; +use std::convert::TryFrom; +use std::thread; +use std::time; use util::hash::Hash160; use vm::representations::UrlString; use vm::types::QualifiedContractIdentifier; -use std::collections::{BinaryHeap, HashMap}; -use std::convert::TryFrom; - fn new_attachment_from(content: &str) -> Attachment { Attachment { content: content.as_bytes().to_vec(), @@ -682,9 +684,250 @@ fn test_downloader_context_attachment_requests() { } #[test] -fn test_downloader_dns_state_machine() {} +fn test_keep_uninstantiated_attachments() { + let bns_contract_id = boot_code_id("bns"); + let pox_contract_id = boot_code_id("pox"); + + let mut contracts = HashSet::new(); + contracts.insert(bns_contract_id.clone()); + + let atlas_config = AtlasConfig { + contracts, + attachments_max_size: 16, + max_uninstantiated_attachments: 10, + uninstantiated_attachments_expire_after: 10, + }; + + let atlas_db = AtlasDB::connect_memory(atlas_config).unwrap(); + + assert_eq!( + atlas_db.should_keep_attachment(&pox_contract_id, &new_attachment_from("facade02")), + false + ); + + assert_eq!( + atlas_db.should_keep_attachment(&bns_contract_id, &new_attachment_from("facade02")), + true + ); + + assert_eq!( + atlas_db.should_keep_attachment( + &bns_contract_id, + &new_attachment_from("facadefacadefacade02") + ), + false + ); +} #[test] -fn test_downloader_batched_requests_state_machine() {} +fn test_evict_k_oldest_uninstantiated_attachments() { + let atlas_config = AtlasConfig { + contracts: HashSet::new(), + attachments_max_size: 1024, + max_uninstantiated_attachments: 10, + uninstantiated_attachments_expire_after: 0, + }; -// todo(ludo): write tests around the fact that one hash can exist multiple inside the same fork as well. 
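`should_keep_attachment` boils down to two checks: the posting contract must be in the configured allowlist, and the content must fit under `attachments_max_size`. A self-contained restatement with simplified stand-in types (the contract identifier is just a string here, and the addresses shown are illustrative):

```rust
use std::collections::HashSet;

// Simplified stand-in for AtlasConfig: only the two fields the policy uses.
struct AtlasConfigLite {
    contracts: HashSet<String>,
    attachments_max_size: u32,
}

fn should_keep(config: &AtlasConfigLite, contract_id: &str, content: &[u8]) -> bool {
    if !config.contracts.contains(contract_id) {
        return false; // not a supported contract
    }
    if content.len() as u32 > config.attachments_max_size {
        return false; // attachment too large
    }
    true
}

fn main() {
    let mut contracts = HashSet::new();
    contracts.insert("boot.bns".to_string());
    let config = AtlasConfigLite { contracts, attachments_max_size: 16 };

    assert!(should_keep(&config, "boot.bns", b"facade02")); // allowed, small
    assert!(!should_keep(&config, "boot.pox", b"facade02")); // not whitelisted
    assert!(!should_keep(&config, "boot.bns", &[0u8; 32])); // over the size cap
}
```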
+ let mut atlas_db = AtlasDB::connect_memory(atlas_config).unwrap(); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade00")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 1); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade01")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 2); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade02")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 3); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade02")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 3); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade03")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 4); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade04")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 5); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade05")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 6); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade06")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 7); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade07")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 8); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade08")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 9); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade09")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade10")) + .unwrap(); + // We reached `max_uninstantiated_attachments`. 
Eviction should start kicking in + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + // The latest attachment inserted should be available + assert_eq!( + atlas_db + .find_uninstantiated_attachment(&new_attachment_from("facade10").hash()) + .unwrap() + .is_some(), + true + ); + // The first attachment inserted should be gone + assert_eq!( + atlas_db + .find_uninstantiated_attachment(&new_attachment_from("facade00").hash()) + .unwrap() + .is_none(), + true + ); + // The second attachment inserted should be available + assert_eq!( + atlas_db + .find_uninstantiated_attachment(&new_attachment_from("facade01").hash()) + .unwrap() + .is_some(), + true + ); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade11")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade12")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade13")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade14")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade15")) + .unwrap(); + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); + // The 5th attachment inserted should be gone + assert_eq!( + atlas_db + .find_uninstantiated_attachment(&new_attachment_from("facade05").hash()) + .unwrap() + .is_none(), + true + ); + // The 6th attachment inserted should be available + assert_eq!( + atlas_db + .find_uninstantiated_attachment(&new_attachment_from("facade06").hash()) + .unwrap() + .is_some(), + true + ); + // The latest attachment inserted should be available + assert_eq!( + atlas_db + .find_uninstantiated_attachment(&new_attachment_from("facade15").hash()) + .unwrap() + .is_some(), + true + ); +} + +#[test] +fn test_evict_expired_uninstantiated_attachments() { + let atlas_config = AtlasConfig { + contracts: HashSet::new(), + attachments_max_size: 1024, + max_uninstantiated_attachments: 100, + uninstantiated_attachments_expire_after: 10, + }; + + let mut atlas_db = AtlasDB::connect_memory(atlas_config).unwrap(); + + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade00")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade01")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade02")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade03")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade04")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade05")) + .unwrap(); + thread::sleep(time::Duration::from_secs(11)); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade06")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade07")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade08")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade09")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade10")) + .unwrap(); + atlas_db + 
.insert_uninstantiated_attachment(&new_attachment_from("facade11")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade12")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade13")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade14")) + .unwrap(); + atlas_db + .insert_uninstantiated_attachment(&new_attachment_from("facade15")) + .unwrap(); + // Count before eviction should be 16 + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 16); + atlas_db.evict_expired_uninstantiated_attachments().unwrap(); + // Count after eviction should be 10 + assert_eq!(atlas_db.count_uninstantiated_attachments().unwrap(), 10); +} diff --git a/src/net/chat.rs b/src/net/chat.rs index 24f32c7433..a5d3b6389c 100644 --- a/src/net/chat.rs +++ b/src/net/chat.rs @@ -2541,6 +2541,7 @@ mod test { consensus_hash_lifetime: 24, stable_confirmations: 7, first_block_height: 12300, + initial_reward_start_block: 12300, first_block_hash: first_burn_hash.clone(), first_block_timestamp: 0, pox_constants: PoxConstants::test_default(), diff --git a/src/net/http.rs b/src/net/http.rs index 879a484887..3034c62e22 100644 --- a/src/net/http.rs +++ b/src/net/http.rs @@ -35,7 +35,7 @@ use chainstate::burn::BlockHeaderHash; use chainstate::stacks::{ StacksAddress, StacksBlock, StacksBlockId, StacksMicroblock, StacksPublicKey, StacksTransaction, }; -use net::atlas::{Attachment, BNS_NAME_REGEX}; +use net::atlas::Attachment; use net::codec::{read_next, write_next}; use net::CallReadOnlyRequestBody; use net::ClientError; @@ -61,9 +61,9 @@ use net::UnconfirmedTransactionStatus; use net::HTTP_PREAMBLE_MAX_ENCODED_SIZE; use net::HTTP_PREAMBLE_MAX_NUM_HEADERS; use net::HTTP_REQUEST_ID_RESERVED; -use net::MAX_MESSAGE_LEN; use net::MAX_MICROBLOCKS_UNCONFIRMED; use net::{GetAttachmentResponse, GetAttachmentsInvResponse, PostTransactionRequestBody}; +use net::{MAX_MESSAGE_LEN, MAX_PAYLOAD_LEN}; use util::hash::hex_bytes; use util::hash::to_hex; @@ -2041,6 +2041,14 @@ impl HttpRequestType { )); } + if preamble.get_content_length() > MAX_PAYLOAD_LEN { + return Err(net_error::DeserializeError( + "Invalid Http request: PostTransaction body is too big".to_string(), + )); + } + + let mut bound_fd = BoundReader::from_reader(fd, preamble.get_content_length() as u64); + match preamble.content_type { None => { return Err(net_error::DeserializeError( @@ -2048,10 +2056,10 @@ impl HttpRequestType { )); } Some(HttpContentType::Bytes) => { - HttpRequestType::parse_posttransaction_octets(preamble, fd) + HttpRequestType::parse_posttransaction_octets(preamble, &mut bound_fd) } Some(HttpContentType::JSON) => { - HttpRequestType::parse_posttransaction_json(preamble, fd) + HttpRequestType::parse_posttransaction_json(preamble, &mut bound_fd) } _ => { return Err(net_error::DeserializeError( diff --git a/src/net/inv.rs b/src/net/inv.rs index 9ebddfe196..3e501d1843 100644 --- a/src/net/inv.rs +++ b/src/net/inv.rs @@ -1499,6 +1499,7 @@ impl PeerNetwork { return Ok(None); } + let ancestor_sn = self.get_ancestor_sortition_snapshot(sortdb, target_block_height)?; assert!( target_block_reward_cycle == 0 || self.burnchain.is_reward_cycle_start(target_block_height) @@ -1525,7 +1526,6 @@ impl PeerNetwork { }; assert!(num_blocks <= self.burnchain.pox_constants.reward_cycle_length as u64); - let ancestor_sn = self.get_ancestor_sortition_snapshot(sortdb, target_block_height)?; debug!( "{:?}: Send GetBlocksInv to {:?} for {} blocks at sortition block {} ({})", 
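The `http.rs` change above is a two-layer guard: reject any declared `Content-Length` above `MAX_PAYLOAD_LEN` up front, then wrap the stream so no more than the declared length can ever be read. A minimal sketch of the same idea using `std::io::Take` in place of the codebase's `BoundReader`; the function and error strings are illustrative:

```rust
use std::io::{Cursor, Read};

// Hypothetical helper modeling the PostTransaction guard: length check
// first, then a hard cap on the reader itself.
fn read_body(content_length: u64, max_payload_len: u64, fd: impl Read) -> Result<Vec<u8>, String> {
    if content_length > max_payload_len {
        return Err("PostTransaction body is too big".to_string());
    }
    let mut body = Vec::new();
    fd.take(content_length)
        .read_to_end(&mut body)
        .map_err(|e| e.to_string())?;
    Ok(body)
}

fn main() {
    // Declared 4 bytes, stream holds 8: only 4 are consumed.
    let body = read_body(4, 1024, Cursor::new(b"12345678".to_vec())).unwrap();
    assert_eq!(body, b"1234");

    // Declared length over the cap: rejected before reading anything.
    assert!(read_body(2048, 1024, Cursor::new(Vec::new())).is_err());
}
```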
@@ -3039,8 +3039,8 @@ mod test { match reply { StacksMessageType::PoxInv(poxinv) => { - assert_eq!(poxinv.bitlen, 6); // 2 reward cycles we generated, plus 5 reward cycles when booted up (1 reward cycle = 5 blocks). 1st one is free - assert_eq!(poxinv.pox_bitvec, vec![0x3f]); + assert_eq!(poxinv.bitlen, 7); // 2 reward cycles we generated, plus 5 reward cycles when booted up (1 reward cycle = 5 blocks). 1st one is free + assert_eq!(poxinv.pox_bitvec, vec![0x7f]); } x => { error!("Did not get PoxInv, but got {:?}", &x); diff --git a/src/net/mod.rs b/src/net/mod.rs index 469b77725b..961e7a92c3 100644 --- a/src/net/mod.rs +++ b/src/net/mod.rs @@ -1014,14 +1014,15 @@ pub struct RPCPeerInfoData { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct RPCPoxInfoData { pub contract_id: String, - pub first_burnchain_block_height: u128, - pub min_amount_ustx: u128, - pub prepare_cycle_length: u128, - pub rejection_fraction: u128, - pub reward_cycle_id: u128, - pub reward_cycle_length: u128, - pub rejection_votes_left_required: u128, - pub total_liquid_supply_ustx: u128, + pub first_burnchain_block_height: u64, + pub min_amount_ustx: u64, + pub prepare_cycle_length: u64, + pub rejection_fraction: u64, + pub reward_cycle_id: u64, + pub reward_cycle_length: u64, + pub rejection_votes_left_required: u64, + pub total_liquid_supply_ustx: u64, + pub next_reward_cycle_in: u64, } #[derive(Debug, Clone, PartialEq, Copy, Hash)] @@ -2290,7 +2291,7 @@ pub mod test { .unwrap(); let atlasdb_path = format!("{}/atlas.db", &test_path); - let atlasdb = AtlasDB::connect(&atlasdb_path, true).unwrap(); + let atlasdb = AtlasDB::connect(AtlasConfig::default(), &atlasdb_path, true).unwrap(); let conf = config.clone(); let post_flight_callback = move |clarity_tx: &mut ClarityTx| { @@ -3063,7 +3064,17 @@ pub mod test { ) { Ok(recipients) => { block_commit_op.commit_outs = match recipients { - Some(info) => info.recipients.into_iter().map(|x| x.0).collect(), + Some(info) => { + let mut recipients = info + .recipients + .into_iter() + .map(|x| x.0) + .collect::>(); + if recipients.len() == 1 { + recipients.push(StacksAddress::burn_address(false)); + } + recipients + } None => vec![], }; test_debug!( diff --git a/src/net/p2p.rs b/src/net/p2p.rs index d08713c998..94700674e5 100644 --- a/src/net/p2p.rs +++ b/src/net/p2p.rs @@ -4140,7 +4140,7 @@ impl PeerNetwork { return false; } - if let Err(e) = mempool.submit(chainstate, consensus_hash, block_hash, tx) { + if let Err(e) = mempool.submit(chainstate, consensus_hash, block_hash, &tx) { info!("Reject transaction {}: {:?}", txid, &e; "txid" => %txid ); @@ -4298,6 +4298,7 @@ mod test { use super::*; use burnchains::burnchain::*; use burnchains::*; + use net::atlas::*; use net::codec::*; use net::db::*; use net::*; @@ -4366,6 +4367,7 @@ mod test { working_dir: "/nope".to_string(), consensus_hash_lifetime: 24, stable_confirmations: 7, + initial_reward_start_block: 50, first_block_height: 50, first_block_timestamp: 0, first_block_hash: first_burn_hash.clone(), @@ -4389,8 +4391,8 @@ mod test { initial_neighbors, ) .unwrap(); - - let atlasdb = AtlasDB::connect_memory().unwrap(); + let atlas_config = AtlasConfig::default(); + let atlasdb = AtlasDB::connect_memory(atlas_config).unwrap(); let local_peer = PeerDB::get_local_peer(db.conn()).unwrap(); let p2p = PeerNetwork::new( diff --git a/src/net/rpc.rs b/src/net/rpc.rs index a3af8b1ef5..efdb64d4ff 100644 --- a/src/net/rpc.rs +++ b/src/net/rpc.rs @@ -255,55 +255,60 @@ impl RPCPoxInfoData { .get("first-burnchain-block-height") 
.expect(&format!("FATAL: no 'first-burnchain-block-height'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let min_amount_ustx = res .get("min-amount-ustx") .expect(&format!("FATAL: no 'min-amount-ustx'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let prepare_cycle_length = res .get("prepare-cycle-length") .expect(&format!("FATAL: no 'prepare-cycle-length'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let rejection_fraction = res .get("rejection-fraction") .expect(&format!("FATAL: no 'rejection-fraction'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let reward_cycle_id = res .get("reward-cycle-id") .expect(&format!("FATAL: no 'reward-cycle-id'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let reward_cycle_length = res .get("reward-cycle-length") .expect(&format!("FATAL: no 'reward-cycle-length'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let current_rejection_votes = res .get("current-rejection-votes") .expect(&format!("FATAL: no 'current-rejection-votes'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let total_liquid_supply_ustx = res .get("total-liquid-supply-ustx") .expect(&format!("FATAL: no 'total-liquid-supply-ustx'")) .to_owned() - .expect_u128(); + .expect_u128() as u64; let total_required = total_liquid_supply_ustx .checked_div(rejection_fraction) .expect("FATAL: unable to compute total_liquid_supply_ustx/current_rejection_votes"); let rejection_votes_left_required = total_required.saturating_sub(current_rejection_votes); + let burnchain_tip = SortitionDB::get_canonical_burn_chain_tip(sortdb.conn())?; + + let next_reward_cycle_in = reward_cycle_length + - ((burnchain_tip.block_height - first_burnchain_block_height) % reward_cycle_length); + Ok(RPCPoxInfoData { contract_id: boot::boot_code_id("pox").to_string(), first_burnchain_block_height, @@ -314,6 +319,7 @@ impl RPCPoxInfoData { reward_cycle_length, rejection_votes_left_required, total_liquid_supply_ustx, + next_reward_cycle_in, }) } } @@ -673,7 +679,7 @@ impl ConversationHttp { content_hash: Hash160, ) -> Result<(), net_error> { let response_metadata = HttpResponseMetadata::from(req); - match atlasdb.find_instantiated_attachment(&content_hash) { + match atlasdb.find_attachment(&content_hash) { Ok(Some(attachment)) => { let content = GetAttachmentResponse { attachment }; let response = HttpResponseType::GetAttachment(response_metadata, content); @@ -1482,7 +1488,7 @@ impl ConversationHttp { false, ) } else { - match mempool.submit(chainstate, &consensus_hash, &block_hash, tx) { + match mempool.submit(chainstate, &consensus_hash, &block_hash, &tx) { Ok(_) => ( HttpResponseType::TransactionID(response_metadata, txid), true, @@ -1494,13 +1500,18 @@ impl ConversationHttp { } }; - if let Some(attachment) = attachment { - if accepted { - atlasdb - .insert_new_attachment(&attachment) - .map_err(|e| net_error::DBError(e))?; + if let Some(ref attachment) = attachment { + if let TransactionPayload::ContractCall(ref contract_call) = tx.payload { + if atlasdb + .should_keep_attachment(&contract_call.to_clarity_contract_id(), &attachment) + { + atlasdb + .insert_uninstantiated_attachment(attachment) + .map_err(|e| net_error::DBError(e))?; + } } } + response.send(http, fd).and_then(|_| Ok(accepted)) } @@ -2780,7 +2791,7 @@ mod test { } }; - let block_builder = StacksBlockBuilder::make_block_builder( + let block_builder = StacksBlockBuilder::make_regtest_block_builder( &parent_tip, vrf_proof, tip.total_burn, diff --git a/src/util/vrf.rs b/src/util/vrf.rs index 
dd64d10b38..be42a8a403 100644 --- a/src/util/vrf.rs +++ b/src/util/vrf.rs @@ -207,7 +207,7 @@ impl VRFPublicKey { } } - pub fn from_hex(h: &String) -> Option<VRFPublicKey> { + pub fn from_hex(h: &str) -> Option<VRFPublicKey> { match hex_bytes(h) { Ok(b) => VRF::check_public_key(&b), Err(_) => None, diff --git a/src/vm/analysis/type_checker/tests/mod.rs b/src/vm/analysis/type_checker/tests/mod.rs index 4e1b8091f7..538bbf639f 100644 --- a/src/vm/analysis/type_checker/tests/mod.rs +++ b/src/vm/analysis/type_checker/tests/mod.rs @@ -868,8 +868,9 @@ fn test_buff() { "(if true \"blockstack\" \"block\")", "(if true \"block\" \"blockstack\")", "(len \"blockstack\")", + "(len 0x)", ]; - let expected = ["(string-ascii 10)", "(string-ascii 10)", "uint"]; + let expected = ["(string-ascii 10)", "(string-ascii 10)", "uint", "uint"]; let bad = [ "(fold and (list true false) 2)", "(fold hash160 (list 1 2 3 4) 2)", diff --git a/src/vm/database/clarity_db.rs b/src/vm/database/clarity_db.rs index d4028e83e7..b5d526315d 100644 --- a/src/vm/database/clarity_db.rs +++ b/src/vm/database/clarity_db.rs @@ -53,8 +53,9 @@ use chainstate::burn::db::sortdb::{ }; use core::{ - FIRST_BURNCHAIN_BLOCK_HASH, FIRST_BURNCHAIN_BLOCK_HEIGHT, FIRST_BURNCHAIN_BLOCK_TIMESTAMP, - FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, POX_REWARD_CYCLE_LENGTH, + BITCOIN_REGTEST_FIRST_BLOCK_HASH, BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT, + BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP, FIRST_BURNCHAIN_CONSENSUS_HASH, FIRST_STACKS_BLOCK_HASH, + POX_REWARD_CYCLE_LENGTH, }; pub const STORE_CONTRACT_SRC_INTERFACE: bool = true; @@ -272,7 +273,9 @@ impl HeadersDB for NullHeadersDB { &FIRST_STACKS_BLOCK_HASH, ) { - Some(FIRST_BURNCHAIN_BLOCK_HASH) + let first_block_hash = + BurnchainHeaderHash::from_hex(BITCOIN_REGTEST_FIRST_BLOCK_HASH).unwrap(); + Some(first_block_hash) } else { None } @@ -302,7 +305,7 @@ &FIRST_STACKS_BLOCK_HASH, ) { - Some(FIRST_BURNCHAIN_BLOCK_TIMESTAMP) + Some(BITCOIN_REGTEST_FIRST_BLOCK_TIMESTAMP as u64) } else { None } @@ -314,7 +317,7 @@ &FIRST_STACKS_BLOCK_HASH, ) { - Some(FIRST_BURNCHAIN_BLOCK_HEIGHT) + Some(BITCOIN_REGTEST_FIRST_BLOCK_HEIGHT as u32) } else { None } @@ -688,9 +691,16 @@ impl<'a> ClarityDatabase<'a> { .into() } + /// Returns the total liquid ustx of the parent block + /// if we'd rather expose `total-liquid-ustx` as the _current_ block's + /// total liquid supply, then it needs to be tracked as a variable in the + /// clarity marf proper (rather than the headers db) pub fn get_total_liquid_ustx(&mut self) -> u128 { let cur_height = self.get_current_block_height(); - let cur_id_bhh = self.get_index_block_header_hash(cur_height); + if cur_height == 0 { + return 0; + } + let cur_id_bhh = self.get_index_block_header_hash(cur_height - 1); self.headers_db.get_total_liquid_ustx(&cur_id_bhh) } diff --git a/src/vm/tests/sequences.rs b/src/vm/tests/sequences.rs index bb7e9a5892..b8b5c6062c 100644 --- a/src/vm/tests/sequences.rs +++ b/src/vm/tests/sequences.rs @@ -713,6 +713,10 @@ fn test_buff_len() { let test1 = "(len \"blockstack\")"; let expected = Value::UInt(10); assert_eq!(expected, execute(test1).unwrap().unwrap()); + + let test2 = "(len 0x)"; + let expected = Value::UInt(0); + assert_eq!(expected, execute(test2).unwrap().unwrap()); } #[test]
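The two hunks above pin down the same zero-length-buffer case at both layers: the type checker assigns (len 0x) the type uint, and the evaluator returns u0. A minimal sketch of the runtime behavior, reusing the execute helper and Value type that these tests already use:

    // `0x` is the empty buff literal; `len` counts sequence elements, so the
    // empty buffer behaves consistently with the (string-ascii ...) cases above.
    let result = execute("(len 0x)").unwrap().unwrap();
    assert_eq!(result, Value::UInt(0));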
diff --git a/stx-genesis/src/lib.rs b/stx-genesis/src/lib.rs index 0583ea647f..eb71583116 100644 --- a/stx-genesis/src/lib.rs +++ b/stx-genesis/src/lib.rs @@ -21,20 +21,20 @@ pub struct GenesisAccountLockup { pub struct GenesisNamespace { pub namespace_id: String, - pub address: String, + pub importer: String, pub reveal_block: i64, pub ready_block: i64, pub buckets: String, - pub base: String, - pub coeff: String, - pub nonalpha_discount: String, - pub no_vowel_discount: String, - pub lifetime: String, + pub base: i64, + pub coeff: i64, + pub nonalpha_discount: i64, + pub no_vowel_discount: i64, + pub lifetime: i64, } pub struct GenesisName { - pub name: String, - pub address: String, + pub fully_qualified_name: String, + pub owner: String, pub registered_at: i64, pub expire_block: i64, pub zonefile_hash: String, @@ -114,23 +114,23 @@ fn read_lockups(deflate_bytes: &'static [u8]) -> Box<dyn Iterator<Item = GenesisAccountLockup>> fn read_namespaces(deflate_bytes: &'static [u8]) -> Box<dyn Iterator<Item = GenesisNamespace>> { let namespaces = iter_deflated_csv(deflate_bytes).map(|cols| GenesisNamespace { namespace_id: cols[0].to_string(), - address: cols[1].to_string(), + importer: cols[1].to_string(), reveal_block: cols[2].parse::<i64>().unwrap(), ready_block: cols[3].parse::<i64>().unwrap(), buckets: cols[4].to_string(), - base: cols[5].to_string(), - coeff: cols[6].to_string(), - nonalpha_discount: cols[7].to_string(), - no_vowel_discount: cols[8].to_string(), - lifetime: cols[9].to_string(), + base: cols[5].parse::<i64>().unwrap(), + coeff: cols[6].parse::<i64>().unwrap(), + nonalpha_discount: cols[7].parse::<i64>().unwrap(), + no_vowel_discount: cols[8].parse::<i64>().unwrap(), + lifetime: cols[9].parse::<i64>().unwrap(), }); return Box::new(namespaces); } fn read_names(deflate_bytes: &'static [u8]) -> Box<dyn Iterator<Item = GenesisName>> { let names = iter_deflated_csv(deflate_bytes).map(|cols| GenesisName { - name: cols[0].to_string(), - address: cols[1].to_string(), + fully_qualified_name: cols[0].to_string(), + owner: cols[1].to_string(), registered_at: cols[2].parse::<i64>().unwrap(), expire_block: cols[3].parse::<i64>().unwrap(), zonefile_hash: cols[4].to_string(), diff --git a/testnet/stacks-node/src/config.rs b/testnet/stacks-node/src/config.rs index dfbd1bf635..eb132b3c55 100644 --- a/testnet/stacks-node/src/config.rs +++ b/testnet/stacks-node/src/config.rs @@ -14,8 +14,11 @@ use stacks::util::secp256k1::Secp256k1PublicKey; use stacks::vm::costs::ExecutionCost; use stacks::vm::types::{AssetIdentifier, PrincipalData, QualifiedContractIdentifier}; -use super::neon_node::TESTNET_PEER_VERSION; -use super::node::TESTNET_CHAIN_ID; +pub const TESTNET_CHAIN_ID: u32 = 0x80000000; +pub const TESTNET_PEER_VERSION: u32 = 0xfacade01; + +pub const MAINNET_CHAIN_ID: u32 = 0x00000001; +pub const MAINNET_PEER_VERSION: u32 = 0x18000000; const MINIMUM_DUST_FEE: u64 = 5500; @@ -236,7 +239,7 @@ impl ConfigFile { rpc_port: Some(18332), peer_port: Some(18333), peer_host: Some("bitcoind.xenon.blockstack.org".to_string()), - magic_bytes: Some("X2".into()), + magic_bytes: Some("X3".into()), ..BurnchainConfigFile::default() }; @@ -273,6 +276,29 @@ impl ConfigFile { } } + pub fn mainnet() -> ConfigFile { + let burnchain = BurnchainConfigFile { + mode: Some("mainnet".to_string()), + rpc_port: Some(8332), + peer_port: Some(8333), + peer_host: Some("bitcoind.blockstack.org".to_string()), + ..BurnchainConfigFile::default() + }; + + let node = NodeConfigFile { + bootstrap_node: Some("047435c194e9b01b3d7f7a2802d6684a3af68d05bbf4ec8f17021980d777691f1d51651f7f1d566532c804da506c117bbf79ad62eea81213ba58f8808b4d9504ad@mainnet.blockstack.org:20444".to_string()), + miner: Some(false), + ..NodeConfigFile::default() + }; + + ConfigFile { + burnchain: Some(burnchain), + node: Some(node), + ustx_balance: None, + ..ConfigFile::default() + } + } +
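With this preset in place, a node derives its network parameters from the burnchain mode alone. A minimal sketch of the intended wiring, assuming the Config::from_config_file, is_mainnet, and chain-id/peer-version pieces introduced elsewhere in this diff:

    // the mainnet preset should yield mainnet chain parameters end to end
    let conf = Config::from_config_file(ConfigFile::mainnet());
    assert!(conf.is_mainnet());
    assert_eq!(conf.burnchain.chain_id, MAINNET_CHAIN_ID);
    assert_eq!(conf.burnchain.peer_version, MAINNET_PEER_VERSION);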
pub fn helium() -> ConfigFile { // ## Settings for local testnet, relying on a local bitcoind server // ## running with the following bitcoin.conf: @@ -377,10 +403,10 @@ pub const HELIUM_BLOCK_LIMIT: ExecutionCost = ExecutionCost { impl Config { pub fn from_config_file(config_file: ConfigFile) -> Config { let default_node_config = NodeConfig::default(); - let node = match config_file.node { + let (mut node, bootstrap_node, deny_nodes) = match config_file.node { Some(node) => { let rpc_bind = node.rpc_bind.unwrap_or(default_node_config.rpc_bind); - let mut node_config = NodeConfig { + let node_config = NodeConfig { name: node.name.unwrap_or(default_node_config.name), seed: match node.seed { Some(seed) => { @@ -423,13 +449,9 @@ impl Config { .unwrap_or(default_node_config.pox_sync_sample_secs), use_test_genesis_chainstate: node.use_test_genesis_chainstate, }; - node_config.set_bootstrap_node(node.bootstrap_node); - if let Some(deny_nodes) = node.deny_nodes { - node_config.set_deny_nodes(deny_nodes); - } - node_config + (node_config, node.bootstrap_node, node.deny_nodes) } - None => default_node_config, + None => (default_node_config, None, None), }; let default_burnchain_config = BurnchainConfig::default(); @@ -441,10 +463,21 @@ burnchain.magic_bytes = ConfigFile::xenon().burnchain.unwrap().magic_bytes; } } + let burnchain_mode = burnchain.mode.unwrap_or(default_burnchain_config.mode); BurnchainConfig { chain: burnchain.chain.unwrap_or(default_burnchain_config.chain), - mode: burnchain.mode.unwrap_or(default_burnchain_config.mode), + chain_id: if &burnchain_mode == "mainnet" { + MAINNET_CHAIN_ID + } else { + TESTNET_CHAIN_ID + }, + peer_version: if &burnchain_mode == "mainnet" { + MAINNET_PEER_VERSION + } else { + TESTNET_PEER_VERSION + }, + mode: burnchain_mode.clone(), burn_fee_cap: burnchain .burn_fee_cap .unwrap_or(default_burnchain_config.burn_fee_cap), @@ -501,7 +534,9 @@ None => default_burnchain_config, }; - let supported_modes = vec!["mocknet", "helium", "neon", "argon", "krypton", "xenon"]; + let supported_modes = vec![ + "mocknet", "helium", "neon", "argon", "krypton", "xenon", "mainnet", + ]; if !supported_modes.contains(&burnchain.mode.as_str()) { panic!( @@ -514,6 +549,11 @@ panic!("Config is missing the setting `burnchain.local_mining_public_key` (mandatory for helium)") } + node.set_bootstrap_node(bootstrap_node, burnchain.chain_id, burnchain.peer_version); + if let Some(deny_nodes) = deny_nodes { + node.set_deny_nodes(deny_nodes, burnchain.chain_id, burnchain.peer_version); + } + let initial_balances: Vec<InitialBalance> = match config_file.ustx_balance { Some(balances) => balances .iter() @@ -773,6 +813,13 @@ } total } + + pub fn is_mainnet(&self) -> bool { + match self.burnchain.mode.as_str() { + "mainnet" => true, + _ => false, + } + } } impl std::default::Default for Config { @@ -806,6 +853,8 @@ pub struct BurnchainConfig { pub chain: String, pub mode: String, + pub chain_id: u32, + pub peer_version: u32, pub commit_anchor_block_within: u64, pub burn_fee_cap: u64, pub peer_host: String, @@ -828,6 +877,8 @@ impl BurnchainConfig { BurnchainConfig { chain: "bitcoin".to_string(), mode: "mocknet".to_string(), + chain_id: TESTNET_CHAIN_ID, + peer_version: TESTNET_PEER_VERSION, burn_fee_cap: 20000, commit_anchor_block_within: 5000, peer_host: "0.0.0.0".to_string(), @@ -922,6 +973,7 @@ impl NodeConfig { let mut rng = rand::thread_rng(); let mut buf = [0u8; 8]; rng.fill_bytes(&mut buf); + let testnet_id = format!("stacks-testnet-{}", to_hex(&buf)); let rpc_port = 20443; @@ -964,11 +1016,16 @@ 
impl NodeConfig { format!("{}/spv-headers.dat", self.get_burnchain_path()) } - fn default_neighbor(addr: SocketAddr, pubk: Secp256k1PublicKey) -> Neighbor { + fn default_neighbor( + addr: SocketAddr, + pubk: Secp256k1PublicKey, + chain_id: u32, + peer_version: u32, + ) -> Neighbor { Neighbor { addr: NeighborKey { - peer_version: TESTNET_PEER_VERSION, - network_id: TESTNET_CHAIN_ID, + peer_version: peer_version, + network_id: chain_id, addrbytes: PeerAddress::from_socketaddr(&addr), port: addr.port(), }, @@ -984,7 +1041,12 @@ impl NodeConfig { } } - pub fn set_bootstrap_node(&mut self, bootstrap_node: Option<String>) { + pub fn set_bootstrap_node( + &mut self, + bootstrap_node: Option<String>, + chain_id: u32, + peer_version: u32, + ) { if let Some(bootstrap_node) = bootstrap_node { let comps: Vec<&str> = bootstrap_node.split("@").collect(); match comps[..] { @@ -994,7 +1056,8 @@ let mut addrs_iter = peer_addr.to_socket_addrs().unwrap(); let sock_addr = addrs_iter.next().unwrap(); - let neighbor = NodeConfig::default_neighbor(sock_addr, pubk); + let neighbor = + NodeConfig::default_neighbor(sock_addr, pubk, chain_id, peer_version); self.bootstrap_node = Some(neighbor); } _ => {} @@ -1002,20 +1065,22 @@ impl NodeConfig { } } - pub fn add_deny_node(&mut self, deny_node: &str) { + pub fn add_deny_node(&mut self, deny_node: &str, chain_id: u32, peer_version: u32) { let sockaddr = deny_node.to_socket_addrs().unwrap().next().unwrap(); let neighbor = NodeConfig::default_neighbor( sockaddr, Secp256k1PublicKey::from_private(&Secp256k1PrivateKey::new()), + chain_id, + peer_version, ); self.deny_nodes.push(neighbor); } - pub fn set_deny_nodes(&mut self, deny_nodes: String) { + pub fn set_deny_nodes(&mut self, deny_nodes: String, chain_id: u32, peer_version: u32) { let parts: Vec<&str> = deny_nodes.split(",").collect(); for part in parts.into_iter() { if part.len() > 0 { - self.add_deny_node(&part); + self.add_deny_node(&part, chain_id, peer_version); } } } diff --git a/testnet/stacks-node/src/keychain.rs b/testnet/stacks-node/src/keychain.rs index dd78b678c5..dbcc7fc8c1 100644 --- a/testnet/stacks-node/src/keychain.rs +++ b/testnet/stacks-node/src/keychain.rs @@ -161,15 +161,19 @@ impl Keychain { } /// Given the keychain's secret keys, computes and returns the corresponding Stack address. - /// Note: Testnet bit is hardcoded. 
- pub fn get_address(&self) -> StacksAddress { + pub fn get_address(&self, is_mainnet: bool) -> StacksAddress { let public_keys = self .secret_keys .iter() .map(|ref pk| StacksPublicKey::from_private(pk)) .collect(); + let version = if is_mainnet { + self.hash_mode.to_version_mainnet() + } else { + self.hash_mode.to_version_testnet() + }; StacksAddress::from_public_keys( - self.hash_mode.to_version_testnet(), + version, &self.hash_mode, self.threshold as usize, &public_keys, @@ -177,9 +181,17 @@ impl Keychain { .unwrap() } - pub fn address_from_burnchain_signer(signer: &BurnchainSigner) -> StacksAddress { + pub fn address_from_burnchain_signer( + signer: &BurnchainSigner, + is_mainnet: bool, + ) -> StacksAddress { + let version = if is_mainnet { + signer.hash_mode.to_version_mainnet() + } else { + signer.hash_mode.to_version_testnet() + }; StacksAddress::from_public_keys( - signer.hash_mode.to_version_testnet(), + version, &signer.hash_mode, signer.num_sigs, &signer.public_keys, diff --git a/testnet/stacks-node/src/main.rs b/testnet/stacks-node/src/main.rs index 2b0347e874..ea0dcd07bc 100644 --- a/testnet/stacks-node/src/main.rs +++ b/testnet/stacks-node/src/main.rs @@ -102,6 +102,10 @@ fn main() { args.finish().unwrap(); ConfigFile::xenon() } + "mainnet" => { + args.finish().unwrap(); + ConfigFile::mainnet() + } "start" => { let config_path: String = args.value_from_str("--config").unwrap(); args.finish().unwrap(); @@ -139,9 +143,9 @@ fn main() { return; } } else if conf.burnchain.mode == "neon" - || conf.burnchain.mode == "argon" - || conf.burnchain.mode == "krypton" || conf.burnchain.mode == "xenon" + || conf.burnchain.mode == "krypton" + || conf.burnchain.mode == "mainnet" { let mut run_loop = neon::RunLoop::new(conf); run_loop.start(num_round, None); @@ -163,6 +167,8 @@ stacks-node SUBCOMMANDS: +mainnet\t\tStart a node that will join and stream blocks from the public mainnet. + mocknet\t\tStart a node based on a fast local setup emulating a burnchain. Ideal for smart contract development. helium\t\tStart a node based on a local setup relying on a local instance of bitcoind. @@ -174,10 +180,6 @@ helium\t\tStart a node based on a local setup relying on a local instance of bit \t\t rpcuser=helium \t\t rpcpassword=helium -argon\t\tStart a node that will join and stream blocks from the public argon testnet, powered by Blockstack (Proof of Burn). - -krypton\t\tStart a node that will join and stream blocks from the public krypton testnet, powered by Blockstack via (Proof of Transfer). - xenon\t\tStart a node that will join and stream blocks from the public xenon testnet, decentralized. start\t\tStart a node with a config of your own. Can be used for joining a network, starting new chain, etc. 
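The keychain and node changes in this region all follow one pattern: code that used to hard-code a testnet constant now branches on the configured network. A minimal sketch of the transaction-construction variant of that pattern, using the names from the inner_generate_coinbase_tx hunk below (tx_auth and payload stand in for the caller's values):

    // select the wire-format version from the configured network
    let version = if config.is_mainnet() {
        TransactionVersion::Mainnet
    } else {
        TransactionVersion::Testnet
    };
    let mut tx = StacksTransaction::new(version, tx_auth, payload);
    // the chain id likewise comes from config rather than TESTNET_CHAIN_ID
    tx.chain_id = config.burnchain.chain_id;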
diff --git a/testnet/stacks-node/src/neon_node.rs b/testnet/stacks-node/src/neon_node.rs index 53117c0bf3..a2683f7537 100644 --- a/testnet/stacks-node/src/neon_node.rs +++ b/testnet/stacks-node/src/neon_node.rs @@ -29,7 +29,7 @@ use stacks::chainstate::stacks::{ }; use stacks::core::mempool::MemPoolDB; use stacks::net::{ - atlas::{AtlasDB, AttachmentInstance}, + atlas::{AtlasConfig, AtlasDB, AttachmentInstance}, db::{LocalPeer, PeerDB}, dns::DNSResolver, p2p::PeerNetwork, @@ -58,8 +58,6 @@ use stacks::chainstate::coordinator::{get_next_recipients, OnChainRewardSetProvi use stacks::monitoring::{increment_stx_blocks_mined_counter, update_active_miners_count_gauge}; -pub const TESTNET_CHAIN_ID: u32 = 0x80000000; -pub const TESTNET_PEER_VERSION: u32 = 0xfacade01; pub const RELAYER_MAX_BUFFER: usize = 100; struct AssembledAnchorBlock { @@ -87,12 +85,14 @@ enum RelayerDirective { } pub struct InitializedNeonNode { + config: Config, relay_channel: SyncSender<RelayerDirective>, burnchain_signer: BurnchainSigner, last_burn_block: Option<BlockSnapshot>, active_keys: Vec<RegisteredKey>, sleep_before_tenure: u64, is_miner: bool, + pub atlas_config: AtlasConfig, } pub struct NeonGenesisNode { @@ -149,16 +149,26 @@ fn inner_process_tenure( Ok(true) } -fn inner_generate_coinbase_tx(keychain: &mut Keychain, nonce: u64) -> StacksTransaction { +fn inner_generate_coinbase_tx( + keychain: &mut Keychain, + nonce: u64, + is_mainnet: bool, + chain_id: u32, +) -> StacksTransaction { let mut tx_auth = keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(nonce); + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; let mut tx = StacksTransaction::new( - TransactionVersion::Testnet, + version, tx_auth, TransactionPayload::Coinbase(CoinbasePayload([0u8; 32])), ); - tx.chain_id = TESTNET_CHAIN_ID; + tx.chain_id = chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx); keychain.sign_as_origin(&mut tx_signer); @@ -170,12 +180,19 @@ fn inner_generate_poison_microblock_tx( keychain: &mut Keychain, nonce: u64, poison_payload: TransactionPayload, + is_mainnet: bool, + chain_id: u32, ) -> StacksTransaction { let mut tx_auth = keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(nonce); - let mut tx = StacksTransaction::new(TransactionVersion::Testnet, tx_auth, poison_payload); - tx.chain_id = TESTNET_CHAIN_ID; + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; + let mut tx = StacksTransaction::new(version, tx_auth, poison_payload); + tx.chain_id = chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx); keychain.sign_as_origin(&mut tx_signer); @@ -202,6 +219,7 @@ fn inner_generate_leader_key_register_op( } fn rotate_vrf_and_register( + is_mainnet: bool, keychain: &mut Keychain, burn_block: &BlockSnapshot, btc_controller: &mut BitcoinRegtestController, ) { let vrf_pk = keychain.rotate_vrf_keypair(burn_block.block_height); let burnchain_tip_consensus_hash = &burn_block.consensus_hash; let op = inner_generate_leader_key_register_op( - keychain.get_address(), + keychain.get_address(is_mainnet), vrf_pk, burnchain_tip_consensus_hash, ); @@ -332,7 +350,7 @@ fn try_mine_microblock( chainstate: &mut StacksChainState, sortdb: &SortitionDB, mem_pool: &MemPoolDB, - coord_comms: &CoordinatorChannels, + _coord_comms: &CoordinatorChannels, miner_tip_arc: Arc>>, ) -> Result<Option<StacksMicroblock>, NetError> { 
let mut next_microblock = None; @@ -422,6 +440,7 @@ } fn spawn_peer( + is_mainnet: bool, mut this: PeerNetwork, p2p_sock: &SocketAddr, rpc_sock: &SocketAddr, @@ -443,15 +462,19 @@ let sortdb = SortitionDB::open(&burn_db_path, false).map_err(NetError::DBError)?; let (mut chainstate, _) = StacksChainState::open_with_block_limit( - false, - TESTNET_CHAIN_ID, + is_mainnet, + config.burnchain.chain_id, &stacks_chainstate_path, block_limit, ) .map_err(|e| NetError::ChainstateError(e.to_string()))?; - let mut mem_pool = MemPoolDB::open(false, TESTNET_CHAIN_ID, &stacks_chainstate_path) - .map_err(NetError::DBError)?; + let mut mem_pool = MemPoolDB::open( + is_mainnet, + config.burnchain.chain_id, + &stacks_chainstate_path, + ) + .map_err(NetError::DBError)?; // buffer up blocks to store without stalling the p2p thread let mut results_with_data = VecDeque::new(); @@ -608,6 +631,8 @@ } fn spawn_miner_relayer( + is_mainnet: bool, + chain_id: u32, mut relayer: Relayer, local_peer: LocalPeer, config: Config, @@ -630,14 +655,14 @@ let mut sortdb = SortitionDB::open(&burn_db_path, true).map_err(NetError::DBError)?; let (mut chainstate, _) = StacksChainState::open_with_block_limit( - false, - TESTNET_CHAIN_ID, + is_mainnet, + chain_id, &stacks_chainstate_path, config.block_limit.clone(), ) .map_err(|e| NetError::ChainstateError(e.to_string()))?; - let mut mem_pool = MemPoolDB::open(false, TESTNET_CHAIN_ID, &stacks_chainstate_path) + let mut mem_pool = MemPoolDB::open(is_mainnet, chain_id, &stacks_chainstate_path) .map_err(NetError::DBError)?; let mut last_mined_blocks: HashMap< @@ -834,6 +859,7 @@ continue; } did_register_key = rotate_vrf_and_register( + is_mainnet, &mut keychain, last_burn_block, &mut bitcoin_controller, );
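The relayer and peer-thread hunks above thread the network flags from the config into every chainstate and mempool open. A minimal sketch of the resulting call pattern, using only the accessors this diff introduces (error handling elided; assumes a function returning Result):

    // was: StacksChainState::open_with_block_limit(false, TESTNET_CHAIN_ID, ...)
    let (chainstate, _) = StacksChainState::open_with_block_limit(
        config.is_mainnet(),
        config.burnchain.chain_id,
        &config.get_chainstate_path(),
        config.block_limit.clone(),
    )?;
    // was: MemPoolDB::open(false, TESTNET_CHAIN_ID, ...)
    let mem_pool = MemPoolDB::open(
        config.is_mainnet(),
        config.burnchain.chain_id,
        &config.get_chainstate_path(),
    )?;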
@@ -880,6 +906,7 @@ impl InitializedNeonNode { sync_comms: PoxSyncWatchdogComms, burnchain: Burnchain, attachments_rx: Receiver<HashSet<AttachmentInstance>>, + atlas_config: AtlasConfig, ) -> InitializedNeonNode { // we can call _open_ here rather than _connect_, since connect is first called in // make_genesis_block let mut peerdb = PeerDB::connect( &config.get_peer_db_path(), true, - TESTNET_CHAIN_ID, + config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), config.connection_options.private_key_lifetime.clone(), @@ -959,7 +986,7 @@ } tx.commit().unwrap(); } - let atlasdb = AtlasDB::connect(&config.get_atlas_db_path(), true).unwrap(); + let atlasdb = AtlasDB::connect(atlas_config, &config.get_atlas_db_path(), true).unwrap(); let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { Ok(local_peer) => local_peer, @@ -971,7 +998,7 @@ peerdb, atlasdb, local_peer.clone(), - TESTNET_PEER_VERSION, + config.burnchain.peer_version, burnchain.clone(), view, config.connection_options.clone(), @@ -990,6 +1017,8 @@ let miner_tip_arc = Arc::new(Mutex::new(None)); spawn_miner_relayer( + config.is_mainnet(), + config.burnchain.chain_id, relayer, local_peer, config.clone(), @@ -1006,6 +1035,7 @@ .expect("Failed to initialize mine/relay thread"); spawn_peer( + config.is_mainnet(), p2p_net, &p2p_sock, &rpc_sock, @@ -1027,14 +1057,16 @@ let is_miner = miner; let active_keys = vec![]; - + let atlas_config = AtlasConfig::default(); InitializedNeonNode { + config: config.clone(), relay_channel: relay_send, last_burn_block, burnchain_signer, is_miner, sleep_before_tenure, active_keys, + atlas_config, } } @@ -1362,7 +1394,12 @@ impl InitializedNeonNode { let mblock_pubkey_hash = Hash160::from_node_public_key(&StacksPublicKey::from_private(&microblock_secret_key)); - let coinbase_tx = inner_generate_coinbase_tx(keychain, coinbase_nonce); + let coinbase_tx = inner_generate_coinbase_tx( + keychain, + coinbase_nonce, + config.is_mainnet(), + config.burnchain.chain_id, + ); // find the longest microblock tail we can build off of let microblock_info_opt = @@ -1404,6 +1441,8 @@ keychain, coinbase_nonce + 1, poison_payload, + config.is_mainnet(), + config.burnchain.chain_id, ); // submit the poison payload, privately, so we'll mine it when building the @@ -1412,7 +1451,7 @@ chain_state, &parent_consensus_hash, &stacks_parent_header.anchored_header.block_hash(), - poison_microblock_tx, + &poison_microblock_tx, ) { warn!( "Detected but failed to mine poison-microblock transaction: {:?}", @@ -1473,9 +1512,9 @@ let commit_outs = if burn_block.block_height + 1 < burnchain.pox_constants.sunset_end && !burnchain.is_in_prepare_phase(burn_block.block_height + 1) { - RewardSetInfo::into_commit_outs(recipients, false) + RewardSetInfo::into_commit_outs(recipients, config.is_mainnet()) } else { - vec![StacksAddress::burn_address(false)] + vec![StacksAddress::burn_address(config.is_mainnet())] }; // let's commit @@ -1542,12 +1581,14 @@ update_active_miners_count_gauge(block_commits.len() as i64); + let (_, network) = self.config.burnchain.get_bitcoin_network(); + for op in block_commits.into_iter() { if op.txid == block_snapshot.winning_block_txid { info!( "Received burnchain block #{} including block_commit_op (winning) - {} ({})", block_height, - op.apparent_sender.to_testnet_address(), + op.apparent_sender.to_address(network), &op.block_header_hash ); last_sortitioned_block = Some((block_snapshot.clone(), op.vtxindex)); @@ -1556,7 +1597,7 @@ info!( "Received burnchain block #{} including block_commit_op - {} ({})", block_height, - op.apparent_sender.to_testnet_address(), + op.apparent_sender.to_address(network), &op.block_header_hash ); } @@ -1567,6 +1608,11 @@ SortitionDB::get_leader_keys_by_block(&ic, &block_snapshot.sortition_id) .expect("Unexpected SortitionDB error fetching key registers"); + let node_address = Keychain::address_from_burnchain_signer( + &self.burnchain_signer, + self.config.is_mainnet(), + ); + for op in key_registers.into_iter() { if self.is_miner { info!( @@ -1574,7 +1620,7 @@ block_height, op.address ); } - if op.address == Keychain::address_from_burnchain_signer(&self.burnchain_signer) { + if op.address == node_address { if !ibd { // not in initial block download, so we're not just replaying an old key. // Registered key has been mined @@ -1614,8 +1660,8 @@ impl NeonGenesisNode { // do the initial open! let (_chain_state, receipts) = match StacksChainState::open_and_exec( - false, - TESTNET_CHAIN_ID, + config.is_mainnet(), + config.burnchain.chain_id, &config.get_chainstate_path(), Some(&mut boot_data), config.block_limit.clone(), ) { @@ -1645,6 +1691,7 @@ impl NeonGenesisNode { coord_comms: CoordinatorChannels, sync_comms: PoxSyncWatchdogComms, attachments_rx: Receiver<HashSet<AttachmentInstance>>, + atlas_config: AtlasConfig, ) -> InitializedNeonNode { let config = self.config; let keychain = self.keychain; @@ -1661,6 +1708,7 @@ sync_comms, self.burnchain, attachments_rx, + atlas_config, ) } @@ -1671,6 +1719,7 @@ coord_comms: CoordinatorChannels, sync_comms: PoxSyncWatchdogComms, attachments_rx: Receiver<HashSet<AttachmentInstance>>, + atlas_config: AtlasConfig, ) -> InitializedNeonNode { let config = self.config; let keychain = self.keychain; @@ -1687,6 +1736,7 @@ sync_comms, self.burnchain, attachments_rx, + atlas_config, ) } }
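The atlas changes above make the attachment database's configuration explicit at construction time rather than defaulted internally. A minimal wiring sketch, using the calls shown in these hunks and assuming AtlasConfig implements Clone (as the run-loop's atlas_config.clone() later in this diff suggests):

    let atlas_config = AtlasConfig::default();
    // one copy goes to the coordinator thread, one to the AtlasDB
    let atlasdb = AtlasDB::connect(atlas_config.clone(), &config.get_atlas_db_path(), true)
        .expect("failed to open atlas db");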
diff --git a/testnet/stacks-node/src/node.rs b/testnet/stacks-node/src/node.rs index 57a79b9101..479b93d03b 100644 --- a/testnet/stacks-node/src/node.rs +++ b/testnet/stacks-node/src/node.rs @@ -24,12 +24,18 @@ use stacks::chainstate::stacks::{ }; use stacks::core::mempool::MemPoolDB; use stacks::net::{ - atlas::AtlasDB, db::PeerDB, p2p::PeerNetwork, rpc::RPCHandlerArgs, Error as NetError, - PeerAddress, + atlas::{AtlasConfig, AtlasDB}, + db::PeerDB, + p2p::PeerNetwork, + rpc::RPCHandlerArgs, + Error as NetError, PeerAddress, }; use stacks::{ burnchains::{Burnchain, BurnchainHeaderHash, Txid}, - chainstate::stacks::db::{ChainstateAccountBalance, ChainstateAccountLockup}, + chainstate::stacks::db::{ + ChainstateAccountBalance, ChainstateAccountLockup, ChainstateBNSName, + ChainstateBNSNamespace, + }, }; use stacks::chainstate::stacks::index::TrieHash; @@ -39,9 +45,6 @@ use stacks::util::secp256k1::Secp256k1PrivateKey; use stacks::util::strings::UrlString; use stacks::util::vrf::VRFPublicKey; -pub const TESTNET_CHAIN_ID: u32 = 0x80000000; -pub const TESTNET_PEER_VERSION: u32 = 0xfacade01; - #[derive(Debug, Clone)] pub struct ChainTip { pub metadata: StacksHeaderInfo, @@ -111,7 +114,44 @@ pub fn get_account_balances( ) } +pub fn get_namespaces( + use_test_chainstate_data: bool, +) -> Box<dyn Iterator<Item = ChainstateBNSNamespace>> { + Box::new( + stx_genesis::GenesisData::new(use_test_chainstate_data) + .read_namespaces() + .map(|item| ChainstateBNSNamespace { + namespace_id: item.namespace_id, + importer: item.importer, + revealed_at: item.reveal_block as u64, + launched_at: item.ready_block as u64, + buckets: item.buckets, + base: item.base as u64, + coeff: item.coeff as u64, + nonalpha_discount: item.nonalpha_discount as u64, + no_vowel_discount: item.no_vowel_discount as u64, + lifetime: item.lifetime as u64, + }), + ) +} + +pub fn get_names(use_test_chainstate_data: bool) -> Box<dyn Iterator<Item = ChainstateBNSName>> { + Box::new( + stx_genesis::GenesisData::new(use_test_chainstate_data) + .read_names() + .map(|item| ChainstateBNSName { + fully_qualified_name: item.fully_qualified_name, + owner: item.owner, + registered_at: item.registered_at as u64, + expired_at: item.expire_block as u64, + zonefile_hash: item.zonefile_hash, + }), + ) +} + fn spawn_peer( + is_mainnet: bool, + chain_id: u32, mut this: PeerNetwork, p2p_sock: &SocketAddr, rpc_sock: &SocketAddr, @@ -140,7 +180,7 @@ } }; let (mut chainstate, _) = - match StacksChainState::open(false, TESTNET_CHAIN_ID, &stacks_chainstate_path) { + match StacksChainState::open(is_mainnet, chain_id, &stacks_chainstate_path) { Ok(x) => x, Err(e) => { warn!("Error while 
connecting chainstate db in peer loop: {}", e); @@ -149,15 +189,15 @@ fn spawn_peer( } }; - let mut mem_pool = - match MemPoolDB::open(false, TESTNET_CHAIN_ID, &stacks_chainstate_path) { - Ok(x) => x, - Err(e) => { - warn!("Error while connecting to mempool db in peer loop: {}", e); - thread::sleep(time::Duration::from_secs(1)); - continue; - } - }; + let mut mem_pool = match MemPoolDB::open(is_mainnet, chain_id, &stacks_chainstate_path) + { + Ok(x) => x, + Err(e) => { + warn!("Error while connecting to mempool db in peer loop: {}", e); + thread::sleep(time::Duration::from_secs(1)); + continue; + } + }; let mut attachments = HashSet::new(); let net_result = this .run( @@ -218,11 +258,15 @@ impl Node { get_bulk_initial_balances: Some(Box::new(move || { get_account_balances(use_test_genesis_data) })), + get_bulk_initial_namespaces: Some(Box::new(move || { + get_namespaces(use_test_genesis_data) + })), + get_bulk_initial_names: Some(Box::new(move || get_names(use_test_genesis_data))), }; let chain_state_result = StacksChainState::open_and_exec( - false, - TESTNET_CHAIN_ID, + config.is_mainnet(), + config.burnchain.chain_id, &config.get_chainstate_path(), Some(&mut boot_data), config.block_limit.clone(), @@ -275,11 +319,14 @@ impl Node { let chainstate_path = config.get_chainstate_path(); let sortdb_path = config.get_burn_db_file_path(); - let (chain_state, _) = - match StacksChainState::open(false, TESTNET_CHAIN_ID, &chainstate_path) { - Ok(x) => x, - Err(_e) => panic!(), - }; + let (chain_state, _) = match StacksChainState::open( + config.is_mainnet(), + config.burnchain.chain_id, + &chainstate_path, + ) { + Ok(x) => x, + Err(_e) => panic!(), + }; let mut node = Node { active_registered_key: None, @@ -372,7 +419,7 @@ impl Node { let mut peerdb = PeerDB::connect( &self.config.get_peer_db_path(), true, - TESTNET_CHAIN_ID, + self.config.burnchain.chain_id, burnchain.network_id, Some(node_privkey), self.config.connection_options.private_key_lifetime.clone(), @@ -399,7 +446,9 @@ impl Node { } tx.commit().unwrap(); } - let atlasdb = AtlasDB::connect(&self.config.get_peer_db_path(), true).unwrap(); + let atlas_config = AtlasConfig::default(); + let atlasdb = + AtlasDB::connect(atlas_config, &self.config.get_peer_db_path(), true).unwrap(); let local_peer = match PeerDB::get_local_peer(peerdb.conn()) { Ok(local_peer) => local_peer, @@ -413,12 +462,14 @@ impl Node { peerdb, atlasdb, local_peer, - TESTNET_PEER_VERSION, + self.config.burnchain.peer_version, burnchain, view, self.config.connection_options.clone(), ); let _join_handle = spawn_peer( + self.config.is_mainnet(), + self.config.burnchain.chain_id, p2p_net, &p2p_sock, &rpc_sock, @@ -457,11 +508,12 @@ impl Node { let mut last_sortitioned_block = None; let mut won_sortition = false; let ops = &burnchain_tip.state_transition.accepted_ops; + let is_mainnet = self.config.is_mainnet(); for op in ops.iter() { match op { BlockstackOperationType::LeaderKeyRegister(ref op) => { - if op.address == self.keychain.get_address() { + if op.address == self.keychain.get_address(is_mainnet) { // Registered key has been mined new_key = Some(RegisteredKey { vrf_public_key: op.public_key.clone(), @@ -565,12 +617,16 @@ impl Node { }, }; - let mem_pool = MemPoolDB::open(false, TESTNET_CHAIN_ID, &self.chain_state.root_path) - .expect("FATAL: failed to open mempool"); + let mem_pool = MemPoolDB::open( + self.config.is_mainnet(), + self.config.burnchain.chain_id, + &self.chain_state.root_path, + ) + .expect("FATAL: failed to open mempool"); // Construct the coinbase 
transaction - 1st txn that should be handled and included in // the upcoming tenure. - let coinbase_tx = self.generate_coinbase_tx(); + let coinbase_tx = self.generate_coinbase_tx(self.config.is_mainnet()); let burn_fee_cap = self.config.burnchain.burn_fee_cap; @@ -746,7 +802,7 @@ impl Node { /// Returns the Stacks address of the node pub fn get_address(&self) -> StacksAddress { - self.keychain.get_address() + self.keychain.get_address(self.config.is_mainnet()) } /// Constructs and returns a LeaderKeyRegisterOp out of the provided params @@ -758,7 +814,7 @@ impl Node { BlockstackOperationType::LeaderKeyRegister(LeaderKeyRegisterOp { public_key: vrf_public_key, memo: vec![], - address: self.keychain.get_address(), + address: self.keychain.get_address(self.config.is_mainnet()), consensus_hash: consensus_hash.clone(), vtxindex: 0, txid: Txid([0u8; 32]), @@ -798,9 +854,9 @@ impl Node { < burnchain.pox_constants.sunset_end && !burnchain.is_in_prepare_phase(burnchain_tip.block_snapshot.block_height + 1) { - RewardSetInfo::into_commit_outs(None, false) + RewardSetInfo::into_commit_outs(None, self.config.is_mainnet()) } else { - vec![StacksAddress::burn_address(false)] + vec![StacksAddress::burn_address(self.config.is_mainnet())] }; let burn_parent_modulus = (burnchain_tip.block_snapshot.block_height % BURN_BLOCK_MINED_AT_MODULUS) as u8; @@ -827,16 +883,21 @@ impl Node { } // Constructs a coinbase transaction - fn generate_coinbase_tx(&mut self) -> StacksTransaction { + fn generate_coinbase_tx(&mut self, is_mainnet: bool) -> StacksTransaction { let mut tx_auth = self.keychain.get_transaction_auth().unwrap(); tx_auth.set_origin_nonce(self.nonce); + let version = if is_mainnet { + TransactionVersion::Mainnet + } else { + TransactionVersion::Testnet + }; let mut tx = StacksTransaction::new( - TransactionVersion::Testnet, + version, tx_auth, TransactionPayload::Coinbase(CoinbasePayload([0u8; 32])), ); - tx.chain_id = TESTNET_CHAIN_ID; + tx.chain_id = self.config.burnchain.chain_id; tx.anchor_mode = TransactionAnchorMode::OnChainOnly; let mut tx_signer = StacksTransactionSigner::new(&tx); self.keychain.sign_as_origin(&mut tx_signer); diff --git a/testnet/stacks-node/src/run_loop/neon.rs b/testnet/stacks-node/src/run_loop/neon.rs index 8492dc045d..a3920e0df2 100644 --- a/testnet/stacks-node/src/run_loop/neon.rs +++ b/testnet/stacks-node/src/run_loop/neon.rs @@ -1,7 +1,6 @@ use crate::{ genesis_data::USE_TEST_GENESIS_CHAINSTATE, - neon_node, - node::{get_account_balances, get_account_lockups}, + node::{get_account_balances, get_account_lockups, get_names, get_namespaces}, BitcoinRegtestController, BurnchainController, Config, EventDispatcher, Keychain, NeonGenesisNode, }; @@ -15,6 +14,7 @@ use stacks::chainstate::coordinator::{ }; use stacks::chainstate::stacks::boot::STACKS_BOOT_CODE_CONTRACT_ADDRESS_STR; use stacks::chainstate::stacks::db::{ChainStateBootData, ClarityTx, StacksChainState}; +use stacks::net::atlas::AtlasConfig; use stacks::vm::types::{PrincipalData, QualifiedContractIdentifier, Value}; use std::cmp; use std::sync::mpsc::sync_channel; @@ -108,11 +108,14 @@ impl RunLoop { let is_miner = if self.config.node.miner { let keychain = Keychain::default(self.config.node.seed.clone()); + let node_address = Keychain::address_from_burnchain_signer( + &keychain.get_burnchain_signer(), + self.config.is_mainnet(), + ); let btc_addr = BitcoinAddress::from_bytes( self.config.burnchain.get_bitcoin_network().1, BitcoinAddressType::PublicKeyHash, - 
&Keychain::address_from_burnchain_signer(&keychain.get_burnchain_signer()) - .to_bytes(), + &node_address.to_bytes(), ) .unwrap(); info!("Miner node: checking UTXOs at address: {}", btc_addr); @@ -141,8 +144,8 @@ impl RunLoop { } }; - let mainnet = false; - let chainid = neon_node::TESTNET_CHAIN_ID; + let mainnet = self.config.is_mainnet(); + let chainid = self.config.burnchain.chain_id; let block_limit = self.config.block_limit.clone(); let initial_balances = self .config @@ -205,6 +208,10 @@ impl RunLoop { get_bulk_initial_balances: Some(Box::new(|| { get_account_balances(USE_TEST_GENESIS_CHAINSTATE) })), + get_bulk_initial_namespaces: Some(Box::new(|| { + get_namespaces(USE_TEST_GENESIS_CHAINSTATE) + })), + get_bulk_initial_names: Some(Box::new(|| get_names(USE_TEST_GENESIS_CHAINSTATE))), }; let (chain_state_db, receipts) = StacksChainState::open_and_exec( @@ -217,6 +224,9 @@ impl RunLoop { .unwrap(); coordinator_dispatcher.dispatch_boot_receipts(receipts); + let atlas_config = AtlasConfig::default(); + let moved_atlas_config = atlas_config.clone(); + thread::spawn(move || { ChainsCoordinator::run( chain_state_db, @@ -224,6 +234,7 @@ impl RunLoop { attachments_tx, &mut coordinator_dispatcher, coordinator_receivers, + moved_atlas_config, ); }); @@ -254,6 +265,7 @@ impl RunLoop { coordinator_senders, pox_watchdog.make_comms_handle(), attachments_rx, + atlas_config, ) } else { node.into_initialized_node( @@ -262,6 +274,7 @@ impl RunLoop { coordinator_senders, pox_watchdog.make_comms_handle(), attachments_rx, + atlas_config, ) }; diff --git a/testnet/stacks-node/src/tenure.rs b/testnet/stacks-node/src/tenure.rs index 0d93c73f2f..0e77acab97 100644 --- a/testnet/stacks-node/src/tenure.rs +++ b/testnet/stacks-node/src/tenure.rs @@ -1,4 +1,4 @@ -use super::node::{ChainTip, TESTNET_CHAIN_ID}; +use super::node::ChainTip; use super::{BurnchainTip, Config}; use std::thread; @@ -77,8 +77,8 @@ impl<'a> Tenure { } let (mut chain_state, _) = StacksChainState::open_with_block_limit( - false, - TESTNET_CHAIN_ID, + self.config.is_mainnet(), + self.config.burnchain.chain_id, &self.config.get_chainstate_path(), self.config.block_limit.clone(), ) @@ -110,6 +110,8 @@ impl<'a> Tenure { #[cfg(test)] pub fn open_chainstate(&self) -> StacksChainState { + use super::config::TESTNET_CHAIN_ID; + let (chain_state, _) = StacksChainState::open_with_block_limit( false, TESTNET_CHAIN_ID, diff --git a/testnet/stacks-node/src/tests/integrations.rs b/testnet/stacks-node/src/tests/integrations.rs index 39227e9d38..3aac12e036 100644 --- a/testnet/stacks-node/src/tests/integrations.rs +++ b/testnet/stacks-node/src/tests/integrations.rs @@ -753,7 +753,7 @@ fn contract_stx_transfer() { &mut chainstate_copy, &consensus_hash, &header_hash, - xfer_to_contract, + &xfer_to_contract, ) .unwrap(); } @@ -768,7 +768,7 @@ fn contract_stx_transfer() { &mut chainstate_copy, &consensus_hash, &header_hash, - xfer_to_contract, + &xfer_to_contract, ) .unwrap_err() { diff --git a/testnet/stacks-node/src/tests/mempool.rs b/testnet/stacks-node/src/tests/mempool.rs index 22c8f16761..bb3958c5e5 100644 --- a/testnet/stacks-node/src/tests/mempool.rs +++ b/testnet/stacks-node/src/tests/mempool.rs @@ -22,7 +22,7 @@ use std::sync::Mutex; use crate::helium::RunLoop; use crate::Keychain; -use crate::node::TESTNET_CHAIN_ID; +use crate::config::TESTNET_CHAIN_ID; use super::{ make_coinbase, make_contract_call, make_contract_publish, make_poison, make_stacks_transfer, diff --git a/testnet/stacks-node/src/tests/mod.rs b/testnet/stacks-node/src/tests/mod.rs 
index 99003320b1..f44a08e231 100644 --- a/testnet/stacks-node/src/tests/mod.rs +++ b/testnet/stacks-node/src/tests/mod.rs @@ -22,7 +22,7 @@ use stacks::vm::types::PrincipalData; use stacks::vm::{ClarityName, ContractName, Value}; use super::burnchains::bitcoin_regtest_controller::ParsedUTXO; -use super::node::TESTNET_CHAIN_ID; +use super::config::TESTNET_CHAIN_ID; use super::Config; use crate::helium::RunLoop; use rand::RngCore; diff --git a/testnet/stacks-node/src/tests/neon_integrations.rs b/testnet/stacks-node/src/tests/neon_integrations.rs index 8012122646..f80d7c7148 100644 --- a/testnet/stacks-node/src/tests/neon_integrations.rs +++ b/testnet/stacks-node/src/tests/neon_integrations.rs @@ -20,7 +20,7 @@ use stacks::vm::database::ClarityDeserializable; use super::bitcoin_regtest::BitcoinCoreController; use crate::{ burnchains::bitcoin_regtest_controller::UTXO, config::EventKeyType, - config::EventObserverConfig, config::InitialBalance, neon, node::TESTNET_CHAIN_ID, + config::EventObserverConfig, config::InitialBalance, config::TESTNET_CHAIN_ID, neon, operations::BurnchainOpSigner, BitcoinRegtestController, BurnchainController, Config, ConfigFile, Keychain, }; @@ -62,7 +62,7 @@ fn neon_integration_test_conf() -> (Config, StacksAddress) { let magic_bytes = Config::from_config_file(ConfigFile::xenon()) .burnchain .magic_bytes; - assert_eq!(magic_bytes.as_bytes(), &['X' as u8, '2' as u8]); + assert_eq!(magic_bytes.as_bytes(), &['X' as u8, '3' as u8]); conf.burnchain.magic_bytes = magic_bytes; conf.burnchain.poll_time_secs = 1; conf.node.pox_sync_sample_secs = 1; @@ -215,6 +215,31 @@ fn wait_for_runloop(blocks_processed: &Arc<AtomicU64>) { } } +fn submit_tx(http_origin: &str, tx: &Vec<u8>) { + let client = reqwest::blocking::Client::new(); + let path = format!("{}/v2/transactions", http_origin); + let res = client + .post(&path) + .header("Content-Type", "application/octet-stream") + .body(tx.clone()) + .send() + .unwrap(); + eprintln!("{:#?}", res); + if res.status().is_success() { + let res: String = res.json().unwrap(); + assert_eq!( + res, + StacksTransaction::consensus_deserialize(&mut &tx[..]) + .unwrap() + .txid() + .to_string() + ); + } else { + eprintln!("{}", res.text().unwrap()); + panic!(""); + } +} +
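The new submit_tx helper centralizes the POST-and-verify boilerplate that several tests below previously inlined. Typical usage, mirroring the liquid_ustx_integration call sites:

    // build any signed transaction and push it; the helper asserts that the
    // node echoes back the expected txid and panics on rejection
    let publish = make_contract_publish(&spender_sk, 0, 1000, "caller", caller_src);
    submit_tx(&http_origin, &publish);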
 fn get_tip_anchored_block(conf: &Config) -> (ConsensusHash, StacksBlock) { let http_origin = format!("http://{}", &conf.node.rpc_bind); let client = reqwest::blocking::Client::new(); @@ -336,6 +361,135 @@ fn get_balance( u128::from_str_radix(&res.balance[2..], 16).unwrap() } +#[test] +#[ignore] +fn liquid_ustx_integration() { + if env::var("BITCOIND_TEST") != Ok("1".into()) { + return; + } + + // the contract that we'll test the costs of + let caller_src = " + (define-public (execute) + (ok stx-liquid-supply)) + "; + + let spender_sk = StacksPrivateKey::new(); + let spender_addr = to_addr(&spender_sk); + let spender_princ: PrincipalData = spender_addr.into(); + + let (mut conf, _miner_account) = neon_integration_test_conf(); + + test_observer::spawn(); + + conf.events_observers.push(EventObserverConfig { + endpoint: format!("localhost:{}", test_observer::EVENT_OBSERVER_PORT), + events_keys: vec![EventKeyType::AnyEvent], + }); + + let spender_bal = 10_000_000_000 * (core::MICROSTACKS_PER_STACKS as u64); + + conf.initial_balances.push(InitialBalance { + address: spender_princ.clone(), + amount: spender_bal, + }); + + let mut btcd_controller = BitcoinCoreController::new(conf.clone()); + btcd_controller + .start_bitcoind() + .map_err(|_e| ()) + .expect("Failed starting bitcoind"); + + let burnchain_config = Burnchain::regtest(&conf.get_burn_db_path()); + let mut btc_regtest_controller = BitcoinRegtestController::with_burnchain( + conf.clone(), + None, + Some(burnchain_config.clone()), + ); + let http_origin = format!("http://{}", &conf.node.rpc_bind); + + btc_regtest_controller.bootstrap_chain(201); + + eprintln!("Chain bootstrapped..."); + + let mut run_loop = neon::RunLoop::new(conf.clone()); + let blocks_processed = run_loop.get_blocks_processed_arc(); + let _client = reqwest::blocking::Client::new(); + let channel = run_loop.get_coordinator_channel().unwrap(); + + thread::spawn(move || run_loop.start(0, Some(burnchain_config))); + + // give the run loop some time to start up! + wait_for_runloop(&blocks_processed); + + // first block wakes up the run loop + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // first block will hold our VRF registration + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // second block will be the first mined Stacks block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let _sort_height = channel.get_sortitions_processed(); + + let publish = make_contract_publish(&spender_sk, 0, 1000, "caller", caller_src); + + submit_tx(&http_origin, &publish); + + // mine 1 burn block for the miner to issue the next block + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + // mine next burn block for the miner to win + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let call_tx = make_contract_call( + &spender_sk, + 1, + 1000, + &spender_addr, + "caller", + "execute", + &[], + ); + + submit_tx(&http_origin, &call_tx); + + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + // clear and mine another burnchain block, so that the new winner is seen by the observer + // (the observer is logically "one block behind" the miner) + test_observer::clear(); + next_block_and_wait(&mut btc_regtest_controller, &blocks_processed); + + let mut blocks = test_observer::get_blocks(); + // should have produced 1 new block + assert_eq!(blocks.len(), 1); + let block = blocks.pop().unwrap(); + let transactions = block.get("transactions").unwrap().as_array().unwrap(); + eprintln!("{}", transactions.len()); + let mut tested = false; + for tx in transactions.iter() { + let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); + if raw_tx == "0x00" { + continue; + } + let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); + let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "execute" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = <Value as ClarityDeserializable<Value>>::deserialize(&raw_result[2..]); + let liquid_ustx = parsed.expect_result_ok().expect_u128(); + assert!(liquid_ustx > 0, "Should be more liquid ustx than 0"); + tested = true; + } + } + } + assert!(tested, "Should have found a contract call tx"); +} + #[test] #[ignore] fn stx_transfer_btc_integration_test() {
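The event-observer assertions in this test decode raw_result the same way throughout. A minimal sketch of that pattern, assuming (per the ClarityDeserializable import at the top of this file) that it is implemented for Value:

    // raw_result is 0x-prefixed hex: strip the prefix, deserialize, then
    // unwrap the (ok ...) response down to the inner uint
    let raw_result = tx.get("raw_result").unwrap().as_str().unwrap();
    let parsed = <Value as ClarityDeserializable<Value>>::deserialize(&raw_result[2..]);
    let liquid_ustx = parsed.expect_result_ok().expect_u128();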
@@ -1369,30 +1523,11 @@ fn pox_integration_test() { ); // okay, let's push that stacking transaction! - let path = format!("{}/v2/transactions", &http_origin); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx.clone()) - .send() - .unwrap(); - eprintln!("{:#?}", res); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } + submit_tx(&http_origin, &tx); let mut sort_height = channel.get_sortitions_processed(); eprintln!("Sort height: {}", sort_height); + test_observer::clear(); // now let's mine until the next reward cycle starts ... while sort_height < ((14 * pox_constants.reward_cycle_length) + 1).into() { @@ -1415,7 +1550,6 @@ break; } let transactions = block.get("transactions").unwrap().as_array().unwrap(); - eprintln!("{}", transactions.len()); for tx in transactions.iter() { let raw_tx = tx.get("raw_tx").unwrap().as_str().unwrap(); if raw_tx == "0x00" { @@ -1423,24 +1557,25 @@ } let tx_bytes = hex_bytes(&raw_tx[2..]).unwrap(); let parsed = StacksTransaction::consensus_deserialize(&mut &tx_bytes[..]).unwrap(); - if let TransactionPayload::ContractCall(_) = parsed.payload { - } else { - continue; + if let TransactionPayload::ContractCall(contract_call) = parsed.payload { + eprintln!("{}", contract_call.function_name.as_str()); + if contract_call.function_name.as_str() == "stack-stx" { + let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); + let parsed = + <Value as ClarityDeserializable<Value>>::deserialize(&raw_result[2..]); + // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle + // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward + // cycle length of 15 blocks, is a burnchain height of 300) + assert_eq!(parsed.to_string(), + format!("(ok (tuple (lock-amount u1000000000000000) (stacker {}) (unlock-burn-height u300)))", + &spender_addr)); + tested = true; + } } - - let raw_result = tx.get("raw_result").unwrap().as_str().unwrap(); - let parsed = <Value as ClarityDeserializable<Value>>::deserialize(&raw_result[2..]); - // should unlock at height 300 (we're in reward cycle 13, lockup starts in reward cycle - // 14, and goes for 6 blocks, so we unlock in reward cycle 20, which with a reward - // cycle length of 15 blocks, is a burnchain height of 300) - assert_eq!(parsed.to_string(), - format!("(ok (tuple (lock-amount u1000000000000000) (stacker {}) (unlock-burn-height u300)))", - &spender_addr)); - tested = true; } } - assert!(tested); + assert!(tested, "Should have observed stack-stx transaction"); // let's stack with spender 2 and spender 3... @@ -1468,27 +1603,7 @@ ); // okay, let's push that stacking transaction! - let path = format!("{}/v2/transactions", &http_origin); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx.clone()) - .send() - .unwrap(); - eprintln!("{:#?}", res); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } + submit_tx(&http_origin, &tx); let tx = make_contract_call( &spender_3_sk, 1, 260, &StacksAddress::from_string(STACKS_BOOT_CODE_CONTRACT_ADDRESS_STR).unwrap(), "pox", "stack-stx", &[ Value::UInt(stacked_bal / 2), Value::Tuple( PoxAddress::from_legacy( AddressHashMode::SerializeP2PKH, pox_2_pubkey_hash.clone(), ) .expect("Failed to construct pox address"), ), Value::UInt(sort_height as u128), Value::UInt(6), ], ); - // okay, let's push that stacking transaction! 
- let path = format!("{}/v2/transactions", &http_origin); - let res = client - .post(&path) - .header("Content-Type", "application/octet-stream") - .body(tx.clone()) - .send() - .unwrap(); - eprintln!("{:#?}", res); - if res.status().is_success() { - let res: String = res.json().unwrap(); - assert_eq!( - res, - StacksTransaction::consensus_deserialize(&mut &tx[..]) - .unwrap() - .txid() - .to_string() - ); - } else { - eprintln!("{}", res.text().unwrap()); - panic!(""); - } + submit_tx(&http_origin, &tx); // mine until the end of the current reward cycle. sort_height = channel.get_sortitions_processed(); @@ -1696,9 +1790,11 @@ fn atlas_integration_test() { "{}@{}", bootstrap_node_public_key, conf_bootstrap_node.node.p2p_bind ); - conf_follower_node - .node - .set_bootstrap_node(Some(bootstrap_node_url)); + conf_follower_node.node.set_bootstrap_node( + Some(bootstrap_node_url), + conf_follower_node.burnchain.chain_id, + conf_follower_node.burnchain.peer_version, + ); conf_follower_node.node.miner = false; conf_follower_node .initial_balances @@ -1890,7 +1986,7 @@ fn atlas_integration_test() { } // (define-public (name-import (namespace (buff 20)) - // (name (buff 32)) + // (name (buff 48)) // (zonefile-hash (buff 20))) let zonefile_hex = "facade00"; let hashed_zonefile = Hash160::from_data(&hex_bytes(zonefile_hex).unwrap()); @@ -1968,7 +2064,7 @@ fn atlas_integration_test() { // Poll GET v2/attachments/ for i in 1..10 { let mut attachments_did_sync = false; - let mut timeout = 60; + let mut timeout = 120; while attachments_did_sync != true { let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); @@ -2135,7 +2231,7 @@ fn atlas_integration_test() { let target_height = match follower_node_rx.recv() { Ok(Signal::ReplicatingAttachmentsCheckTest2(target_height)) => target_height, - _ => panic!("Bootstrap node could nod boot. Aborting test."), + _ => panic!("Bootstrap node could not boot. Aborting test."), }; let mut sort_height = channel.get_sortitions_processed(); @@ -2147,7 +2243,7 @@ fn atlas_integration_test() { // Poll GET v2/attachments/ for i in 1..10 { let mut attachments_did_sync = false; - let mut timeout = 30; + let mut timeout = 60; while attachments_did_sync != true { let zonefile_hex = hex_bytes(&format!("facade0{}", i)).unwrap(); let hashed_zonefile = Hash160::from_data(&zonefile_hex); @@ -2169,7 +2265,7 @@ fn atlas_integration_test() { } else { timeout -= 1; if timeout == 0 { - panic!("Failed syncing 9 attachments between 2 neon runloops within 30s - Something is wrong"); + panic!("Failed syncing 9 attachments between 2 neon runloops within 60s - Something is wrong"); } eprintln!("Attachment {} not sync'd yet", bytes_to_hex(&zonefile_hex)); thread::sleep(Duration::from_millis(1000));