fix: use tree calculator in world state synchronizer #8902
Changes from all commits: 0b632d8, 45a1614, 9e3c070, de7012b, 444ec41
AztecLmdbStore:

```diff
@@ -144,23 +144,35 @@ export class AztecLmdbStore implements AztecKVStore {
   }

   /**
-   * Clears all entries in the store
+   * Clears all entries in the store & sub DBs.
    */
   async clear() {
+    await this.#data.clearAsync();
+    await this.#multiMapData.clearAsync();
     await this.#rootDb.clearAsync();
   }

+  /**
+   * Drops the database & sub DBs.
+   */
+  async drop() {
+    await this.#data.drop();
+    await this.#multiMapData.drop();
+    await this.#rootDb.drop();
+  }
+
   /**
    * Close the database. Note, once this is closed we can no longer interact with the DB.
+   * Closing the root DB also closes child DBs.
    */
   async close() {
+    await this.#data.close();
+    await this.#multiMapData.close();
     await this.#rootDb.close();
   }

   /** Deletes this store and removes the database files from disk */
   async delete() {
-    await this.#rootDb.drop();
+    await this.drop();
+    await this.close();
     if (this.path) {
       await rm(this.path, { recursive: true, force: true });
```

Review thread on the added `await this.#data.close();`:

> Did closing the root DB not close these? Docs seemed to say it would.

Reply:

> ChatGPT told me it didn't, and indeed, as I was looking into the objects that were in memory, there were 3 per store (`rootDB`, `data`, `data_dup_sort`). With the previous code, `data` and `data_dup_sort` still had an `open` status.
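For context on the thread above, here is a minimal sketch of the cleanup pattern, assuming the store is backed by the `lmdb` npm package (lmdb-js) with a root environment and the two named sub-DBs the thread mentions. This is an illustration, not the actual AztecLmdbStore code:

```ts
import { open } from 'lmdb';

// Hypothetical layout mirroring the store in this PR: a root DB plus two
// named child DBs. `data_dup_sort` uses LMDB's duplicate-sort mode.
const rootDb = open({ path: './store-data' });
const data = rootDb.openDB('data');
const multiMapData = rootDb.openDB('data_dup_sort', { dupSort: true });

async function closeAll(): Promise<void> {
  // Close the child DBs explicitly: per the review thread, closing only
  // the root left `data` and `data_dup_sort` with an `open` status.
  await data.close();
  await multiMapData.close();
  await rootDb.close();
}
```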
ServerWorldStateSynchronizer:

```diff
@@ -10,15 +10,15 @@ import {
   type WorldStateSynchronizer,
 } from '@aztec/circuit-types';
 import { type L2BlockHandledStats } from '@aztec/circuit-types/stats';
+import { MerkleTreeCalculator } from '@aztec/circuits.js';
 import { L1_TO_L2_MSG_SUBTREE_HEIGHT } from '@aztec/circuits.js/constants';
-import { Fr } from '@aztec/foundation/fields';
+import { type Fr } from '@aztec/foundation/fields';
 import { createDebugLogger } from '@aztec/foundation/log';
 import { promiseWithResolvers } from '@aztec/foundation/promise';
 import { SerialQueue } from '@aztec/foundation/queue';
 import { elapsed } from '@aztec/foundation/timer';
 import { type AztecKVStore, type AztecSingleton } from '@aztec/kv-store';
-import { openTmpStore } from '@aztec/kv-store/utils';
-import { SHA256Trunc, StandardTree } from '@aztec/merkle-tree';
+import { SHA256Trunc } from '@aztec/merkle-tree';

 import {
   MerkleTreeAdminOperationsFacade,
```
```diff
@@ -272,7 +272,7 @@ export class ServerWorldStateSynchronizer implements WorldStateSynchronizer {
     // Note that we cannot optimize this check by checking the root of the subtree after inserting the messages
     // to the real L1_TO_L2_MESSAGE_TREE (like we do in merkleTreeDb.handleL2BlockAndMessages(...)) because that
     // tree uses pedersen and we don't have access to the converted root.
-    await this.#verifyMessagesHashToInHash(l1ToL2Messages, l2Block.header.contentCommitment.inHash);
+    this.#verifyMessagesHashToInHash(l1ToL2Messages, l2Block.header.contentCommitment.inHash);

     // If the above check succeeds, we can proceed to handle the block.
     const result = await this.merkleTreeDb.handleL2BlockAndMessages(l2Block, l1ToL2Messages);
```
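An aside on the dropped `await`: it is safe only because `#verifyMessagesHashToInHash` becomes synchronous in the next hunk. A hypothetical contrast (function names invented for illustration), showing why dropping `await` on a still-async helper would instead turn a failed check into an unhandled rejection:

```ts
// Hypothetical illustration, not PR code.
async function verifyAsync(): Promise<void> {
  throw new Error('inHash mismatch');
}
function verifySync(): void {
  throw new Error('inHash mismatch');
}

try {
  verifyAsync(); // without `await`, this rejection is NOT caught here
  verifySync(); // a synchronous throw IS caught by this try/catch
} catch (err) {
  // only verifySync's error lands here
}
```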
```diff
@@ -302,14 +302,19 @@
    * @param inHash - The inHash of the block.
    * @throws If the L1 to L2 messages do not hash to the block inHash.
    */
-  async #verifyMessagesHashToInHash(l1ToL2Messages: Fr[], inHash: Buffer) {
-    const store = openTmpStore(true);
-    const tree = new StandardTree(store, new SHA256Trunc(), 'temp_in_hash_check', L1_TO_L2_MSG_SUBTREE_HEIGHT, 0n, Fr);
-    await tree.appendLeaves(l1ToL2Messages);
+  #verifyMessagesHashToInHash(l1ToL2Messages: Fr[], inHash: Buffer) {
+    const treeCalculator = new MerkleTreeCalculator(
+      L1_TO_L2_MSG_SUBTREE_HEIGHT,
+      Buffer.alloc(32),
+      new SHA256Trunc().hash,
+    );

-    if (!tree.getRoot(true).equals(inHash)) {
+    const root = treeCalculator.computeTreeRoot(l1ToL2Messages.map(msg => msg.toBuffer()));
+    this.log.info(`root: ${root.toString('hex')}`);
+    this.log.info(`inHash: ${inHash.toString('hex')}`);
+
+    if (!root.equals(inHash)) {
       throw new Error('Obtained L1 to L2 messages failed to be hashed to the block inHash');
     }
-    await store.delete();
   }
 }
```

Review thread on lines +313 to +314 (the two `this.log.info` calls):

> Sorry, got late to the party, but can you clean up these logs?
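The switch from `StandardTree` (which needed a temporary KV store) to `MerkleTreeCalculator` works because the root of a small fixed-height tree, padded with zero leaves, can be computed purely in memory. Below is a minimal sketch of that computation, not Aztec's `MerkleTreeCalculator` (its signature differs; the diff shows its constructor taking height, zero leaf, and hasher). Plain `sha256` from `node:crypto` stands in for `SHA256Trunc`, and `Buffer.alloc(32)` is the zero leaf, matching the constructor call above:

```ts
import { createHash } from 'node:crypto';

// Stand-in hasher: plain sha256. The real SHA256Trunc truncates the digest
// so the result fits in a field element.
const hash = (l: Buffer, r: Buffer): Buffer =>
  createHash('sha256').update(Buffer.concat([l, r])).digest();

// Sketch: fold the leaves level by level up to the fixed height, padding
// missing right siblings with the zero hash of the current level.
function computeTreeRoot(leaves: Buffer[], height: number, zeroLeaf: Buffer): Buffer {
  let level = leaves.slice();
  let zero = zeroLeaf;
  for (let i = 0; i < height; i++) {
    const next: Buffer[] = [];
    for (let j = 0; j < level.length; j += 2) {
      next.push(hash(level[j], level[j + 1] ?? zero));
    }
    level = next;
    zero = hash(zero, zero); // zero hash for the next level up
  }
  return level[0] ?? zero; // empty input yields the all-zero tree root
}
```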
Review thread on the clear/drop changes in AztecLmdbStore:

> Do we need to clear/drop the sub-DBs before the main one? Doesn't clearing the main DB also clear the child ones?

Reply:

> I thought so, but while looking into the mem leak it turns out it doesn't. The `data` and `data_dup_sort` DBs still had an `open` status.
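To make the distinction in this thread concrete, a hypothetical snippet under the same lmdb-js assumption as the earlier sketch: `clearAsync()` empties a DB's entries while `drop()` removes the DB itself, and per the reply, neither call on the root cascades to the child DBs, hence the explicit per-sub-DB calls in the new `clear()` and `drop()`:

```ts
import { open } from 'lmdb';

// Hypothetical snippet, assuming the lmdb-js API used by the store.
const rootDb = open({ path: './store-data' });
const data = rootDb.openDB('data');

await data.put('key', 'value');
await data.clearAsync(); // empties `data`; the DB itself still exists
await data.drop();       // removes the `data` DB entirely
await rootDb.drop();     // per this PR, does not cascade to child DBs
```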