From 45ce49e00c29045389c22dffd502142c47bed5b8 Mon Sep 17 00:00:00 2001 From: Roger Ng Date: Fri, 25 Oct 2024 22:50:49 +0000 Subject: [PATCH 1/2] Separate sequencing and integration for MySQL storage --- integration/integration_test.go | 24 +-- storage/mysql/mysql.go | 306 ++++++++++++++++++++++++-------- storage/mysql/mysql_test.go | 19 +- storage/mysql/schema.sql | 11 +- 4 files changed, 274 insertions(+), 86 deletions(-) diff --git a/integration/integration_test.go b/integration/integration_test.go index ba9f4d60..6b3f0faf 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -146,18 +146,22 @@ func TestLiveLogIntegration(t *testing.T) { } // Step 3 - Validate checkpoint size increment. - checkpoint, _, _, err = client.FetchCheckpoint(ctx, logRead, noteVerifier, noteVerifier.Name()) - if err != nil { - t.Errorf("client.FetchCheckpoint: %v", err) - } - if checkpoint == nil { - t.Fatal("checkpoint not found") + var gotIncrease uint64 + for gotIncrease != uint64(*testEntrySize) { + checkpoint, _, _, err = client.FetchCheckpoint(ctx, logRead, noteVerifier, noteVerifier.Name()) + if err != nil { + t.Errorf("client.FetchCheckpoint: %v", err) + } + if checkpoint == nil { + t.Fatal("checkpoint not found") + } + t.Logf("polling checkpoint size: %d", checkpoint.Size) + gotIncrease = checkpoint.Size - checkpointInitSize + + time.Sleep(100 * time.Millisecond) } + t.Logf("checkpoint final size: %d", checkpoint.Size) - gotIncrease := checkpoint.Size - checkpointInitSize - if gotIncrease != uint64(*testEntrySize) { - t.Errorf("checkpoint size increase got: %d, want: %d", gotIncrease, *testEntrySize) - } // Step 4 - Loop through the entry data index map to verify leaves and inclusion proofs. entryIndexMap.Range(func(k, v any) bool { diff --git a/storage/mysql/mysql.go b/storage/mysql/mysql.go index 457cc668..d1487163 100644 --- a/storage/mysql/mysql.go +++ b/storage/mysql/mysql.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "strings" + "time" _ "github.com/go-sql-driver/mysql" "github.com/transparency-dev/merkle/rfc6962" @@ -32,16 +33,22 @@ import ( ) const ( - selectCheckpointByIDSQL = "SELECT `note` FROM `Checkpoint` WHERE `id` = ?" - selectCheckpointByIDForUpdateSQL = selectCheckpointByIDSQL + " FOR UPDATE" - replaceCheckpointSQL = "REPLACE INTO `Checkpoint` (`id`, `note`) VALUES (?, ?)" - selectSubtreeByLevelAndIndexSQL = "SELECT `nodes` FROM `Subtree` WHERE `level` = ? AND `index` = ?" - replaceSubtreeSQL = "REPLACE INTO `Subtree` (`level`, `index`, `nodes`) VALUES (?, ?, ?)" - selectTiledLeavesSQL = "SELECT `data` FROM `TiledLeaves` WHERE `tile_index` = ?" - replaceTiledLeavesSQL = "REPLACE INTO `TiledLeaves` (`tile_index`, `data`) VALUES (?, ?)" - + selectNextSeqIndexByIDSQL = "SELECT `next_sequence_index` FROM `SequencingMetadata` WHERE `id` = ?" + selectNextSeqIndexByIDForUpdateSQL = selectNextSeqIndexByIDSQL + " FOR UPDATE" + replaceNextSeqIndexSQL = "REPLACE INTO `SequencingMetadata` (`id`, `next_sequence_index`) VALUES (?, ?)" + selectCheckpointByIDSQL = "SELECT `note` FROM `Checkpoint` WHERE `id` = ?" + selectCheckpointByIDForUpdateSQL = selectCheckpointByIDSQL + " FOR UPDATE" + replaceCheckpointSQL = "REPLACE INTO `Checkpoint` (`id`, `note`) VALUES (?, ?)" + selectSubtreeByLevelAndIndexSQL = "SELECT `nodes` FROM `Subtree` WHERE `level` = ? AND `index` = ?" + replaceSubtreeSQL = "REPLACE INTO `Subtree` (`level`, `index`, `nodes`) VALUES (?, ?, ?)" + selectTiledLeavesSQL = "SELECT `data` FROM `TiledLeaves` WHERE `tile_index` = ?" 
+	replaceTiledLeavesSQL              = "REPLACE INTO `TiledLeaves` (`tile_index`, `data`) VALUES (?, ?)"
+
+	nextSeqIndexID  = 0
 	checkpointID    = 0
 	entryBundleSize = 256
+
+	defaultIntegrationSizeLimit = 10 * entryBundleSize
 )
 
 // Storage is a MySQL-based storage implementation for Tessera.
@@ -70,7 +77,13 @@ func New(ctx context.Context, db *sql.DB, opts ...func(*tessera.StorageOptions))
 		return nil, errors.New("tessera.WithCheckpointSignerVerifier must be provided in New()")
 	}
 
-	s.queue = storage.NewQueue(ctx, opt.BatchMaxAge, opt.BatchMaxSize, s.sequenceBatch)
+	s.queue = storage.NewQueue(ctx, opt.BatchMaxAge, opt.BatchMaxSize, s.sequenceEntries)
+
+	// Initialize next sequence index if there is no row in the SequencingMetadata table.
+	if err := s.initNextSequenceIndex(ctx); err != nil {
+		klog.Errorf("Failed to initialize next sequence index: %v", err)
+		return nil, err
+	}
 
 	// Initialize checkpoint if there is no row in the Checkpoint table.
 	checkpoint, err := s.ReadCheckpoint(ctx)
@@ -101,9 +114,75 @@ func New(ctx context.Context, db *sql.DB, opts ...func(*tessera.StorageOptions))
 		}
 	}
 
+	// Run integration every second.
+	go func() {
+		t := time.NewTicker(1 * time.Second)
+		defer t.Stop()
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-t.C:
+			}
+
+			func() {
+				cctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+				defer cancel()
+
+				if err := s.consumeEntries(cctx, defaultIntegrationSizeLimit); err != nil {
+					klog.Errorf("consumeEntries: %v", err)
+				}
+			}()
+		}
+	}()
+
 	return s, nil
 }
 
+// initNextSequenceIndex initializes the next sequence index if there is no row in the SequencingMetadata table.
+func (s *Storage) initNextSequenceIndex(ctx context.Context) error {
+	row := s.db.QueryRowContext(ctx, selectNextSeqIndexByIDSQL, nextSeqIndexID)
+	if err := row.Err(); err != nil {
+		return err
+	}
+	var seqIndex uint64
+	if err := row.Scan(&seqIndex); err != nil {
+		if err == sql.ErrNoRows {
+			klog.Infof("Initializing next sequence index")
+			// Get a Tx for making transaction requests.
+			tx, err := s.db.BeginTx(ctx, nil)
+			if err != nil {
+				return err
+			}
+			// Defer a rollback in case anything fails.
+			defer func() {
+				if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
+					klog.Errorf("Failed to rollback in write initial next sequence index: %v", err)
+				}
+			}()
+			if err := s.writeNextSequenceIndex(ctx, tx, 0); err != nil {
+				klog.Errorf("Failed to write initial next sequence index: %v", err)
+				return err
+			}
+			// Commit the transaction.
+			return tx.Commit()
+		}
+		return err
+	}
+
+	return nil
+}
+
+// writeNextSequenceIndex stores the next sequence index.
+func (s *Storage) writeNextSequenceIndex(ctx context.Context, tx *sql.Tx, nextSeqIndex uint64) error {
+	if _, err := tx.ExecContext(ctx, replaceNextSeqIndexSQL, nextSeqIndexID, nextSeqIndex); err != nil {
+		klog.Errorf("Failed to execute replaceNextSeqIndexSQL: %v", err)
+		return err
+	}
+
+	return nil
+}
+
 // ReadCheckpoint returns the latest stored checkpoint.
 // If the checkpoint is not found, nil is returned with no error.
 func (s *Storage) ReadCheckpoint(ctx context.Context) ([]byte, error) {
@@ -220,15 +299,14 @@ func (s *Storage) Add(ctx context.Context, entry *tessera.Entry) tessera.IndexFu
 	return s.queue.Add(ctx, entry)
 }
 
-// sequenceBatch writes the entries from the provided batch into the entry bundle files of the log.
+// sequenceEntries writes the entries from the provided batch into the entry bundle files of the log,
+// and durably assigns each of the passed-in entries an index in the log.
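+// Note that sequencing only persists the entries and advances the stored next sequence index; the
+// sequenced entries are integrated into the tree asynchronously by consumeEntries, which New runs
+// on a periodic ticker.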
// // This func starts filling entries bundles at the next available slot in the log, ensuring that the // sequenced entries are contiguous from the zeroth entry (i.e left-hand dense). // We try to minimise the number of partially complete entry bundles by writing entries in chunks rather // than one-by-one. -// -// TODO(#21): Separate sequencing and integration for better performance. -func (s *Storage) sequenceBatch(ctx context.Context, entries []*tessera.Entry) error { +func (s *Storage) sequenceEntries(ctx context.Context, entries []*tessera.Entry) error { // Return when there is no entry to sequence. if len(entries) == 0 { return nil @@ -242,36 +320,102 @@ func (s *Storage) sequenceBatch(ctx context.Context, entries []*tessera.Entry) e // Defer a rollback in case anything fails. defer func() { if err := tx.Rollback(); err != nil && err != sql.ErrTxDone { - klog.Errorf("Failed to rollback in sequenceBatch: %v", err) + klog.Errorf("Failed to rollback in sequenceEntries: %v", err) } }() - // Get tree size from checkpoint. Note that "SELECT ... FOR UPDATE" is used for row-level locking. - // TODO(#21): Optimize how we get the tree size without parsing and verifying the checkpoints every time. - row := tx.QueryRowContext(ctx, selectCheckpointByIDForUpdateSQL, checkpointID) + // Get next sequenced index. Note that "SELECT ... FOR UPDATE" is used for row-level locking. + row := tx.QueryRowContext(ctx, selectNextSeqIndexByIDForUpdateSQL, nextSeqIndexID) if err := row.Err(); err != nil { return err } - var rawCheckpoint []byte - if err := row.Scan(&rawCheckpoint); err != nil { - return fmt.Errorf("failed to read checkpoint: %w", err) + var nextSeqIndex uint64 + if err := row.Scan(&nextSeqIndex); err != nil { + return fmt.Errorf("failed to read next sequence index: %w", err) } - checkpoint, err := s.parseCheckpoint(rawCheckpoint) - if err != nil { - return fmt.Errorf("failed to verify checkpoint: %w", err) + + sequencedEntries := make([]storage.SequencedEntry, len(entries)) + // Assign provisional sequence numbers to entries. + // We need to do this here in order to support serialisations which include the log position. + for i, e := range entries { + sequencedEntries[i] = storage.SequencedEntry{ + BundleData: e.MarshalBundleData(nextSeqIndex + uint64(i)), + LeafHash: e.LeafHash(), + } + } + + // Add sequenced entries to entry bundles. + bundleIndex, entriesInBundle := nextSeqIndex/entryBundleSize, nextSeqIndex%entryBundleSize + bundleWriter := &bytes.Buffer{} + + // If the latest bundle is partial, we need to read the data it contains in for our newer, larger, bundle. + if entriesInBundle > 0 { + row := tx.QueryRowContext(ctx, selectTiledLeavesSQL, bundleIndex) + if err := row.Err(); err != nil { + return err + } + + var partialEntryBundle []byte + if err := row.Scan(&partialEntryBundle); err != nil { + return fmt.Errorf("row.Scan: %w", err) + } + + if _, err := bundleWriter.Write(partialEntryBundle); err != nil { + return fmt.Errorf("bundleWriter: %w", err) + } } - // Integrate the new entries into the entry bundle (TiledLeaves table) and tile (Subtree table). - if err := s.integrate(ctx, tx, checkpoint.Size, entries); err != nil { - return fmt.Errorf("failed to integrate: %w", err) + // Add new entries to the bundle. + for _, e := range sequencedEntries { + if _, err := bundleWriter.Write(e.BundleData); err != nil { + return fmt.Errorf("bundleWriter.Write: %w", err) + } + entriesInBundle++ + + // This bundle is full, so we need to write it out. 
+		if entriesInBundle == entryBundleSize {
+			if err := s.writeEntryBundle(ctx, tx, bundleIndex, bundleWriter.Bytes()); err != nil {
+				return fmt.Errorf("writeEntryBundle: %w", err)
+			}
+
+			// Prepare the next entry bundle for any remaining entries in the batch.
+			bundleIndex++
+			entriesInBundle = 0
+			bundleWriter = &bytes.Buffer{}
+		}
+	}
+
+	// If we have a partial bundle remaining once we've added all the entries from the batch,
+	// this needs writing out too.
+	if entriesInBundle > 0 {
+		if err := s.writeEntryBundle(ctx, tx, bundleIndex, bundleWriter.Bytes()); err != nil {
+			return fmt.Errorf("writeEntryBundle: %w", err)
+		}
+	}
+
+	// Update the next sequence index.
+	if err := s.writeNextSequenceIndex(ctx, tx, nextSeqIndex+uint64(len(entries))); err != nil {
+		return fmt.Errorf("writeNextSequenceIndex: %w", err)
 	}
 
 	// Commit the transaction.
 	return tx.Commit()
 }
 
-// integrate incorporates the provided entries into the log starting at fromSeq.
-func (s *Storage) integrate(ctx context.Context, tx *sql.Tx, fromSeq uint64, entries []*tessera.Entry) error {
+// consumeEntries fetches the sequenced entries, integrates them into the log, and issues a new checkpoint.
+func (s *Storage) consumeEntries(ctx context.Context, limit uint64) error {
+	// Get a Tx for making transaction requests.
+	tx, err := s.db.BeginTx(ctx, nil)
+	if err != nil {
+		return err
+	}
+	// Defer a rollback in case anything fails.
+	defer func() {
+		if err := tx.Rollback(); err != nil && err != sql.ErrTxDone {
+			klog.Errorf("Failed to rollback in consumeEntries: %v", err)
+		}
+	}()
+
 	tb := storage.NewTreeBuilder(func(ctx context.Context, tileIDs []storage.TileID, treeSize uint64) ([]*api.HashTile, error) {
 		hashTiles := make([]*api.HashTile, len(tileIDs))
 		if len(tileIDs) == 0 {
@@ -322,66 +466,78 @@ func (s *Storage) integrate(ctx context.Context, tx *sql.Tx, fromSeq uint64, ent
 		return hashTiles, nil
 	})
 
-	sequencedEntries := make([]storage.SequencedEntry, len(entries))
-	// Assign provisional sequence numbers to entries.
-	// We need to do this here in order to support serialisations which include the log position.
-	for i, e := range entries {
-		sequencedEntries[i] = storage.SequencedEntry{
-			BundleData: e.MarshalBundleData(fromSeq + uint64(i)),
-			LeafHash:   e.LeafHash(),
-		}
+	// Get the next sequence index without a transaction.
+	row := s.db.QueryRowContext(ctx, selectNextSeqIndexByIDSQL, nextSeqIndexID)
+	if err := row.Err(); err != nil {
+		return err
+	}
+	var nextSeqIndex uint64
+	if err := row.Scan(&nextSeqIndex); err != nil {
+		return fmt.Errorf("failed to read next sequence index: %w", err)
 	}
 
-	// Add sequenced entries to entry bundles.
-	bundleIndex, entriesInBundle := fromSeq/entryBundleSize, fromSeq%entryBundleSize
-	bundleWriter := &bytes.Buffer{}
+	// Get tree size from checkpoint. Note that "SELECT ... FOR UPDATE" is used for row-level locking.
+	// TODO(#21): Optimize how we get the tree size without parsing and verifying the checkpoints every time.
+	row = tx.QueryRowContext(ctx, selectCheckpointByIDForUpdateSQL, checkpointID)
+	if err := row.Err(); err != nil {
+		return err
+	}
+	var rawCheckpoint []byte
+	if err := row.Scan(&rawCheckpoint); err != nil {
+		return fmt.Errorf("failed to read checkpoint: %w", err)
+	}
+	checkpoint, err := s.parseCheckpoint(rawCheckpoint)
+	if err != nil {
+		return fmt.Errorf("failed to verify checkpoint: %w", err)
+	}
 
-	// If the latest bundle is partial, we need to read the data it contains in for our newer, larger, bundle.
-	if entriesInBundle > 0 {
-		row := tx.QueryRowContext(ctx, selectTiledLeavesSQL, bundleIndex)
-		if err := row.Err(); err != nil {
-			return err
-		}
+	klog.Infof("consumeEntries checkpoint.Size: %d", checkpoint.Size)
 
-		var partialEntryBundle []byte
-		if err := row.Scan(&partialEntryBundle); err != nil {
-			return fmt.Errorf("row.Scan: %w", err)
-		}
+	integrateEntriesSize := nextSeqIndex - checkpoint.Size
 
-		if _, err := bundleWriter.Write(partialEntryBundle); err != nil {
-			return fmt.Errorf("bundleWriter: %w", err)
-		}
+	// Return when there is no entry to integrate. This also covers the case where the next sequence
+	// index is smaller than the checkpoint size due to a dirty read (comparing the indices avoids unsigned underflow).
+	if nextSeqIndex <= checkpoint.Size {
+		return nil
 	}
 
-	// Add new entries to the bundle.
-	for _, e := range sequencedEntries {
-		if _, err := bundleWriter.Write(e.BundleData); err != nil {
-			return fmt.Errorf("bundleWriter.Write: %w", err)
+	// Fetch the sequenced entries that are not yet integrated.
+	sequencedEntries := []storage.SequencedEntry{}
+
+	for count := uint64(0); count < integrateEntriesSize; {
+		entryBundleIndex := (checkpoint.Size + count) / entryBundleSize
+		row := tx.QueryRowContext(ctx, selectTiledLeavesSQL, entryBundleIndex)
+		if err := row.Err(); err != nil {
+			return err
 		}
-		entriesInBundle++
+		var entryBundle []byte
+		if err := row.Scan(&entryBundle); err != nil && err != sql.ErrNoRows {
+			return err
+		}
 
-		// This bundle is full, so we need to write it out.
-		if entriesInBundle == entryBundleSize {
-			if err := s.writeEntryBundle(ctx, tx, bundleIndex, bundleWriter.Bytes()); err != nil {
-				return fmt.Errorf("writeEntryBundle: %w", err)
-			}
+		bundle := api.EntryBundle{}
+		if err := bundle.UnmarshalText(entryBundle); err != nil {
+			return fmt.Errorf("failed to parse EntryBundle at index %d: %w", entryBundleIndex, err)
 		}
-			// Prepare the next entry bundle for any remaining entries in the batch.
-			bundleIndex++
-			entriesInBundle = 0
-			bundleWriter = &bytes.Buffer{}
+		for i, data := range bundle.Entries {
+			if count == integrateEntriesSize {
+				break
+			}
+			if entryBundleIndex*entryBundleSize+uint64(i) < checkpoint.Size {
+				continue
+			}
-		}
+			sequencedEntries = append(sequencedEntries, storage.SequencedEntry{
+				BundleData: entryBundle,
+				LeafHash:   rfc6962.DefaultHasher.HashLeaf(data),
+			})
+			count++
 		}
 	}
 
-	// If we have a partial bundle remaining once we've added all the entries from the batch,
-	// this needs writing out too.
-	if entriesInBundle > 0 {
-		if err := s.writeEntryBundle(ctx, tx, bundleIndex, bundleWriter.Bytes()); err != nil {
-			return fmt.Errorf("writeEntryBundle: %w", err)
-		}
-	}
 
-	newSize, newRoot, tiles, err := tb.Integrate(ctx, fromSeq, sequencedEntries)
+	// Integrate sequenced entries into the log.
+	newSize, newRoot, tiles, err := tb.Integrate(ctx, checkpoint.Size, sequencedEntries)
 	if err != nil {
 		return fmt.Errorf("tb.Integrate: %v", err)
 	}
@@ -400,5 +556,7 @@ func (s *Storage) integrate(ctx context.Context, tx *sql.Tx, fromSeq uint64, ent
 	if err := s.writeCheckpoint(ctx, tx, newSize, newRoot); err != nil {
 		return fmt.Errorf("writeCheckpoint: %w", err)
 	}
-	return nil
+
+	// Commit the transaction.
+ return tx.Commit() } diff --git a/storage/mysql/mysql_test.go b/storage/mysql/mysql_test.go index a52ce212..216d8726 100644 --- a/storage/mysql/mysql_test.go +++ b/storage/mysql/mysql_test.go @@ -28,6 +28,7 @@ import ( "testing" "time" + "github.com/transparency-dev/formats/log" "github.com/transparency-dev/merkle/rfc6962" tessera "github.com/transparency-dev/trillian-tessera" "github.com/transparency-dev/trillian-tessera/api" @@ -102,7 +103,7 @@ func TestMain(m *testing.M) { // `multiStatements=true` in the data source name allows multiple statements in one query. // This is not being used in the actual MySQL storage implementation. func initDatabaseSchema(ctx context.Context) { - dropTablesSQL := "DROP TABLE IF EXISTS `Checkpoint`, `Subtree`, `TiledLeaves`" + dropTablesSQL := "DROP TABLE IF EXISTS `SequencingMetadata`, `Checkpoint`, `Subtree`, `TiledLeaves`" rawSchema, err := os.ReadFile("schema.sql") if err != nil { @@ -280,11 +281,27 @@ func TestTileRoundTrip(t *testing.T) { }, } { t.Run(test.name, func(t *testing.T) { + cpSize := uint64(0) + entryIndex, err := s.Add(ctx, tessera.NewEntry(test.entry))() if err != nil { t.Errorf("Add got err: %v", err) } + for cpSize <= entryIndex { + time.Sleep(100 * time.Millisecond) + + cpRaw, err := s.ReadCheckpoint(ctx) + if err != nil { + t.Errorf("ReadCheckpoint got err: %v", err) + } + cp, _, _, err := log.ParseCheckpoint(cpRaw, noteVerifier.Name(), noteVerifier) + if err != nil { + t.Errorf("log.ParseCheckpoint got err: %v", err) + } + cpSize = cp.Size + } + tileLevel, tileIndex, _, nodeIndex := layout.NodeCoordsToTileAddress(0, entryIndex) tileRaw, err := s.ReadTile(ctx, tileLevel, tileIndex, nodeIndex) if err != nil { diff --git a/storage/mysql/schema.sql b/storage/mysql/schema.sql index 485c4fb4..e07bb604 100644 --- a/storage/mysql/schema.sql +++ b/storage/mysql/schema.sql @@ -14,7 +14,16 @@ -- MySQL version of the Trillian Tessera database schema. --- "Checkpoint" table stores a single row that records the current state of the log. It is updated after every sequence and integration. +-- "SequencingMetadata" table stores the next sequence index. It is updated after every sequencing. +CREATE TABLE IF NOT EXISTS `SequencingMetadata` ( + -- id is expected to be always 0 to maintain a maximum of a single row. + `id` INT UNSIGNED NOT NULL, + -- next_sequence_index is the index of the next to-be-sequenced entry. + `next_sequence_index` BIGINT UNSIGNED NOT NULL, + PRIMARY KEY(`id`) +); + +-- "Checkpoint" table stores a single row that records the current state of the log. It is updated after every integration. CREATE TABLE IF NOT EXISTS `Checkpoint` ( -- id is expected to be always 0 to maintain a maximum of a single row. `id` INT UNSIGNED NOT NULL, From 6ada95af2b546a998f47b2c9bdcd947ab670aec4 Mon Sep 17 00:00:00 2001 From: Roger Ng Date: Tue, 29 Oct 2024 01:02:35 +0000 Subject: [PATCH 2/2] Update performance metrics --- docs/performance.md | 64 +++++++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 26 deletions(-) diff --git a/docs/performance.md b/docs/performance.md index a4466084..6a9d04dc 100644 --- a/docs/performance.md +++ b/docs/performance.md @@ -25,25 +25,25 @@ This document describes the performance of each Trillian Tessera storage impleme ``` ┌──────────────────────────────────────────────────────────────────────┐ │Read (8 workers): Current max: 0/s. Oversupply in last second: 0 │ -│Write (256 workers): Current max: 409/s. 
Oversupply in last second: 0 │ -│TreeSize: 240921 (Δ 307qps over 30s) │ -│Time-in-queue: 86ms/566ms/2172ms (min/avg/max) │ -│Observed-time-to-integrate: 516ms/1056ms/2531ms (min/avg/max) │ +│Write (400 workers): Current max: 527/s. Oversupply in last second: 0 │ +│TreeSize: 372394 (Δ 457qps over 30s) │ +│Time-in-queue: 57ms/400ms/1225ms (min/avg/max) │ +│Observed-time-to-integrate: 895ms/1793ms/7094ms (min/avg/max) │ └──────────────────────────────────────────────────────────────────────┘ ``` -The bottleneck is at the dockerized MySQL instance, which consumes around 50% of the memory. +The bottleneck is at the dockerized MySQL instance, which consumes around 50% of the memory. `kswapd0` started consuming 100% swapping the memory when pushing through the limit. ``` -top - 20:07:16 up 9 min, 3 users, load average: 0.55, 0.56, 0.29 -Tasks: 103 total, 1 running, 102 sleeping, 0 stopped, 0 zombie -%Cpu(s): 3.5 us, 1.7 sy, 0.0 ni, 89.9 id, 2.9 wa, 0.0 hi, 2.0 si, 0.0 st -MiB Mem : 970.0 total, 74.5 free, 932.7 used, 65.2 buff/cache -MiB Swap: 0.0 total, 0.0 free, 0.0 used. 37.3 avail Mem +top - 18:15:31 up 18 min, 3 users, load average: 0.12, 0.26, 0.13 +Tasks: 103 total, 1 running, 101 sleeping, 0 stopped, 1 zombie +%Cpu(s): 2.7 us, 2.0 sy, 0.0 ni, 91.3 id, 0.8 wa, 0.0 hi, 3.0 si, 0.2 st +MiB Mem : 970.0 total, 71.8 free, 924.5 used, 87.1 buff/cache +MiB Swap: 0.0 total, 0.0 free, 0.0 used. 45.5 avail Mem PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1770 root 20 0 1231828 22808 0 S 8.6 2.3 0:18.35 conformance-mys - 1140 999 20 0 1842244 493652 0 S 4.0 49.7 0:13.93 mysqld + 3021 root 20 0 1231580 24544 3476 S 10.3 2.5 0:17.89 conformance-mys + 2675 999 20 0 1842248 475440 3396 S 4.0 47.9 0:09.91 mysqld ``` #### Steps @@ -53,11 +53,11 @@ MiB Swap: 0.0 total, 0.0 free, 0.0 used. 37.3 avail Mem 1. [Install Go](https://go.dev/doc/install) ```sh - instance:~$ wget https://go.dev/dl/go1.23.0.linux-amd64.tar.gz - instance:~$ sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.23.0.linux-amd64.tar.gz + instance:~$ wget https://go.dev/dl/go1.23.2.linux-amd64.tar.gz + instance:~$ sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.23.2.linux-amd64.tar.gz instance:~$ export PATH=$PATH:/usr/local/go/bin instance:~$ go version - go version go1.23.0 linux/amd64 + go version go1.23.2 linux/amd64 ``` 1. [Install Docker using the `apt` repository](https://docs.docker.com/engine/install/ubuntu/#install-using-the-repository) @@ -68,7 +68,7 @@ MiB Swap: 0.0 total, 0.0 free, 0.0 used. 37.3 avail Mem instance:~$ sudo apt-get install git -y -q ... instance:~$ git version - git version 2.39.2 + git version 2.39.5 ``` 1. Clone the Trillian Tessera repository @@ -113,16 +113,28 @@ MiB Swap: 0.0 total, 0.0 free, 0.0 used. 37.3 avail Mem ``` ┌───────────────────────────────────────────────────────────────────────┐ -│Read (8 workers): Current max: 0/s. Oversupply in last second: 0 │ -│Write (512 workers): Current max: 2571/s. Oversupply in last second: 0 │ -│TreeSize: 2530480 (Δ 2047qps over 30s) │ -│Time-in-queue: 41ms/120ms/288ms (min/avg/max) │ -│Observed-time-to-integrate: 568ms/636ms/782ms (min/avg/max) │ +│Read (66 workers): Current max: 1/s. Oversupply in last second: 0 │ +│Write (541 workers): Current max: 4139/s. 
Oversupply in last second: 0 │ +│TreeSize: 1087381 (Δ 3121qps over 30s) │ +│Time-in-queue: 71ms/339ms/1320ms (min/avg/max) │ +│Observed-time-to-integrate: 887ms/2834ms/8510ms (min/avg/max) │ └───────────────────────────────────────────────────────────────────────┘ ``` The bottleneck comes from CPU usage of the `cmd/conformance/mysql` binary on the free tier VM instance. The Cloud SQL (MySQL) CPU usage is lower than 10%. +``` +top - 00:57:43 up 7:00, 2 users, load average: 0.13, 0.15, 0.07 +Tasks: 91 total, 1 running, 90 sleeping, 0 stopped, 0 zombie +%Cpu(s): 10.6 us, 3.4 sy, 0.0 ni, 81.8 id, 0.0 wa, 0.0 hi, 4.1 si, 0.2 st +MiB Mem : 970.0 total, 314.0 free, 529.5 used, 275.9 buff/cache +MiB Swap: 0.0 total, 0.0 free, 0.0 used. 440.5 avail Mem + + PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND + 10240 rogerng+ 20 0 1299188 77288 5464 S 32.9 7.8 1:04.62 mysql + 371 root 20 0 221788 3192 0 S 0.7 0.3 0:18.78 rsyslo +``` + #### Steps 1. Create a MySQL instance on Cloud SQL. @@ -134,11 +146,11 @@ The bottleneck comes from CPU usage of the `cmd/conformance/mysql` binary on the 1. [Install Go](https://go.dev/doc/install) ```sh - instance:~$ wget https://go.dev/dl/go1.23.0.linux-amd64.tar.gz - instance:~$ sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.23.0.linux-amd64.tar.gz + instance:~$ wget https://go.dev/dl/go1.23.2.linux-amd64.tar.gz + instance:~$ sudo rm -rf /usr/local/go && sudo tar -C /usr/local -xzf go1.23.2.linux-amd64.tar.gz instance:~$ export PATH=$PATH:/usr/local/go/bin instance:~$ go version - go version go1.23.0 linux/amd64 + go version go1.23.2 linux/amd64 ``` 1. Install Git @@ -147,7 +159,7 @@ The bottleneck comes from CPU usage of the `cmd/conformance/mysql` binary on the instance:~$ sudo apt-get install git -y -q ... instance:~$ git version - git version 2.39.2 + git version 2.39.5 ``` 1. Clone the Trillian Tessera repository @@ -157,7 +169,7 @@ The bottleneck comes from CPU usage of the `cmd/conformance/mysql` binary on the Cloning into 'trillian-tessera'... ``` -1. Run `cloud-sql-proxy` +1. Run [`cloud-sql-proxy`](https://cloud.google.com/sql/docs/mysql/sql-proxy#install) ```sh instance:~$ ./cloud-sql-proxy --port 3306 transparency-dev-playground:us-central1:mysql-dev-instance-1