From 052224047ea6e0e0ae3cfd27dc3f77a1acafeb94 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 19 Jan 2023 15:17:48 +0100 Subject: [PATCH 001/187] WIP ingest tool. Added TruncateAllTables to db. --- cmd/ingest/main.go | 183 +++++++++++++++++++++++++++++++++++++++------ pkg/db/db.go | 6 -- pkg/db/mysql.go | 10 --- 3 files changed, 160 insertions(+), 39 deletions(-) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index b9663076..0b21c31d 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -1,21 +1,23 @@ package main import ( + "compress/gzip" + "encoding/base64" + "encoding/csv" "flag" "fmt" + "io" "io/ioutil" "os" "path/filepath" - "strings" + "sync" "github.com/netsec-ethz/fpki/pkg/db" ) const ( - NumDBInserters = 16 - CertificateColumn = 3 - CertChainColumn = 4 + ChainColumn = 4 ) func main() { @@ -35,24 +37,20 @@ func main() { // Truncate DB. exitIfError(conn.TruncateAllTables()) + // Disable indices in DB. - exitIfError(conn.DisableIndexing("domainEntries")) - exitIfError(conn.DisableIndexing("updates")) + // TODO(juagargi) // Update certificates and chains. - proc := NewProcessor(conn) - proc.AddGzFiles(gzFiles) - proc.AddCsvFiles(csvFiles) - exitIfError(proc.Wait()) + err = updateCertificatesFromFiles(conn, gzFiles, csvFiles) + exitIfError(err) // Re-enable indices in DB. - exitIfError(conn.EnableIndexing("updates")) - exitIfError(conn.EnableIndexing("domainEntries")) + // TODO(juagargi) + // Close DB and check errors. 
err = conn.Close() exitIfError(err) - - fmt.Printf("Final root value: %x\n", proc.root) } func listOurFiles(dir string) (gzFiles, csvFiles []string) { @@ -60,14 +58,15 @@ func listOurFiles(dir string) (gzFiles, csvFiles []string) { exitIfError(err) for _, e := range entries { if !e.IsDir() { - f := filepath.Join(dir, e.Name()) - ext := strings.ToLower(filepath.Ext(e.Name())) - switch ext { - case ".gz": - gzFiles = append(gzFiles, f) - case ".csv": - csvFiles = append(csvFiles, f) - } + continue + } + if e.Name() == "bundled" { + // Use all *.gz in this directory. + d := filepath.Join(dir, e.Name()) + gzFiles, err = filepath.Glob(fmt.Sprintf("%s/*.gz", d)) + exitIfError(err) + csvFiles, err = filepath.Glob(fmt.Sprintf("%s/*.csv", dir)) + exitIfError(err) } else { gzs, csvs := listOurFiles(filepath.Join(dir, e.Name())) gzFiles = append(gzFiles, gzs...) @@ -77,9 +76,147 @@ func listOurFiles(dir string) (gzFiles, csvFiles []string) { return } +func updateCertificatesFromFiles(conn db.Conn, gzFiles, csvFiles []string) error { + const N = 2 + exitIfError(processCollection(conn, gzFiles, N, func(fileNameCh chan string) error { + return updateFromGzFileName(conn, fileNameCh) + })) + + exitIfError(processCollection(conn, csvFiles, N, func(fileNameCh chan string) error { + return updateFromFileName(conn, fileNameCh) + })) + + return nil +} + +func processCollection(conn db.Conn, fileNames []string, N int, + fcn func(fileNameCh chan string) error) error { + + // Use a channel to dispatch batches to go routines. + fileNameCh := make(chan string) + + errorCh := processConcurrently(conn, N, func() error { + return fcn(fileNameCh) + }) + // Send the GZ file names to the channel: + for _, f := range fileNames { + fileNameCh <- f + } + close(fileNameCh) + + fmt.Println("deleteme 10") + // If there had been any errors, report them and return an error as well. 
+ var errorsFound bool + fmt.Println("deleteme 31") + for err := range errorCh { + fmt.Println("deleteme 32") + if err == nil { + continue + } + errorsFound = true + fmt.Fprintf(os.Stderr, "%s\n", err) + fmt.Println("deleteme ---------------") + } + + fmt.Println("deleteme 41") + if errorsFound { + return fmt.Errorf("found errors") + } + return nil +} + +func processConcurrently(conn db.Conn, N int, fcn func() error) chan error { + errorCh := make(chan error) + go func() { + // Use a WaitGroup to wait for all go routines to finish. + wg := sync.WaitGroup{} + // Span N go routines. + wg.Add(N) // TODO(juagargi) remove N and span as many routines as files. + for i := 0; i < N; i++ { + go func() { + fmt.Println("deleteme 20") + defer wg.Done() + errorCh <- fcn() + fmt.Println("deleteme 22") + }() + } + fmt.Println("deleteme 19") + wg.Wait() + fmt.Println("deleteme 29") + close(errorCh) + fmt.Println("deleteme 30") + }() + + return errorCh +} + +func updateFromGzFileName(conn db.Conn, fileNameCh chan string) error { + for filename := range fileNameCh { + fmt.Printf("deleteme BEGIN WORK with %s\n", filename) + f, err := os.Open(filename) + if err != nil { + return err + } + gz, err := gzip.NewReader(f) + if err != nil { + return err + } + + if err := updateFromCSV(conn, gz); err != nil { + return err + } + + if err := gz.Close(); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + fmt.Printf("deleteme END WORK with %s\n", filename) + } + return nil +} + +func updateFromFileName(conn db.Conn, fileNameCh chan string) error { + for filename := range fileNameCh { + f, err := os.Open(filename) + if err != nil { + return err + } + if err := updateFromCSV(conn, f); err != nil { + return err + } + if err := f.Close(); err != nil { + return err + } + } + return nil +} + +func updateFromCSV(conn db.Conn, fileReader io.Reader) error { + reader := csv.NewReader(fileReader) + reader.FieldsPerRecord = -1 // don't check number of fields + 
reader.ReuseRecord = true + + var err error + var fields []string + for lineNo := 1; err == nil; lineNo++ { + fields, err = reader.Read() + if len(fields) == 0 { // there exist empty lines (e.g. at the end of the gz files) + continue + } + raw, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) + if err != nil { + return err + } + _ = raw + } + return nil +} + func exitIfError(err error) { if err != nil { - fmt.Fprintf(os.Stderr, "%s\n", err) + fmt.Fprintf(os.Stderr, "%s", err) os.Exit(1) } } diff --git a/pkg/db/db.go b/pkg/db/db.go index c3b52042..294e5620 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -24,12 +24,6 @@ type Conn interface { // TruncateAllTables resets the DB to an initial state. TruncateAllTables() error - // DisableIndexing stops the indexing in the table. - DisableIndexing(table string) error - - // DisableIndexing starts the indexing in the table. - EnableIndexing(table string) error - // ************************************************************ // Function for Tree table // ************************************************************ diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index a886e8b5..8fe18379 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -140,16 +140,6 @@ func (c *mysqlDB) TruncateAllTables() error { return nil } -func (c *mysqlDB) DisableIndexing(table string) error { - _, err := c.db.Exec(fmt.Sprintf("ALTER TABLE `%s` DISABLE KEYS", table)) - return err -} - -func (c *mysqlDB) EnableIndexing(table string) error { - _, err := c.db.Exec(fmt.Sprintf("ALTER TABLE `%s` ENABLE KEYS", table)) - return err -} - // repeatStmt returns ( (?,..inner..,?), ...outer... ) func repeatStmt(outer int, inner int) string { components := make([]string, inner) From c251889f43d9e272884f6eb10e6735642333147c Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 19 Jan 2023 16:27:50 +0100 Subject: [PATCH 002/187] Use a processor structure instead of plain functions. 
--- cmd/ingest/main.go | 84 +++++++++++++++---------------------- cmd/ingest/map_reduce.go | 89 ++++++++++++++++++++++++++++++++++++++++ pkg/db/db.go | 6 +++ pkg/db/mysql.go | 10 +++++ 4 files changed, 137 insertions(+), 52 deletions(-) create mode 100644 cmd/ingest/map_reduce.go diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 0b21c31d..77000847 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -2,11 +2,8 @@ package main import ( "compress/gzip" - "encoding/base64" - "encoding/csv" "flag" "fmt" - "io" "io/ioutil" "os" "path/filepath" @@ -17,7 +14,7 @@ import ( const ( CertificateColumn = 3 - ChainColumn = 4 + CertChainColumn = 4 ) func main() { @@ -38,15 +35,20 @@ func main() { // Truncate DB. exitIfError(conn.TruncateAllTables()) + const N = 2 + mapReduce := NewMapReduce(conn) + // Disable indices in DB. - // TODO(juagargi) + exitIfError(conn.DisableIndexing("domainEntries")) // Update certificates and chains. - err = updateCertificatesFromFiles(conn, gzFiles, csvFiles) + err = updateCertificatesFromFiles(mapReduce, N, gzFiles, csvFiles) exitIfError(err) + // <-mapReduce.Done + // Re-enable indices in DB. - // TODO(juagargi) + exitIfError(conn.EnableIndexing("domainEntries")) // Close DB and check errors. 
err = conn.Close() @@ -76,26 +78,25 @@ func listOurFiles(dir string) (gzFiles, csvFiles []string) { return } -func updateCertificatesFromFiles(conn db.Conn, gzFiles, csvFiles []string) error { - const N = 2 - exitIfError(processCollection(conn, gzFiles, N, func(fileNameCh chan string) error { - return updateFromGzFileName(conn, fileNameCh) +func updateCertificatesFromFiles(mapReduce *MapReduce, N int, gzFiles, csvFiles []string) error { + exitIfError(processCollection(gzFiles, N, func(fileNameCh chan string) error { + return updateFromGzFileName(mapReduce, fileNameCh) })) - exitIfError(processCollection(conn, csvFiles, N, func(fileNameCh chan string) error { - return updateFromFileName(conn, fileNameCh) + exitIfError(processCollection(csvFiles, N, func(fileNameCh chan string) error { + return updateFromFileName(mapReduce, fileNameCh) })) return nil } -func processCollection(conn db.Conn, fileNames []string, N int, +func processCollection(fileNames []string, N int, fcn func(fileNameCh chan string) error) error { // Use a channel to dispatch batches to go routines. fileNameCh := make(chan string) - errorCh := processConcurrently(conn, N, func() error { + errorCh := processConcurrently(N, func() error { return fcn(fileNameCh) }) // Send the GZ file names to the channel: @@ -104,28 +105,28 @@ func processCollection(conn db.Conn, fileNames []string, N int, } close(fileNameCh) - fmt.Println("deleteme 10") + // fmt.Println("deleteme 10") // If there had been any errors, report them and return an error as well. 
var errorsFound bool - fmt.Println("deleteme 31") + // fmt.Println("deleteme 31") for err := range errorCh { - fmt.Println("deleteme 32") + // fmt.Println("deleteme 32") if err == nil { continue } errorsFound = true fmt.Fprintf(os.Stderr, "%s\n", err) - fmt.Println("deleteme ---------------") + // fmt.Println("deleteme ---------------") } - fmt.Println("deleteme 41") + // fmt.Println("deleteme 41") if errorsFound { return fmt.Errorf("found errors") } return nil } -func processConcurrently(conn db.Conn, N int, fcn func() error) chan error { +func processConcurrently(N int, fcn func() error) chan error { errorCh := make(chan error) go func() { // Use a WaitGroup to wait for all go routines to finish. @@ -134,23 +135,23 @@ func processConcurrently(conn db.Conn, N int, fcn func() error) chan error { wg.Add(N) // TODO(juagargi) remove N and span as many routines as files. for i := 0; i < N; i++ { go func() { - fmt.Println("deleteme 20") + // fmt.Println("deleteme 20") defer wg.Done() errorCh <- fcn() - fmt.Println("deleteme 22") + // fmt.Println("deleteme 22") }() } - fmt.Println("deleteme 19") + // fmt.Println("deleteme 19") wg.Wait() - fmt.Println("deleteme 29") + // fmt.Println("deleteme 29") close(errorCh) - fmt.Println("deleteme 30") + // fmt.Println("deleteme 30") }() return errorCh } -func updateFromGzFileName(conn db.Conn, fileNameCh chan string) error { +func updateFromGzFileName(mr *MapReduce, fileNameCh chan string) error { for filename := range fileNameCh { fmt.Printf("deleteme BEGIN WORK with %s\n", filename) f, err := os.Open(filename) @@ -162,7 +163,7 @@ func updateFromGzFileName(conn db.Conn, fileNameCh chan string) error { return err } - if err := updateFromCSV(conn, gz); err != nil { + if err := mr.IngestWithCSV(gz); err != nil { return err } @@ -177,13 +178,13 @@ func updateFromGzFileName(conn db.Conn, fileNameCh chan string) error { return nil } -func updateFromFileName(conn db.Conn, fileNameCh chan string) error { +func updateFromFileName(mr 
*MapReduce, fileNameCh chan string) error { for filename := range fileNameCh { f, err := os.Open(filename) if err != nil { return err } - if err := updateFromCSV(conn, f); err != nil { + if err := mr.IngestWithCSV(f); err != nil { return err } if err := f.Close(); err != nil { @@ -193,30 +194,9 @@ func updateFromFileName(conn db.Conn, fileNameCh chan string) error { return nil } -func updateFromCSV(conn db.Conn, fileReader io.Reader) error { - reader := csv.NewReader(fileReader) - reader.FieldsPerRecord = -1 // don't check number of fields - reader.ReuseRecord = true - - var err error - var fields []string - for lineNo := 1; err == nil; lineNo++ { - fields, err = reader.Read() - if len(fields) == 0 { // there exist empty lines (e.g. at the end of the gz files) - continue - } - raw, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) - if err != nil { - return err - } - _ = raw - } - return nil -} - func exitIfError(err error) { if err != nil { - fmt.Fprintf(os.Stderr, "%s", err) + fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } } diff --git a/cmd/ingest/map_reduce.go b/cmd/ingest/map_reduce.go new file mode 100644 index 00000000..e40fd147 --- /dev/null +++ b/cmd/ingest/map_reduce.go @@ -0,0 +1,89 @@ +package main + +import ( + "encoding/base64" + "encoding/csv" + "fmt" + "io" + "strings" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/db" +) + +type MapReduce struct { + BatchSize int + Conn db.Conn + Done chan struct{} + + fromParserCh chan *CertData +} + +type CertData struct { + Cert *ctx509.Certificate + CertChain []*ctx509.Certificate +} + +func NewMapReduce(conn db.Conn) *MapReduce { + mr := &MapReduce{ + BatchSize: 1000, + Conn: conn, + Done: make(chan struct{}), + + fromParserCh: make(chan *CertData), + } + mr.Process() + return mr +} + +func (mr *MapReduce) Process() { + go func() { + for data := range mr.fromParserCh { + cn := data.Cert.Subject.CommonName + // fmt.Printf("CN: 
%s\n", cn) + _ = cn + } + }() +} + +func (mr *MapReduce) IngestWithCSV(fileReader io.Reader) error { + reader := csv.NewReader(fileReader) + reader.FieldsPerRecord = -1 // don't check number of fields + reader.ReuseRecord = true + + var err error + var fields []string + for lineNo := 1; err == nil; lineNo++ { + fields, err = reader.Read() + if len(fields) == 0 { // there exist empty lines (e.g. at the end of the gz files) + continue + } + rawBytes, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) + if err != nil { + return err + } + cert, err := ctx509.ParseCertificate(rawBytes) + if err != nil { + return err + } + + // The certificate chain is a list of base64 strings separated by semicolon (;). + strs := strings.Split(fields[CertChainColumn], ";") + chain := make([]*ctx509.Certificate, len(strs)) + for i, s := range strs { + rawBytes, err = base64.StdEncoding.DecodeString(s) + if err != nil { + return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) + } + chain[i], err = ctx509.ParseCertificate(rawBytes) + if err != nil { + return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) + } + } + mr.fromParserCh <- &CertData{ + Cert: cert, + CertChain: chain, + } + } + return nil +} diff --git a/pkg/db/db.go b/pkg/db/db.go index 294e5620..c3b52042 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -24,6 +24,12 @@ type Conn interface { // TruncateAllTables resets the DB to an initial state. TruncateAllTables() error + // DisableIndexing stops the indexing in the table. + DisableIndexing(table string) error + + // DisableIndexing starts the indexing in the table. 
+ EnableIndexing(table string) error + // ************************************************************ // Function for Tree table // ************************************************************ diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index 8fe18379..a886e8b5 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -140,6 +140,16 @@ func (c *mysqlDB) TruncateAllTables() error { return nil } +func (c *mysqlDB) DisableIndexing(table string) error { + _, err := c.db.Exec(fmt.Sprintf("ALTER TABLE `%s` DISABLE KEYS", table)) + return err +} + +func (c *mysqlDB) EnableIndexing(table string) error { + _, err := c.db.Exec(fmt.Sprintf("ALTER TABLE `%s` ENABLE KEYS", table)) + return err +} + // repeatStmt returns ( (?,..inner..,?), ...outer... ) func repeatStmt(outer int, inner int) string { components := make([]string, inner) From 8d64850aa6b136743e53467fe4a391b44da2bb13 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 19 Jan 2023 16:29:47 +0100 Subject: [PATCH 003/187] rename MapReduce to processor --- cmd/ingest/main.go | 14 ++-- cmd/ingest/map_reduce.go | 89 ------------------------ cmd/ingest/processor.go | 145 +++------------------------------------ 3 files changed, 18 insertions(+), 230 deletions(-) delete mode 100644 cmd/ingest/map_reduce.go diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 77000847..5199aa35 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -78,13 +78,13 @@ func listOurFiles(dir string) (gzFiles, csvFiles []string) { return } -func updateCertificatesFromFiles(mapReduce *MapReduce, N int, gzFiles, csvFiles []string) error { +func updateCertificatesFromFiles(proc *Processor, N int, gzFiles, csvFiles []string) error { exitIfError(processCollection(gzFiles, N, func(fileNameCh chan string) error { - return updateFromGzFileName(mapReduce, fileNameCh) + return updateFromGzFileName(proc, fileNameCh) })) exitIfError(processCollection(csvFiles, N, func(fileNameCh chan string) error { - return 
updateFromFileName(mapReduce, fileNameCh) + return updateFromFileName(proc, fileNameCh) })) return nil @@ -151,7 +151,7 @@ func processConcurrently(N int, fcn func() error) chan error { return errorCh } -func updateFromGzFileName(mr *MapReduce, fileNameCh chan string) error { +func updateFromGzFileName(proc *Processor, fileNameCh chan string) error { for filename := range fileNameCh { fmt.Printf("deleteme BEGIN WORK with %s\n", filename) f, err := os.Open(filename) @@ -163,7 +163,7 @@ func updateFromGzFileName(mr *MapReduce, fileNameCh chan string) error { return err } - if err := mr.IngestWithCSV(gz); err != nil { + if err := proc.IngestWithCSV(gz); err != nil { return err } @@ -178,13 +178,13 @@ func updateFromGzFileName(mr *MapReduce, fileNameCh chan string) error { return nil } -func updateFromFileName(mr *MapReduce, fileNameCh chan string) error { +func updateFromFileName(proc *Processor, fileNameCh chan string) error { for filename := range fileNameCh { f, err := os.Open(filename) if err != nil { return err } - if err := mr.IngestWithCSV(f); err != nil { + if err := proc.IngestWithCSV(f); err != nil { return err } if err := f.Close(); err != nil { diff --git a/cmd/ingest/map_reduce.go b/cmd/ingest/map_reduce.go deleted file mode 100644 index e40fd147..00000000 --- a/cmd/ingest/map_reduce.go +++ /dev/null @@ -1,89 +0,0 @@ -package main - -import ( - "encoding/base64" - "encoding/csv" - "fmt" - "io" - "strings" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/db" -) - -type MapReduce struct { - BatchSize int - Conn db.Conn - Done chan struct{} - - fromParserCh chan *CertData -} - -type CertData struct { - Cert *ctx509.Certificate - CertChain []*ctx509.Certificate -} - -func NewMapReduce(conn db.Conn) *MapReduce { - mr := &MapReduce{ - BatchSize: 1000, - Conn: conn, - Done: make(chan struct{}), - - fromParserCh: make(chan *CertData), - } - mr.Process() - return mr -} - -func (mr *MapReduce) 
Process() { - go func() { - for data := range mr.fromParserCh { - cn := data.Cert.Subject.CommonName - // fmt.Printf("CN: %s\n", cn) - _ = cn - } - }() -} - -func (mr *MapReduce) IngestWithCSV(fileReader io.Reader) error { - reader := csv.NewReader(fileReader) - reader.FieldsPerRecord = -1 // don't check number of fields - reader.ReuseRecord = true - - var err error - var fields []string - for lineNo := 1; err == nil; lineNo++ { - fields, err = reader.Read() - if len(fields) == 0 { // there exist empty lines (e.g. at the end of the gz files) - continue - } - rawBytes, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) - if err != nil { - return err - } - cert, err := ctx509.ParseCertificate(rawBytes) - if err != nil { - return err - } - - // The certificate chain is a list of base64 strings separated by semicolon (;). - strs := strings.Split(fields[CertChainColumn], ";") - chain := make([]*ctx509.Certificate, len(strs)) - for i, s := range strs { - rawBytes, err = base64.StdEncoding.DecodeString(s) - if err != nil { - return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) - } - chain[i], err = ctx509.ParseCertificate(rawBytes) - if err != nil { - return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) - } - } - mr.fromParserCh <- &CertData{ - Cert: cert, - CertChain: chain, - } - } - return nil -} diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 1e8e9032..b8e4e67e 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -5,11 +5,7 @@ import ( "encoding/csv" "fmt" "io" - "os" "strings" - "sync" - "sync/atomic" - "time" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/db" @@ -18,18 +14,9 @@ import ( type Processor struct { BatchSize int Conn db.Conn + Done chan struct{} - incomingFileCh chan File // indicates new file(s) with certificates to be ingested - fromParserCh chan *CertData // parser data to be sent to SMT 
and DB\ - batchProcessor *BatchProcessor - - root []byte // final root value after finishing ingestion - - // Statistics: - expiredCerts atomic.Uint64 - - errorCh chan error // errors accumulate here - doneCh chan error // the aggregation of all errors. Signals Processor is done + fromParserCh chan *CertData } type CertData struct { @@ -37,116 +24,29 @@ type CertData struct { CertChain []*ctx509.Certificate } -func NewProcessor(conn db.Conn) *Processor { +func NewMapReduce(conn db.Conn) *Processor { p := &Processor{ BatchSize: 1000, Conn: conn, + Done: make(chan struct{}), - incomingFileCh: make(chan File), - fromParserCh: make(chan *CertData), - batchProcessor: NewBatchProcessor(conn), - - errorCh: make(chan error), - doneCh: make(chan error), + fromParserCh: make(chan *CertData), } - p.start() + p.Process() return p } -func (p *Processor) start() { - // Process files and parse the CSV contents: +func (p *Processor) Process() { go func() { - wg := sync.WaitGroup{} - for f := range p.incomingFileCh { - f := f - wg.Add(1) - go func() { - defer wg.Done() - r, err := f.Open() - if err != nil { - p.errorCh <- err - return - } - if err := p.ingestWithCSV(r); err != nil { - p.errorCh <- err - return - } - if err := f.Close(); err != nil { - p.errorCh <- err - return - } - }() - } - wg.Wait() - fmt.Println("deleteme done with incoming files, closing parsed data channel") - // Because we are done writing parsed content, close that channel. - close(p.fromParserCh) - }() - - // Process the parsed content into the DB: - go func() { - batch := NewBatch() for data := range p.fromParserCh { - batch.AddData(data) - if batch.Full() { - p.batchProcessor.Process(batch) - fmt.Print(".") - batch = NewBatch() - } + cn := data.Cert.Subject.CommonName + // fmt.Printf("CN: %s\n", cn) + _ = cn } - // Process last batch, which may have zero size. - p.batchProcessor.Process(batch) - fmt.Println() - p.batchProcessor.Wait() - - fmt.Printf("\ndeleteme done ingesting the certificates. 
SMT still to go\n\n\n\n") - - // Now start processing the changed domains into the SMT: - smtProcessor := NewSMTUpdater(p.Conn, nil, 32) - smtProcessor.Start() - if err := smtProcessor.Wait(); err != nil { - fmt.Printf("deleteme error found in SMT processing: %s\n", err) - p.errorCh <- err - } - p.root = smtProcessor.smtTrie.Root - - // There is no more processing to do, close the errors channel and allow the - // error processor to finish. - close(p.errorCh) - }() - - go func() { - // Print errors and return error if there was any error printed: - p.doneCh <- p.processErrorChannel() }() } -func (p *Processor) Wait() error { - // Close the parsing and incoming channels: - fmt.Println("deleteme closing incomingFileCh") - close(p.incomingFileCh) - - // Wait until all data has been processed. - fmt.Println("deleteme waiting for done signal") - err := <-p.doneCh - fmt.Printf("Total of skipped certificates because they were expired: %d\n", - p.expiredCerts.Load()) - return err -} - -func (p *Processor) AddGzFiles(fileNames []string) { - for _, filename := range fileNames { - p.incomingFileCh <- (&GzFile{}).WithFile(filename) - } -} - -func (p *Processor) AddCsvFiles(fileNames []string) { - for _, filename := range fileNames { - p.incomingFileCh <- (&CsvFile{}).WithFile(filename) - } -} - -func (p *Processor) ingestWithCSV(fileReader io.Reader) error { +func (p *Processor) IngestWithCSV(fileReader io.Reader) error { reader := csv.NewReader(fileReader) reader.FieldsPerRecord = -1 // don't check number of fields reader.ReuseRecord = true @@ -167,12 +67,6 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { return err } - // If the certificate is already expired, skip it altogether. - if time.Now().After(cert.NotAfter) { - p.expiredCerts.Add(1) - continue - } - // The certificate chain is a list of base64 strings separated by semicolon (;). 
strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) @@ -193,20 +87,3 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { } return nil } - -func (p *Processor) processErrorChannel() error { - var errorsFound bool - fmt.Println("deleteme processing error channel") - for err := range p.errorCh { - if err == nil { - continue - } - fmt.Println("deleteme errors found") - errorsFound = true - fmt.Fprintf(os.Stderr, "%s\n", err) - } - if errorsFound { - return fmt.Errorf("errors found while processing. See above") - } - return nil -} From 16a3cc3557f0cd36565c876a1c3a2d41cd046992 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 19 Jan 2023 20:31:34 +0100 Subject: [PATCH 004/187] Processor handles data parsing and map-reduce ops. The processor now relies on the goroutines mechanism to find the right balance of number of goroutines against bottlenecks. --- cmd/ingest/csv_files.go | 34 +++-------- cmd/ingest/main.go | 131 ++-------------------------------------- cmd/ingest/processor.go | 109 ++++++++++++++++++++++++++++++--- 3 files changed, 113 insertions(+), 161 deletions(-) diff --git a/cmd/ingest/csv_files.go b/cmd/ingest/csv_files.go index 1c8cea2f..4ff9b18a 100644 --- a/cmd/ingest/csv_files.go +++ b/cmd/ingest/csv_files.go @@ -7,36 +7,16 @@ import ( ) type File interface { - WithFile(string) File - Filename() string Open() (io.Reader, error) Close() error } -type baseFile struct { +type GzFile struct { FileName string reader *os.File -} - -func (f *baseFile) Filename() string { - return f.FileName -} - -func (f *baseFile) Close() error { - return f.reader.Close() -} - -type GzFile struct { - baseFile - gzReader *gzip.Reader } -func (f *GzFile) WithFile(fn string) File { - f.FileName = fn - return f -} - func (f *GzFile) Open() (io.Reader, error) { var err error f.reader, err = os.Open(f.FileName) @@ -58,12 +38,8 @@ func (f *GzFile) Close() error { } type CsvFile struct { - baseFile -} - -func 
(f *CsvFile) WithFile(fn string) File { - f.FileName = fn - return f + FileName string + reader *os.File } func (f *CsvFile) Open() (io.Reader, error) { @@ -74,3 +50,7 @@ func (f *CsvFile) Open() (io.Reader, error) { } return f.reader, nil } + +func (f *CsvFile) Close() error { + return f.reader.Close() +} diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 5199aa35..77755af9 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -1,13 +1,11 @@ package main import ( - "compress/gzip" "flag" "fmt" "io/ioutil" "os" "path/filepath" - "sync" "github.com/netsec-ethz/fpki/pkg/db" ) @@ -34,22 +32,17 @@ func main() { // Truncate DB. exitIfError(conn.TruncateAllTables()) - - const N = 2 - mapReduce := NewMapReduce(conn) - // Disable indices in DB. exitIfError(conn.DisableIndexing("domainEntries")) // Update certificates and chains. - err = updateCertificatesFromFiles(mapReduce, N, gzFiles, csvFiles) - exitIfError(err) - - // <-mapReduce.Done + proc := NewMapReduce(conn) + proc.AddGzFiles(gzFiles) + proc.AddCsvFiles(csvFiles) + exitIfError(proc.Wait()) // Re-enable indices in DB. exitIfError(conn.EnableIndexing("domainEntries")) - // Close DB and check errors. err = conn.Close() exitIfError(err) @@ -78,122 +71,6 @@ func listOurFiles(dir string) (gzFiles, csvFiles []string) { return } -func updateCertificatesFromFiles(proc *Processor, N int, gzFiles, csvFiles []string) error { - exitIfError(processCollection(gzFiles, N, func(fileNameCh chan string) error { - return updateFromGzFileName(proc, fileNameCh) - })) - - exitIfError(processCollection(csvFiles, N, func(fileNameCh chan string) error { - return updateFromFileName(proc, fileNameCh) - })) - - return nil -} - -func processCollection(fileNames []string, N int, - fcn func(fileNameCh chan string) error) error { - - // Use a channel to dispatch batches to go routines. 
- fileNameCh := make(chan string) - - errorCh := processConcurrently(N, func() error { - return fcn(fileNameCh) - }) - // Send the GZ file names to the channel: - for _, f := range fileNames { - fileNameCh <- f - } - close(fileNameCh) - - // fmt.Println("deleteme 10") - // If there had been any errors, report them and return an error as well. - var errorsFound bool - // fmt.Println("deleteme 31") - for err := range errorCh { - // fmt.Println("deleteme 32") - if err == nil { - continue - } - errorsFound = true - fmt.Fprintf(os.Stderr, "%s\n", err) - // fmt.Println("deleteme ---------------") - } - - // fmt.Println("deleteme 41") - if errorsFound { - return fmt.Errorf("found errors") - } - return nil -} - -func processConcurrently(N int, fcn func() error) chan error { - errorCh := make(chan error) - go func() { - // Use a WaitGroup to wait for all go routines to finish. - wg := sync.WaitGroup{} - // Span N go routines. - wg.Add(N) // TODO(juagargi) remove N and span as many routines as files. 
- for i := 0; i < N; i++ { - go func() { - // fmt.Println("deleteme 20") - defer wg.Done() - errorCh <- fcn() - // fmt.Println("deleteme 22") - }() - } - // fmt.Println("deleteme 19") - wg.Wait() - // fmt.Println("deleteme 29") - close(errorCh) - // fmt.Println("deleteme 30") - }() - - return errorCh -} - -func updateFromGzFileName(proc *Processor, fileNameCh chan string) error { - for filename := range fileNameCh { - fmt.Printf("deleteme BEGIN WORK with %s\n", filename) - f, err := os.Open(filename) - if err != nil { - return err - } - gz, err := gzip.NewReader(f) - if err != nil { - return err - } - - if err := proc.IngestWithCSV(gz); err != nil { - return err - } - - if err := gz.Close(); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - fmt.Printf("deleteme END WORK with %s\n", filename) - } - return nil -} - -func updateFromFileName(proc *Processor, fileNameCh chan string) error { - for filename := range fileNameCh { - f, err := os.Open(filename) - if err != nil { - return err - } - if err := proc.IngestWithCSV(f); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - } - return nil -} - func exitIfError(err error) { if err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index b8e4e67e..b9a09e2f 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -5,7 +5,9 @@ import ( "encoding/csv" "fmt" "io" + "os" "strings" + "sync" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/db" @@ -14,9 +16,11 @@ import ( type Processor struct { BatchSize int Conn db.Conn - Done chan struct{} - fromParserCh chan *CertData + incomingFileCh chan File // indicates new file(s) with certificates to be ingested + fromParserCh chan *CertData // parser data to be sent to SMT and DB + errorCh chan error // errors accumulate here + doneCh chan error // the aggregation of all errors. 
Signals Processor is done } type CertData struct { @@ -28,25 +32,99 @@ func NewMapReduce(conn db.Conn) *Processor { p := &Processor{ BatchSize: 1000, Conn: conn, - Done: make(chan struct{}), - fromParserCh: make(chan *CertData), + doneCh: make(chan error), + incomingFileCh: make(chan File), + fromParserCh: make(chan *CertData), + errorCh: make(chan error), } - p.Process() + p.start() return p } -func (p *Processor) Process() { +func (p *Processor) start() { + // Process files and parse the CSV contents: go func() { + wg := sync.WaitGroup{} + for f := range p.incomingFileCh { + f := f + wg.Add(1) + go func() { + defer wg.Done() + r, err := f.Open() + if err != nil { + p.errorCh <- err + return + } + if err := p.ingestWithCSV(r); err != nil { + p.errorCh <- err + return + } + if err := f.Close(); err != nil { + p.errorCh <- err + return + } + }() + } + wg.Wait() + fmt.Println("deleteme done with incoming files, closing parsed data channel") + // Because we are done writing parsed content, close that channel. + close(p.fromParserCh) + }() + + // Process the parsed content into the DB: + go func() { + i := 0 for data := range p.fromParserCh { cn := data.Cert.Subject.CommonName // fmt.Printf("CN: %s\n", cn) _ = cn + i++ + if i%10000 == 0 { + fmt.Println("deleteme tick!") + } } + // fmt.Println("deleteme signaling done") + // p.doneCh <- struct{}{} + + // There is no more processing to do, close the errors channel and allow the + // error processor to finish. + close(p.errorCh) + }() + + go func() { + // Print errors and return error if there was any error printed: + p.doneCh <- p.processErrorChannel() }() } -func (p *Processor) IngestWithCSV(fileReader io.Reader) error { +func (p *Processor) Wait() error { + // Close the parsing and incoming channels: + fmt.Println("deleteme closing incomingFileCh") + close(p.incomingFileCh) + + // Wait until all data has been processed. 
+ fmt.Println("deleteme waiting for done signal") + return <-p.doneCh +} + +func (p *Processor) AddGzFiles(fileNames []string) { + for _, filename := range fileNames { + p.incomingFileCh <- &GzFile{ + FileName: filename, + } + } +} + +func (p *Processor) AddCsvFiles(fileNames []string) { + for _, filename := range fileNames { + p.incomingFileCh <- &CsvFile{ + FileName: filename, + } + } +} + +func (p *Processor) ingestWithCSV(fileReader io.Reader) error { reader := csv.NewReader(fileReader) reader.FieldsPerRecord = -1 // don't check number of fields reader.ReuseRecord = true @@ -87,3 +165,20 @@ func (p *Processor) IngestWithCSV(fileReader io.Reader) error { } return nil } + +func (p *Processor) processErrorChannel() error { + var errorsFound bool + fmt.Println("deleteme processing error channel") + for err := range p.errorCh { + if err == nil { + continue + } + fmt.Println("deleteme errors found") + errorsFound = true + fmt.Fprintf(os.Stderr, "%s\n", err) + } + if errorsFound { + return fmt.Errorf("errors found while processing. See above") + } + return nil +} From 0716fe542d27defe901c0d2e2dcc4bac2e19e9c3 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 19 Jan 2023 20:32:07 +0100 Subject: [PATCH 005/187] Ignore file containing the root hash data --- cmd/ingest/csv_files.go | 34 +++++++++++++++++++++++++++------- cmd/ingest/processor.go | 21 ++++++++------------- 2 files changed, 35 insertions(+), 20 deletions(-) diff --git a/cmd/ingest/csv_files.go b/cmd/ingest/csv_files.go index 4ff9b18a..1c8cea2f 100644 --- a/cmd/ingest/csv_files.go +++ b/cmd/ingest/csv_files.go @@ -7,16 +7,36 @@ import ( ) type File interface { + WithFile(string) File + Filename() string Open() (io.Reader, error) Close() error } -type GzFile struct { +type baseFile struct { FileName string reader *os.File +} + +func (f *baseFile) Filename() string { + return f.FileName +} + +func (f *baseFile) Close() error { + return f.reader.Close() +} + +type GzFile struct { + baseFile + gzReader *gzip.Reader } +func (f *GzFile) WithFile(fn string) File { + f.FileName = fn + return f +} + func (f *GzFile) Open() (io.Reader, error) { var err error f.reader, err = os.Open(f.FileName) @@ -38,8 +58,12 @@ func (f *GzFile) Close() error { } type CsvFile struct { - FileName string - reader *os.File + baseFile +} + +func (f *CsvFile) WithFile(fn string) File { + f.FileName = fn + return f } func (f *CsvFile) Open() (io.Reader, error) { @@ -50,7 +74,3 @@ func (f *CsvFile) Open() (io.Reader, error) { } return f.reader, nil } - -func (f *CsvFile) Close() error { - return f.reader.Close() -} diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index b9a09e2f..64400415 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -74,18 +74,17 @@ func (p *Processor) start() { // Process the parsed content into the DB: go func() { - i := 0 + count := 0 for data := range p.fromParserCh { cn := data.Cert.Subject.CommonName - // fmt.Printf("CN: %s\n", cn) _ = cn - i++ - if i%10000 == 0 { - fmt.Println("deleteme tick!") + count++ + if count%10000 == 0 { + // fmt.Println("deleteme tick!") + fmt.Print(".") } } - // 
fmt.Println("deleteme signaling done") - // p.doneCh <- struct{}{} + fmt.Println() // There is no more processing to do, close the errors channel and allow the // error processor to finish. @@ -110,17 +109,13 @@ func (p *Processor) Wait() error { func (p *Processor) AddGzFiles(fileNames []string) { for _, filename := range fileNames { - p.incomingFileCh <- &GzFile{ - FileName: filename, - } + p.incomingFileCh <- (&GzFile{}).WithFile(filename) } } func (p *Processor) AddCsvFiles(fileNames []string) { for _, filename := range fileNames { - p.incomingFileCh <- &CsvFile{ - FileName: filename, - } + p.incomingFileCh <- (&CsvFile{}).WithFile(filename) } } From f20a0e312202252d09775df3ef70f9f2b9b459ee Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 20 Jan 2023 10:22:50 +0100 Subject: [PATCH 006/187] Package cert data into batches. --- cmd/ingest/batch.go | 200 +--------------------------------------- cmd/ingest/processor.go | 46 ++++++--- 2 files changed, 37 insertions(+), 209 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 7d3e8557..931dfe79 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -1,214 +1,22 @@ package main -import ( - "context" - "fmt" - "sync" - "sync/atomic" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - mcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" -) - const BatchSize = 10000 type Batch struct { - Certs []*ctx509.Certificate - Chains [][]*ctx509.Certificate - names []*string // CN and SANs of each certificate + data []*CertData } func NewBatch() *Batch { return &Batch{ - Certs: make([]*ctx509.Certificate, 0, BatchSize), - Chains: make([][]*ctx509.Certificate, 0, BatchSize), - names: make([]*string, 0, BatchSize), + data: make([]*CertData, 0, BatchSize), } } // AddData pushed the cert data into 
the batch. func (b *Batch) AddData(d *CertData) { - b.Certs = append(b.Certs, d.Cert) - b.Chains = append(b.Chains, d.CertChain) - // Add common name and SANs: - seenNames := make(map[string]struct{}) - b.names = append(b.names, &d.Cert.Subject.CommonName) - seenNames[d.Cert.Subject.CommonName] = struct{}{} - for i, name := range d.Cert.DNSNames { - if _, ok := seenNames[name]; ok { - continue - } - b.names = append(b.names, &d.Cert.DNSNames[i]) - seenNames[name] = struct{}{} - } + b.data = append(b.data, d) } func (b *Batch) Full() bool { - return len(b.Certs) == BatchSize -} - -type BatchProcessor struct { - conn db.Conn - - incomingCh chan *Batch - incomingWg sync.WaitGroup - doneCh chan struct{} - - runningBatches map[string]*Batch - runningBatchesMu sync.Mutex - reschedules atomic.Int64 -} - -func NewBatchProcessor(conn db.Conn) *BatchProcessor { - p := &BatchProcessor{ - conn: conn, - incomingCh: make(chan *Batch), - doneCh: make(chan struct{}), - - runningBatches: make(map[string]*Batch), - } - p.start() - return p -} - -func (p *BatchProcessor) start() { - go func() { - wg := sync.WaitGroup{} - wg.Add(NumDBInserters) - for i := 0; i < NumDBInserters; i++ { - go func() { - defer wg.Done() - for batch := range p.incomingCh { - p.wrapBatch(batch) - } - }() - } - wg.Wait() - p.doneCh <- struct{}{} - }() - -} - -func (p *BatchProcessor) Wait() { - fmt.Println("deleteme waiting 1") - p.incomingWg.Wait() - close(p.incomingCh) - fmt.Println("deleteme waiting 2") - <-p.doneCh - fmt.Println("deleteme waiting 3") - fmt.Printf("# reschedules: %d\n", p.reschedules.Load()) -} - -// Process processes a Batch into the DB. -func (p *BatchProcessor) Process(b *Batch) { - p.incomingWg.Add(1) // one more batch to process - go func() { - p.incomingCh <- b - }() -} - -// wrapBatch protects the processing of a batch. 
-func (p *BatchProcessor) wrapBatch(batch *Batch) { - if err := p.checkIfBatchClashes(batch); err != nil { - // At least one name in this batch is already being processed at a different batch, - // and we can't use different batches that contain a non nil intersection. - // Just reschedule the batch in the hopes that it will eventually picked up when - // the active batches don't clash with it: - p.reschedules.Add(1) - p.incomingCh <- batch - return - } - - p.addBatchAsActive(batch) - defer p.removeBatchFromActive(batch) - defer p.incomingWg.Done() // one less batch to process - - p.processBatch(batch) - fmt.Println("batch processed") -} - -func (p *BatchProcessor) processBatch(batch *Batch) { - // Compute which domains could be affected: - affectedDomainsMap, domainCertMap, domainCertChainMap := updater.GetAffectedDomainAndCertMap( - batch.Certs, batch.Chains) - if len(affectedDomainsMap) == 0 { - return - } - - // Get all affected entries already present in the DB: - affectedDomainHashes := make([]common.SHA256Output, 0, len(affectedDomainsMap)) - for k := range affectedDomainsMap { - affectedDomainHashes = append(affectedDomainHashes, k) - } - domainEntries, err := p.conn.RetrieveDomainEntries(context.Background(), affectedDomainHashes) - if err != nil { - panic(err) - } - - // Obtain a map from SHAs to certificates: - shaToCerts := make(map[common.SHA256Output]*mcommon.DomainEntry) - for _, kv := range domainEntries { - entry, err := mcommon.DeserializeDomainEntry(kv.Value) - if err != nil { - panic(err) - } - shaToCerts[kv.Key] = entry - } - - // Update Domain Entries in DB: - updatedDomains, err := updater.UpdateDomainEntries(shaToCerts, domainCertMap, domainCertChainMap) - if err != nil { - panic(err) - } - shaToCerts, err = updater.GetDomainEntriesToWrite(updatedDomains, shaToCerts) - if err != nil { - panic(err) - } - domainEntries, err = updater.SerializeUpdatedDomainEntries(shaToCerts) - if err != nil { - panic(err) - } - _, err = 
p.conn.UpdateDomainEntries(context.Background(), domainEntries) - if err != nil { - panic(err) - } - - // Add entries to the `updates` table containing all the modified domains: - if _, err = p.conn.AddUpdatedDomains(context.Background(), affectedDomainHashes); err != nil { - panic(err) - } -} - -func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { - p.runningBatchesMu.Lock() - defer p.runningBatchesMu.Unlock() - - for _, n := range b.names { - if other, ok := p.runningBatches[*n]; ok && other != b { - return fmt.Errorf("same CN in different batches, pointers: %p, %p. CN: %s", - other, b.names, *n) - } - } - return nil -} - -func (p *BatchProcessor) addBatchAsActive(b *Batch) { - p.runningBatchesMu.Lock() - defer p.runningBatchesMu.Unlock() - - for _, n := range b.names { - p.runningBatches[*n] = b - } -} - -func (p *BatchProcessor) removeBatchFromActive(b *Batch) { - p.runningBatchesMu.Lock() - defer p.runningBatchesMu.Unlock() - - for _, n := range b.names { - delete(p.runningBatches, *n) - } + return len(b.data) == BatchSize } diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 64400415..67aead13 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -17,10 +17,13 @@ type Processor struct { BatchSize int Conn db.Conn - incomingFileCh chan File // indicates new file(s) with certificates to be ingested - fromParserCh chan *CertData // parser data to be sent to SMT and DB - errorCh chan error // errors accumulate here - doneCh chan error // the aggregation of all errors. Signals Processor is done + incomingFileCh chan File // indicates new file(s) with certificates to be ingested + fromParserCh chan *CertData // parser data to be sent to SMT and DB\ + batchCh chan *Batch // batches are sent here to be inserted in DB + batchDispatches map[string]*Batch // CN to active batches, to avoid same CN in different ones + + errorCh chan error // errors accumulate here + doneCh chan error // the aggregation of all errors. 
Signals Processor is done } type CertData struct { @@ -33,10 +36,13 @@ func NewMapReduce(conn db.Conn) *Processor { BatchSize: 1000, Conn: conn, - doneCh: make(chan error), - incomingFileCh: make(chan File), - fromParserCh: make(chan *CertData), - errorCh: make(chan error), + incomingFileCh: make(chan File), + fromParserCh: make(chan *CertData), + batchCh: make(chan *Batch), + batchDispatches: make(map[string]*Batch), + + errorCh: make(chan error), + doneCh: make(chan error), } p.start() return p @@ -74,18 +80,32 @@ func (p *Processor) start() { // Process the parsed content into the DB: go func() { - count := 0 + // count := 0 + batch := NewBatch() for data := range p.fromParserCh { cn := data.Cert.Subject.CommonName - _ = cn - count++ - if count%10000 == 0 { - // fmt.Println("deleteme tick!") + if b, ok := p.batchDispatches[cn]; ok && b != batch { + // Same CN being processed in a different batch + panic("same CN in different batches") + } + batch.AddData(data) + if batch.Full() { + p.batchCh <- batch fmt.Print(".") + batch = NewBatch() } } + // Sent last batch, which may have zero size. + p.batchCh <- batch fmt.Println() + // We have just packaged the data into batches. We can close the batch channel now. + close(p.batchCh) + }() + go func() { + for batch := range p.batchCh { + _ = batch + } // There is no more processing to do, close the errors channel and allow the // error processor to finish. close(p.errorCh) From eaa0e737dd9b75595ec67119e1e61374be77b098 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 20 Jan 2023 12:34:01 +0100 Subject: [PATCH 007/187] Add a BatchProcessor. Check there are no clashes among batches (per common name). 
--- cmd/ingest/batch.go | 83 +++++++++++++++++++++++++++++++++++++++++ cmd/ingest/main.go | 2 +- cmd/ingest/processor.go | 34 +++++------------ 3 files changed, 94 insertions(+), 25 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 931dfe79..bb917505 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -1,22 +1,105 @@ package main +import ( + "fmt" + "sync" + + "github.com/netsec-ethz/fpki/pkg/db" +) + const BatchSize = 10000 type Batch struct { data []*CertData + cns []*string } func NewBatch() *Batch { return &Batch{ data: make([]*CertData, 0, BatchSize), + cns: make([]*string, 0, BatchSize), } } // AddData pushed the cert data into the batch. func (b *Batch) AddData(d *CertData) { b.data = append(b.data, d) + b.cns = append(b.cns, &d.Cert.Subject.CommonName) } func (b *Batch) Full() bool { return len(b.data) == BatchSize } + +type BatchProcessor struct { + conn db.Conn + incomingCh chan *Batch + // finishedCh chan *Batch + + runningBatches map[string]*Batch + runningBatchesMu sync.Mutex +} + +func NewBatchProcessor(conn db.Conn) *BatchProcessor { + p := &BatchProcessor{ + conn: conn, + incomingCh: make(chan *Batch), + + runningBatches: make(map[string]*Batch), + runningBatchesMu: sync.Mutex{}, + } + p.start() + return p +} + +func (p *BatchProcessor) start() { + go func() { + for batch := range p.incomingCh { + go p.processBatch(batch) + } + }() +} + +// Process processes a Batch into the DB. 
+func (p *BatchProcessor) Process(b *Batch) { + p.incomingCh <- b +} + +func (p *BatchProcessor) processBatch(b *Batch) { + if err := p.checkIfBatchClashes(b); err != nil { + panic(err) + } + p.addBatchAsActive(b) + // TODO(juagargi) do the actual update + p.removeBatchFromActive(b) +} + +func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { + p.runningBatchesMu.Lock() + defer p.runningBatchesMu.Unlock() + + for _, cn := range b.cns { + if other, ok := p.runningBatches[*cn]; ok && other != b { + return fmt.Errorf("same CN in different batches") + } + } + return nil +} + +func (p *BatchProcessor) addBatchAsActive(b *Batch) { + p.runningBatchesMu.Lock() + defer p.runningBatchesMu.Unlock() + + for _, cn := range b.cns { + p.runningBatches[*cn] = b + } +} + +func (p *BatchProcessor) removeBatchFromActive(b *Batch) { + p.runningBatchesMu.Lock() + defer p.runningBatchesMu.Unlock() + + for _, cn := range b.cns { + delete(p.runningBatches, *cn) + } +} diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 77755af9..b07539c0 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -36,7 +36,7 @@ func main() { exitIfError(conn.DisableIndexing("domainEntries")) // Update certificates and chains. 
- proc := NewMapReduce(conn) + proc := NewProcessor(conn) proc.AddGzFiles(gzFiles) proc.AddCsvFiles(csvFiles) exitIfError(proc.Wait()) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 67aead13..37901573 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -17,10 +17,9 @@ type Processor struct { BatchSize int Conn db.Conn - incomingFileCh chan File // indicates new file(s) with certificates to be ingested - fromParserCh chan *CertData // parser data to be sent to SMT and DB\ - batchCh chan *Batch // batches are sent here to be inserted in DB - batchDispatches map[string]*Batch // CN to active batches, to avoid same CN in different ones + incomingFileCh chan File // indicates new file(s) with certificates to be ingested + fromParserCh chan *CertData // parser data to be sent to SMT and DB\ + batchProcessor *BatchProcessor errorCh chan error // errors accumulate here doneCh chan error // the aggregation of all errors. Signals Processor is done @@ -31,15 +30,14 @@ type CertData struct { CertChain []*ctx509.Certificate } -func NewMapReduce(conn db.Conn) *Processor { +func NewProcessor(conn db.Conn) *Processor { p := &Processor{ BatchSize: 1000, Conn: conn, - incomingFileCh: make(chan File), - fromParserCh: make(chan *CertData), - batchCh: make(chan *Batch), - batchDispatches: make(map[string]*Batch), + incomingFileCh: make(chan File), + fromParserCh: make(chan *CertData), + batchProcessor: NewBatchProcessor(conn), errorCh: make(chan error), doneCh: make(chan error), @@ -83,29 +81,17 @@ func (p *Processor) start() { // count := 0 batch := NewBatch() for data := range p.fromParserCh { - cn := data.Cert.Subject.CommonName - if b, ok := p.batchDispatches[cn]; ok && b != batch { - // Same CN being processed in a different batch - panic("same CN in different batches") - } batch.AddData(data) if batch.Full() { - p.batchCh <- batch + p.batchProcessor.Process(batch) fmt.Print(".") batch = NewBatch() } } - // Sent last batch, which may have 
zero size. - p.batchCh <- batch + // Process last batch, which may have zero size. + p.batchProcessor.Process(batch) fmt.Println() - // We have just packaged the data into batches. We can close the batch channel now. - close(p.batchCh) - }() - go func() { - for batch := range p.batchCh { - _ = batch - } // There is no more processing to do, close the errors channel and allow the // error processor to finish. close(p.errorCh) From 277d0038610027a2938f56d0e93f089353ce485c Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 20 Jan 2023 20:19:44 +0100 Subject: [PATCH 008/187] WIP updating DB with certs --- cmd/ingest/batch.go | 83 ++++++++++++++++++++++++++++++++--------- cmd/ingest/processor.go | 2 +- 2 files changed, 66 insertions(+), 19 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index bb917505..596fe9b7 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -3,50 +3,59 @@ package main import ( "fmt" "sync" + "sync/atomic" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) const BatchSize = 10000 type Batch struct { - data []*CertData - cns []*string + Certs []*ctx509.Certificate + Chains [][]*ctx509.Certificate + cns []*string } func NewBatch() *Batch { return &Batch{ - data: make([]*CertData, 0, BatchSize), - cns: make([]*string, 0, BatchSize), + Certs: make([]*ctx509.Certificate, 0, BatchSize), + Chains: make([][]*ctx509.Certificate, 0, BatchSize), + cns: make([]*string, 0, BatchSize), } } // AddData pushed the cert data into the batch. 
func (b *Batch) AddData(d *CertData) { - b.data = append(b.data, d) + b.Certs = append(b.Certs, d.Cert) + b.Chains = append(b.Chains, d.CertChain) b.cns = append(b.cns, &d.Cert.Subject.CommonName) } func (b *Batch) Full() bool { - return len(b.data) == BatchSize + return len(b.Certs) == BatchSize } type BatchProcessor struct { - conn db.Conn + conn db.Conn + incomingCh chan *Batch - // finishedCh chan *Batch + incomingWg sync.WaitGroup + doneCh chan struct{} runningBatches map[string]*Batch runningBatchesMu sync.Mutex + reschedules atomic.Int64 } func NewBatchProcessor(conn db.Conn) *BatchProcessor { p := &BatchProcessor{ conn: conn, incomingCh: make(chan *Batch), + doneCh: make(chan struct{}), - runningBatches: make(map[string]*Batch), - runningBatchesMu: sync.Mutex{}, + runningBatches: make(map[string]*Batch), } p.start() return p @@ -55,23 +64,60 @@ func NewBatchProcessor(conn db.Conn) *BatchProcessor { func (p *BatchProcessor) start() { go func() { for batch := range p.incomingCh { - go p.processBatch(batch) + go p.wrapBatch(batch) } + p.doneCh <- struct{}{} }() + +} + +func (p *BatchProcessor) Wait() { + fmt.Println("deleteme waiting 1") + p.incomingWg.Wait() + close(p.incomingCh) + fmt.Println("deleteme waiting 2") + <-p.doneCh + fmt.Println("deleteme waiting 3") + fmt.Printf("# reschedules: %d\n", p.reschedules.Load()) } // Process processes a Batch into the DB. func (p *BatchProcessor) Process(b *Batch) { - p.incomingCh <- b + p.incomingWg.Add(1) // one more batch to process + go func() { + p.incomingCh <- b + }() +} + +// wrapBatch protects the processing of a batch. +func (p *BatchProcessor) wrapBatch(batch *Batch) { + if err := p.checkIfBatchClashes(batch); err != nil { + // At least one name in this batch is already being processed at a different batch, + // and we can't use different batches that contain a non nil intersection. 
+ // Just reschedule the batch in the hopes that it will eventually picked up when + // the active batches don't clash with it: + p.reschedules.Add(1) + p.incomingCh <- batch + return + } + + p.addBatchAsActive(batch) + defer p.removeBatchFromActive(batch) + defer p.incomingWg.Done() // one less batch to process + + p.processBatch(batch) } -func (p *BatchProcessor) processBatch(b *Batch) { - if err := p.checkIfBatchClashes(b); err != nil { - panic(err) +func (p *BatchProcessor) processBatch(batch *Batch) { + affectedDomainsMap, domainCertMap, domainCertChainMap := + updater.GetAffectedDomainAndCertMap(batch.Certs, batch.Chains) + if len(affectedDomainsMap) == 0 { + return } - p.addBatchAsActive(b) + _ = affectedDomainsMap + _ = domainCertMap + _ = domainCertChainMap // TODO(juagargi) do the actual update - p.removeBatchFromActive(b) } func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { @@ -80,7 +126,8 @@ func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { for _, cn := range b.cns { if other, ok := p.runningBatches[*cn]; ok && other != b { - return fmt.Errorf("same CN in different batches") + return fmt.Errorf("same CN in different batches, pointers: %p, %p. CN: %s", + other, b.cns, *cn) } } return nil diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 37901573..b73f6653 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -78,7 +78,6 @@ func (p *Processor) start() { // Process the parsed content into the DB: go func() { - // count := 0 batch := NewBatch() for data := range p.fromParserCh { batch.AddData(data) @@ -91,6 +90,7 @@ func (p *Processor) start() { // Process last batch, which may have zero size. p.batchProcessor.Process(batch) fmt.Println() + p.batchProcessor.Wait() // There is no more processing to do, close the errors channel and allow the // error processor to finish. From 3b29c0722160efc0dd4adfe452604cbdb4789fb4 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Fri, 20 Jan 2023 21:10:43 +0100 Subject: [PATCH 009/187] Store changes in DB. Partially use the calls in updater and map updater to serialize the set of certificates and store it in the DB. --- cmd/ingest/batch.go | 50 +++++++++++++++++++++++++++++++++++++++------ 1 file changed, 44 insertions(+), 6 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 596fe9b7..fb8573a3 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -1,12 +1,15 @@ package main import ( + "context" "fmt" "sync" "sync/atomic" ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + mcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) @@ -109,15 +112,50 @@ func (p *BatchProcessor) wrapBatch(batch *Batch) { } func (p *BatchProcessor) processBatch(batch *Batch) { - affectedDomainsMap, domainCertMap, domainCertChainMap := - updater.GetAffectedDomainAndCertMap(batch.Certs, batch.Chains) + // Compute which domains could be affected: + affectedDomainsMap, domainCertMap, domainCertChainMap := updater.GetAffectedDomainAndCertMap( + batch.Certs, batch.Chains) if len(affectedDomainsMap) == 0 { return } - _ = affectedDomainsMap - _ = domainCertMap - _ = domainCertChainMap - // TODO(juagargi) do the actual update + + // Get all affected entries already present in the DB: + affectedDomainHashes := make([]common.SHA256Output, 0, len(affectedDomainsMap)) + for k := range affectedDomainsMap { + affectedDomainHashes = append(affectedDomainHashes, k) + } + domainEntries, err := p.conn.RetrieveDomainEntries(context.Background(), affectedDomainHashes) + if err != nil { + panic(err) + } + + // Obtain a map from SHAs to certificates: + shaToCerts := make(map[common.SHA256Output]*mcommon.DomainEntry) + for _, kv := range domainEntries { + entry, err := 
mcommon.DeserializeDomainEntry(kv.Value) + if err != nil { + panic(err) + } + shaToCerts[kv.Key] = entry + } + + // Update Domain Entries in DB: + updatedDomains, err := updater.UpdateDomainEntries(shaToCerts, domainCertMap, domainCertChainMap) + if err != nil { + panic(err) + } + shaToCerts, err = updater.GetDomainEntriesToWrite(updatedDomains, shaToCerts) + if err != nil { + panic(err) + } + domainEntries, err = updater.SerializeUpdatedDomainEntries(shaToCerts) + if err != nil { + panic(err) + } + _, err = p.conn.UpdateDomainEntries(context.Background(), domainEntries) + if err != nil { + panic(err) + } } func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { From 607bc534ce8ecd182dbce6b756dd802d6e7d9c35 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 23 Jan 2023 13:26:38 +0100 Subject: [PATCH 010/187] Preparing to do SMT updates. --- cmd/ingest/batch.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index fb8573a3..c8cb78a0 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -109,6 +109,7 @@ func (p *BatchProcessor) wrapBatch(batch *Batch) { defer p.incomingWg.Done() // one less batch to process p.processBatch(batch) + fmt.Println("batch processed") } func (p *BatchProcessor) processBatch(batch *Batch) { @@ -156,6 +157,14 @@ func (p *BatchProcessor) processBatch(batch *Batch) { if err != nil { panic(err) } + + inputKeys, inputValues, err := updater.KeyValuePairToSMTInput(domainEntries) + if err != nil { + panic(err) + } + // TODO(juagargi) update SMT with the above + _ = inputKeys + _ = inputValues } func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { From d0b4e2c4e19e86bb1e1690d334a91004e8419783 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 24 Jan 2023 06:25:07 +0100 Subject: [PATCH 011/187] Add modified domains to updates table. 
--- cmd/ingest/batch.go | 12 ++++-------- cmd/ingest/main.go | 2 ++ 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index c8cb78a0..319e0f0c 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -145,6 +145,10 @@ func (p *BatchProcessor) processBatch(batch *Batch) { if err != nil { panic(err) } + // Add entries to the `updates` table containing all the modified domains: + if _, err = p.conn.AddUpdatedDomains(context.Background(), affectedDomainHashes); err != nil { + panic(err) + } shaToCerts, err = updater.GetDomainEntriesToWrite(updatedDomains, shaToCerts) if err != nil { panic(err) @@ -157,14 +161,6 @@ func (p *BatchProcessor) processBatch(batch *Batch) { if err != nil { panic(err) } - - inputKeys, inputValues, err := updater.KeyValuePairToSMTInput(domainEntries) - if err != nil { - panic(err) - } - // TODO(juagargi) update SMT with the above - _ = inputKeys - _ = inputValues } func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index b07539c0..1a4c4b5b 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -34,6 +34,7 @@ func main() { exitIfError(conn.TruncateAllTables()) // Disable indices in DB. exitIfError(conn.DisableIndexing("domainEntries")) + exitIfError(conn.DisableIndexing("updates")) // Update certificates and chains. proc := NewProcessor(conn) @@ -42,6 +43,7 @@ func main() { exitIfError(proc.Wait()) // Re-enable indices in DB. + exitIfError(conn.EnableIndexing("updates")) exitIfError(conn.EnableIndexing("domainEntries")) // Close DB and check errors. err = conn.Close() From ee2adc19959a7731cf6467ed9212967581d60576 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 24 Jan 2023 17:34:35 +0100 Subject: [PATCH 012/187] Update SMT. The pipeline is not efficient, as we read certificates, we write them, we read them again, update SMT, and write SMT. Also the SMT updater can't update the records in parallel. 
--- cmd/ingest/batch.go | 9 +++++---- cmd/ingest/processor.go | 10 ++++++++++ pkg/db/write.go | 12 ++++++++---- 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 319e0f0c..15e07847 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -145,10 +145,6 @@ func (p *BatchProcessor) processBatch(batch *Batch) { if err != nil { panic(err) } - // Add entries to the `updates` table containing all the modified domains: - if _, err = p.conn.AddUpdatedDomains(context.Background(), affectedDomainHashes); err != nil { - panic(err) - } shaToCerts, err = updater.GetDomainEntriesToWrite(updatedDomains, shaToCerts) if err != nil { panic(err) @@ -161,6 +157,11 @@ func (p *BatchProcessor) processBatch(batch *Batch) { if err != nil { panic(err) } + + // Add entries to the `updates` table containing all the modified domains: + if _, err = p.conn.AddUpdatedDomains(context.Background(), affectedDomainHashes); err != nil { + panic(err) + } } func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index b73f6653..70de5d24 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -92,6 +92,16 @@ func (p *Processor) start() { fmt.Println() p.batchProcessor.Wait() + fmt.Printf("\ndeleteme done ingesting the certificates. SMT still to go\n\n\n\n") + + // Now start processing the changed domains into the SMT: + smtProcessor := NewSMTUpdater(p.Conn, nil, 32) + smtProcessor.Start() + if err := smtProcessor.Wait(); err != nil { + fmt.Printf("deleteme error found in SMT processing: %s\n", err) + p.errorCh <- err + } + // There is no more processing to do, close the errors channel and allow the // error processor to finish. 
close(p.errorCh) diff --git a/pkg/db/write.go b/pkg/db/write.go index 932fd9ea..48059439 100644 --- a/pkg/db/write.go +++ b/pkg/db/write.go @@ -62,10 +62,14 @@ func (c *mysqlDB) RemoveAllUpdatedDomains(ctx context.Context) error { return nil } -type HugeLeafError struct { - ID *common.SHA256Output - Index int -} +// ******************************************************************** +// +// Common +// +// ******************************************************************** +// worker to update key-value pairs +func (c *mysqlDB) doUpdatePairs(ctx context.Context, keyValuePairs []*KeyValuePair, + stmtGetter prepStmtGetter) (int, error) { func (HugeLeafError) Error() string { return "Huge Leaf" From c680ee1b6dff940100474ae3f0ecc3fa16ec8aa0 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 25 Jan 2023 10:52:27 +0100 Subject: [PATCH 013/187] Check clashes between batches also using SANs. --- cmd/ingest/batch.go | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 15e07847..4ff17861 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -18,14 +18,14 @@ const BatchSize = 10000 type Batch struct { Certs []*ctx509.Certificate Chains [][]*ctx509.Certificate - cns []*string + names []*string // CN and SANs of each certificate } func NewBatch() *Batch { return &Batch{ Certs: make([]*ctx509.Certificate, 0, BatchSize), Chains: make([][]*ctx509.Certificate, 0, BatchSize), - cns: make([]*string, 0, BatchSize), + names: make([]*string, 0, BatchSize), } } @@ -33,7 +33,17 @@ func NewBatch() *Batch { func (b *Batch) AddData(d *CertData) { b.Certs = append(b.Certs, d.Cert) b.Chains = append(b.Chains, d.CertChain) - b.cns = append(b.cns, &d.Cert.Subject.CommonName) + // Add common name and SANs: + seenNames := make(map[string]struct{}) + b.names = append(b.names, &d.Cert.Subject.CommonName) + seenNames[d.Cert.Subject.CommonName] = struct{}{} + for i, name := range 
d.Cert.DNSNames { + if _, ok := seenNames[name]; ok { + continue + } + b.names = append(b.names, &d.Cert.DNSNames[i]) + seenNames[name] = struct{}{} + } } func (b *Batch) Full() bool { @@ -168,10 +178,10 @@ func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { p.runningBatchesMu.Lock() defer p.runningBatchesMu.Unlock() - for _, cn := range b.cns { - if other, ok := p.runningBatches[*cn]; ok && other != b { + for _, n := range b.names { + if other, ok := p.runningBatches[*n]; ok && other != b { return fmt.Errorf("same CN in different batches, pointers: %p, %p. CN: %s", - other, b.cns, *cn) + other, b.names, *n) } } return nil @@ -181,8 +191,8 @@ func (p *BatchProcessor) addBatchAsActive(b *Batch) { p.runningBatchesMu.Lock() defer p.runningBatchesMu.Unlock() - for _, cn := range b.cns { - p.runningBatches[*cn] = b + for _, n := range b.names { + p.runningBatches[*n] = b } } @@ -190,7 +200,7 @@ func (p *BatchProcessor) removeBatchFromActive(b *Batch) { p.runningBatchesMu.Lock() defer p.runningBatchesMu.Unlock() - for _, cn := range b.cns { - delete(p.runningBatches, *cn) + for _, n := range b.names { + delete(p.runningBatches, *n) } } From 437d405a1f9ceb492255995f037f3724388ddda4 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 27 Jan 2023 16:32:08 +0100 Subject: [PATCH 014/187] New DB structure. Describe the new DB structure in the README. Change the creation script. --- pkg/db/README.md | 67 ++++++++++++++++- tools/create_schema.sh | 167 +++++++++++++++++++++-------------------- 2 files changed, 153 insertions(+), 81 deletions(-) diff --git a/pkg/db/README.md b/pkg/db/README.md index 21eaf4de..80d43ea8 100644 --- a/pkg/db/README.md +++ b/pkg/db/README.md @@ -1,2 +1,67 @@ + +# Legacy, notes from Yongzhe For functions which retrieves one key-value pair, sql.ErrNoRows will be thrown if no rows is founded. -For functions which retrieves a list of key-value pairs, sql.ErrNoRows will be omitted. 
So please check the length of the output to decide if the fetching is incomplete. \ No newline at end of file +For functions which retrieves a list of key-value pairs, sql.ErrNoRows will be omitted. So please check the length of the output to decide if the fetching is incomplete. + +# New notes + +# Design + +The DB supports the map server in two aspects: +1. Stores the certificates and their trust chains +2. Stores the Sparse Merkle Tree structure on disk + +We need very efficient requests based on the domain name. +The update process has to simply retrieve the new certificates and their trust chains, +add them to the DB, write down those updated domains, and process the SMT for those +domains only. + + +## Tables +For performance reasons, no foreign keys exist in any table. + +- `certs` table + 1. `id`: PK, this is the SHA256 of the certificate. + 2. `payload`: BLOB, this is the certificate, serialized. + 3. `parent`: this is the parent certificate, in the trust chain, or `NULL` if root. +- `domains` table. This table is updated in DB from the `certs` table + 1. `cert_id`: PK, SHA256 of the certificate + 2. `domain_id`: PK, SHA256 of the domain + 3. `domain`: index, text, the name of the domain + 4. `payload_id`: BIGINT, points to the serialized certificate collection, + according to the rules. +- `domain_payloads` table. Holds the collection of certificates for each domain. + This comes from all the certificates that have their `certs.domain` equal + to this `domains.domain`, serialized following certain rules. + 1. `id`: BIGINT + 2. `payload`: BLOB + 4. `payload_hash`: SHA256 of the serialized certificate collection for the domain. +- `dirty` table + 1. `domain_id`: PK, SHA256 of each of the modified domains. + +SMT tables: +- `tree` table, remains the same as before + 1. `id`: PK, auto increment. + 2. `key32`: index, whatever the SMT library uses as key, 32 bytes. + 3. `value`: whatever the SMT library uses as value. +- `root` table. 
Should contain zero or one elements. + 1. `key32`: PK, 32 bytes, SHA256 of the root of the SMT. + +The `dirty` table should always be non-empty when the SMT update process starts, +as it contains the domains that have been altered, and those that will be +sent to the SMT to update. + + + +## Update Process +We describe the update process with the following steps: +1. Obtain the data. +2. Create (`upsert` or similar) a new record per new certificate C and domain D. +3. Write the modified domains into a table `dirty` (formerly known as the `updates` table). +4. In DB and via a stored procedure, +serialize the certificate collection (following certain rules) and write it, plus its SHA256, +to the table. +5. Wait until all batches have finished. +6. Update the SMT with the material from (4), and using the domains in `dirty`. +7. Store the `tree` table in DB. +8. Truncate the `dirty` table. diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 9ecbd667..257c7587 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -18,53 +18,49 @@ MYSQLCMD="mysql -u root" CMD=$(cat < Date: Fri, 27 Jan 2023 16:37:39 +0100 Subject: [PATCH 015/187] WIP Insert certificates each in their own record. Disabled old update functionality in pkg/db. Missing: - Check if they exist already. - Update domains, domain_payloads, and dirty tables. 
--- cmd/ingest/batch.go | 163 +++++---------- cmd/ingest/main.go | 6 - cmd/ingest/processor.go | 21 +- pkg/db/db.go | 4 + pkg/db/mysql.go | 196 ++++++++++++------- pkg/mapserver/internal/mockdb_for_testing.go | 14 ++ pkg/mapserver/updater/certs_updater.go | 66 +++++-- pkg/mapserver/updater/certs_updater_test.go | 74 +++++-- pkg/mapserver/updater/dbutil.go | 6 +- pkg/mapserver/updater/tools.go | 4 +- pkg/mapserver/updater/tools_test.go | 2 +- pkg/mapserver/updater/updater.go | 35 +++- pkg/mapserver/updater/updater_test.go | 2 +- 13 files changed, 348 insertions(+), 245 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 4ff17861..93f73c9c 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -4,46 +4,30 @@ import ( "context" "fmt" "sync" - "sync/atomic" ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" - mcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) -const BatchSize = 10000 +const BatchSize = 1000 type Batch struct { Certs []*ctx509.Certificate Chains [][]*ctx509.Certificate - names []*string // CN and SANs of each certificate } func NewBatch() *Batch { return &Batch{ Certs: make([]*ctx509.Certificate, 0, BatchSize), Chains: make([][]*ctx509.Certificate, 0, BatchSize), - names: make([]*string, 0, BatchSize), } } -// AddData pushed the cert data into the batch. -func (b *Batch) AddData(d *CertData) { +// AddCert pushed the cert data into the batch. 
+func (b *Batch) AddCert(d *CertData) { b.Certs = append(b.Certs, d.Cert) b.Chains = append(b.Chains, d.CertChain) - // Add common name and SANs: - seenNames := make(map[string]struct{}) - b.names = append(b.names, &d.Cert.Subject.CommonName) - seenNames[d.Cert.Subject.CommonName] = struct{}{} - for i, name := range d.Cert.DNSNames { - if _, ok := seenNames[name]; ok { - continue - } - b.names = append(b.names, &d.Cert.DNSNames[i]) - seenNames[name] = struct{}{} - } } func (b *Batch) Full() bool { @@ -56,10 +40,6 @@ type BatchProcessor struct { incomingCh chan *Batch incomingWg sync.WaitGroup doneCh chan struct{} - - runningBatches map[string]*Batch - runningBatchesMu sync.Mutex - reschedules atomic.Int64 } func NewBatchProcessor(conn db.Conn) *BatchProcessor { @@ -67,21 +47,58 @@ func NewBatchProcessor(conn db.Conn) *BatchProcessor { conn: conn, incomingCh: make(chan *Batch), doneCh: make(chan struct{}), - - runningBatches: make(map[string]*Batch), } p.start() return p } func (p *BatchProcessor) start() { + db := p.conn.DB() + _ = db + ini := func() { + // _, err := db.Exec("LOCK TABLES certs WRITE;") + // if err != nil { + // panic(err) + // } + // if _, err := db.Exec("SET autocommit=0"); err != nil { + // panic(err) + // } + // if _, err := db.Exec("ALTER TABLE certs DISABLE KEYS"); err != nil { + // panic(err) + // } + if _, err := db.Exec("ALTER TABLE certs DROP INDEX id"); err != nil { + panic(err) + } + } + end := func() { + + fmt.Println("deleteme before enabling keys") + if _, err := db.Exec("ALTER TABLE certs ADD UNIQUE INDEX id (id ASC)"); err != nil { + panic(err) + } + // if _, err := db.Exec("ALTER TABLE certs ENABLE KEYS"); err != nil { + // panic(err) + // } + fmt.Println("deleteme keys enabled.") + + fmt.Println("deleteme about to commit all changes") + // if _, err := db.Exec("COMMIT"); err != nil { + // panic(err) + // } + fmt.Println("deleteme commit succeeded.") + // if _, err := db.Exec("UNLOCK TABLES"); err != nil { + // panic(err) + // } + 
} + + ini() go func() { for batch := range p.incomingCh { go p.wrapBatch(batch) } + end() p.doneCh <- struct{}{} }() - } func (p *BatchProcessor) Wait() { @@ -91,7 +108,6 @@ func (p *BatchProcessor) Wait() { fmt.Println("deleteme waiting 2") <-p.doneCh fmt.Println("deleteme waiting 3") - fmt.Printf("# reschedules: %d\n", p.reschedules.Load()) } // Process processes a Batch into the DB. @@ -104,103 +120,16 @@ func (p *BatchProcessor) Process(b *Batch) { // wrapBatch protects the processing of a batch. func (p *BatchProcessor) wrapBatch(batch *Batch) { - if err := p.checkIfBatchClashes(batch); err != nil { - // At least one name in this batch is already being processed at a different batch, - // and we can't use different batches that contain a non nil intersection. - // Just reschedule the batch in the hopes that it will eventually picked up when - // the active batches don't clash with it: - p.reschedules.Add(1) - p.incomingCh <- batch - return - } - - p.addBatchAsActive(batch) - defer p.removeBatchFromActive(batch) defer p.incomingWg.Done() // one less batch to process - p.processBatch(batch) fmt.Println("batch processed") } func (p *BatchProcessor) processBatch(batch *Batch) { - // Compute which domains could be affected: - affectedDomainsMap, domainCertMap, domainCertChainMap := updater.GetAffectedDomainAndCertMap( - batch.Certs, batch.Chains) - if len(affectedDomainsMap) == 0 { - return - } - - // Get all affected entries already present in the DB: - affectedDomainHashes := make([]common.SHA256Output, 0, len(affectedDomainsMap)) - for k := range affectedDomainsMap { - affectedDomainHashes = append(affectedDomainHashes, k) - } - domainEntries, err := p.conn.RetrieveDomainEntries(context.Background(), affectedDomainHashes) - if err != nil { - panic(err) - } - - // Obtain a map from SHAs to certificates: - shaToCerts := make(map[common.SHA256Output]*mcommon.DomainEntry) - for _, kv := range domainEntries { - entry, err := mcommon.DeserializeDomainEntry(kv.Value) - 
if err != nil { - panic(err) - } - shaToCerts[kv.Key] = entry - } - - // Update Domain Entries in DB: - updatedDomains, err := updater.UpdateDomainEntries(shaToCerts, domainCertMap, domainCertChainMap) + // Store certificates in DB: + err := updater.UpdateCerts(context.Background(), p.conn, batch.Certs, batch.Chains) if err != nil { panic(err) } - shaToCerts, err = updater.GetDomainEntriesToWrite(updatedDomains, shaToCerts) - if err != nil { - panic(err) - } - domainEntries, err = updater.SerializeUpdatedDomainEntries(shaToCerts) - if err != nil { - panic(err) - } - _, err = p.conn.UpdateDomainEntries(context.Background(), domainEntries) - if err != nil { - panic(err) - } - - // Add entries to the `updates` table containing all the modified domains: - if _, err = p.conn.AddUpdatedDomains(context.Background(), affectedDomainHashes); err != nil { - panic(err) - } -} - -func (p *BatchProcessor) checkIfBatchClashes(b *Batch) error { - p.runningBatchesMu.Lock() - defer p.runningBatchesMu.Unlock() - - for _, n := range b.names { - if other, ok := p.runningBatches[*n]; ok && other != b { - return fmt.Errorf("same CN in different batches, pointers: %p, %p. CN: %s", - other, b.names, *n) - } - } - return nil -} - -func (p *BatchProcessor) addBatchAsActive(b *Batch) { - p.runningBatchesMu.Lock() - defer p.runningBatchesMu.Unlock() - - for _, n := range b.names { - p.runningBatches[*n] = b - } -} - -func (p *BatchProcessor) removeBatchFromActive(b *Batch) { - p.runningBatchesMu.Lock() - defer p.runningBatchesMu.Unlock() - - for _, n := range b.names { - delete(p.runningBatches, *n) - } + // TODO(juagargi) push entries to the dirty table } diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 1a4c4b5b..857c1ce6 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -32,9 +32,6 @@ func main() { // Truncate DB. exitIfError(conn.TruncateAllTables()) - // Disable indices in DB. 
- exitIfError(conn.DisableIndexing("domainEntries")) - exitIfError(conn.DisableIndexing("updates")) // Update certificates and chains. proc := NewProcessor(conn) @@ -42,9 +39,6 @@ func main() { proc.AddCsvFiles(csvFiles) exitIfError(proc.Wait()) - // Re-enable indices in DB. - exitIfError(conn.EnableIndexing("updates")) - exitIfError(conn.EnableIndexing("domainEntries")) // Close DB and check errors. err = conn.Close() exitIfError(err) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 70de5d24..70a87076 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -11,6 +11,7 @@ import ( ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) type Processor struct { @@ -18,7 +19,7 @@ type Processor struct { Conn db.Conn incomingFileCh chan File // indicates new file(s) with certificates to be ingested - fromParserCh chan *CertData // parser data to be sent to SMT and DB\ + fromParserCh chan *CertData // parser data to be sent to SMT and DB batchProcessor *BatchProcessor errorCh chan error // errors accumulate here @@ -26,8 +27,9 @@ type Processor struct { } type CertData struct { - Cert *ctx509.Certificate - CertChain []*ctx509.Certificate + DomainNames []string + Cert *ctx509.Certificate + CertChain []*ctx509.Certificate } func NewProcessor(conn db.Conn) *Processor { @@ -80,7 +82,7 @@ func (p *Processor) start() { go func() { batch := NewBatch() for data := range p.fromParserCh { - batch.AddData(data) + batch.AddCert(data) if batch.Full() { p.batchProcessor.Process(batch) fmt.Print(".") @@ -94,6 +96,10 @@ func (p *Processor) start() { fmt.Printf("\ndeleteme done ingesting the certificates. 
SMT still to go\n\n\n\n") + if 4%5 != 0 { // deleteme + close(p.errorCh) + return + } // Now start processing the changed domains into the SMT: smtProcessor := NewSMTUpdater(p.Conn, nil, 32) smtProcessor.Start() @@ -156,6 +162,8 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { return err } + domainNames := updater.ExtractCertDomains(cert) + // The certificate chain is a list of base64 strings separated by semicolon (;). strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) @@ -170,8 +178,9 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { } } p.fromParserCh <- &CertData{ - Cert: cert, - CertChain: chain, + DomainNames: domainNames, + Cert: cert, + CertChain: chain, } } return nil diff --git a/pkg/db/db.go b/pkg/db/db.go index c3b52042..b9779fb3 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -17,6 +17,7 @@ type KeyValuePair struct { // Conn: interface for db connection type Conn interface { + // TODO(juagargi) remove the temporary access to the sql.DB object DB() *sql.DB // Close closes the connection. Close() error @@ -30,6 +31,9 @@ type Conn interface { // DisableIndexing starts the indexing in the table. 
EnableIndexing(table string) error + InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, + parents []common.SHA256Output) error + // ************************************************************ // Function for Tree table // ************************************************************ diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index a886e8b5..d86ec20b 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -1,11 +1,14 @@ package db import ( + "context" "database/sql" "fmt" "strings" + "github.com/go-sql-driver/mysql" _ "github.com/go-sql-driver/mysql" + "github.com/netsec-ethz/fpki/pkg/common" ) // NOTE @@ -37,80 +40,80 @@ type mysqlDB struct { // NewMysqlDB is called to create a new instance of the mysqlDB, initializing certain values, // like stored procedures. func NewMysqlDB(db *sql.DB) (*mysqlDB, error) { - prepGetValueDomainEntries, err := db.Prepare("SELECT `value` from `domainEntries` WHERE `key`=?") - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetValueDomainEntries: %w", err) - } - prepGetValueTree, err := db.Prepare("SELECT `value` from `tree` WHERE `key`=?") - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetValueTree: %w", err) - } - prepGetUpdatedDomains, err := db.Prepare("SELECT `key` FROM `updates`") - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetUpdatedDomains: %w", err) - } - - str := "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(batchSize, 2) - prepReplaceDomainEntries, err := db.Prepare(str) - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceDomainEntries: %w", err) - } - str = "REPLACE into tree (`key`, `value`) values " + repeatStmt(batchSize, 2) - prepReplaceTree, err := db.Prepare(str) - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceTree: %w", err) - } - str = "REPLACE into `updates` 
(`key`) VALUES " + repeatStmt(batchSize, 1) - prepReplaceUpdates, err := db.Prepare(str) - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceUpdates: %w", err) - } - str = "DELETE from `tree` WHERE `key` IN " + repeatStmt(1, batchSize) - prepDeleteUpdates, err := db.Prepare(str) - if err != nil { - return nil, fmt.Errorf("NewMysqlDB | preparing statement prepDeleteUpdates: %w", err) - } + // prepGetValueDomainEntries, err := db.Prepare("SELECT `value` from `domainEntries` WHERE `key`=?") + // if err != nil { + // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetValueDomainEntries: %w", err) + // } + // prepGetValueTree, err := db.Prepare("SELECT `value` from `tree` WHERE `key32`=?") + // if err != nil { + // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetValueTree: %w", err) + // } + // prepGetUpdatedDomains, err := db.Prepare("SELECT `key` FROM `updates`") + // if err != nil { + // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetUpdatedDomains: %w", err) + // } + + // str := "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(batchSize, 2) + // prepReplaceDomainEntries, err := db.Prepare(str) + // if err != nil { + // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceDomainEntries: %w", err) + // } + // str = "REPLACE into tree (`key32`, `value`) values " + repeatStmt(batchSize, 2) + // prepReplaceTree, err := db.Prepare(str) + // if err != nil { + // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceTree: %w", err) + // } + // str = "REPLACE into `updates` (`key`) VALUES " + repeatStmt(batchSize, 1) + // prepReplaceUpdates, err := db.Prepare(str) + // if err != nil { + // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceUpdates: %w", err) + // } + // str = "DELETE from `tree` WHERE `key32` IN " + repeatStmt(1, batchSize) + // prepDeleteUpdates, err := db.Prepare(str) + // if err != nil { + // return nil, 
fmt.Errorf("NewMysqlDB | preparing statement prepDeleteUpdates: %w", err) + // } return &mysqlDB{ - db: db, - prepGetValueDomainEntries: prepGetValueDomainEntries, - prepGetValueTree: prepGetValueTree, - prepGetUpdatedDomains: prepGetUpdatedDomains, - getDomainEntriesUpdateStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - str = "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(count, 2) - prepPartial, err := db.Prepare(str) - if err != nil { - panic(err) - } - return prepReplaceDomainEntries, prepPartial - }, - getTreeStructureUpdateStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - str := "REPLACE into tree (`key`, `value`) values " + repeatStmt(count, 2) - prepPartial, err := db.Prepare(str) - if err != nil { - panic(err) - } - return prepReplaceTree, prepPartial - }, - getUpdatesInsertStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - str := "REPLACE into `updates` (`key`) VALUES " + repeatStmt(count, 1) - prepPartial, err := db.Prepare(str) - if err != nil { - panic(err) - } - return prepReplaceUpdates, prepPartial - }, - getTreeDeleteStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - if count == 0 { - return prepDeleteUpdates, nil - } - str := "DELETE from `tree` WHERE `key` IN " + repeatStmt(1, count) - prepPartial, err := db.Prepare(str) - if err != nil { - panic(err) - } - return prepDeleteUpdates, prepPartial - }, + db: db, + // prepGetValueDomainEntries: prepGetValueDomainEntries, + // prepGetValueTree: prepGetValueTree, + // prepGetUpdatedDomains: prepGetUpdatedDomains, + // getDomainEntriesUpdateStmts: func(count int) (*sql.Stmt, *sql.Stmt) { + // str = "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(count, 2) + // prepPartial, err := db.Prepare(str) + // if err != nil { + // panic(err) + // } + // return prepReplaceDomainEntries, prepPartial + // }, + // getTreeStructureUpdateStmts: func(count int) (*sql.Stmt, *sql.Stmt) { + // str := "REPLACE into tree (`key`, `value`) values " + repeatStmt(count, 2) + // prepPartial, 
err := db.Prepare(str) + // if err != nil { + // panic(err) + // } + // return prepReplaceTree, prepPartial + // }, + // getUpdatesInsertStmts: func(count int) (*sql.Stmt, *sql.Stmt) { + // str := "REPLACE into `updates` (`key`) VALUES " + repeatStmt(count, 1) + // prepPartial, err := db.Prepare(str) + // if err != nil { + // panic(err) + // } + // return prepReplaceUpdates, prepPartial + // }, + // getTreeDeleteStmts: func(count int) (*sql.Stmt, *sql.Stmt) { + // if count == 0 { + // return prepDeleteUpdates, nil + // } + // str := "DELETE from `tree` WHERE `key` IN " + repeatStmt(1, count) + // prepPartial, err := db.Prepare(str) + // if err != nil { + // panic(err) + // } + // return prepDeleteUpdates, prepPartial + // }, getProofLimiter: make(chan struct{}, 128), }, nil } @@ -121,8 +124,8 @@ func (c *mysqlDB) DB() *sql.DB { // Close: close connection func (c *mysqlDB) Close() error { - c.prepGetValueTree.Close() - c.prepGetValueDomainEntries.Close() + // c.prepGetValueTree.Close() + // c.prepGetValueDomainEntries.Close() return c.db.Close() } @@ -150,6 +153,51 @@ func (c *mysqlDB) EnableIndexing(table string) error { return err } +func (c *mysqlDB) InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, + parents []common.SHA256Output) error { + + for tryNumber := 0; tryNumber < 2; tryNumber++ { + // TODO(juagargi) set a prepared statement in constructor + str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) + insertCerts, err := c.db.Prepare(str) + if err != nil { + return err + } + + data := make([]interface{}, 3*len(ids)) + for i := range ids { + data[i*3] = ids[i][:] + data[i*3+1] = payloads[1] + data[i*3+2] = parents[i][:] + } + + res, err := insertCerts.ExecContext(ctx, data...) 
+ if err != nil { + if myErr, ok := err.(*mysql.MySQLError); ok { + if myErr.Number == 1213 { + // TODO(juagargi) find out why so many deadlocks occur and fix the situation + fmt.Println("deleteme deadlock") + + // XXX(juagargi) retrying seems to be around 50% more expensive than if + // we had no deadlock. + // break + continue + } + } + fmt.Printf("type %T\n", err) + panic(err) + return err + } + n, err := res.RowsAffected() + if err != nil { + return err + } + fmt.Printf("inserted %d certificates\n", n) + break + } + return nil +} + // repeatStmt returns ( (?,..inner..,?), ...outer... ) func repeatStmt(outer int, inner int) string { components := make([]string, inner) diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index 7627d05c..adb773bc 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -30,6 +30,18 @@ func NewMockDB() *MockDB { // Close closes the connection. func (d *MockDB) Close() error { return nil } +func (d *MockDB) TruncateAllTables() error { return nil } + +func (d *MockDB) DisableIndexing(table string) error { return nil } + +func (d *MockDB) EnableIndexing(table string) error { return nil } + +func (d *MockDB) InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, + parents []common.SHA256Output) error { + + return nil +} + func (d *MockDB) RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ([]byte, error) { return d.TreeTable[id], nil } @@ -111,3 +123,5 @@ func (d *MockDB) RemoveAllUpdatedDomains(ctx context.Context) error { d.UpdatesTable = make(map[common.SHA256Output]struct{}) return nil } + +func (d *MockDB) UpdatedDomains() (chan []common.SHA256Output, chan error) { return nil, nil } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 55493943..56311652 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ 
b/pkg/mapserver/updater/certs_updater.go @@ -5,7 +5,7 @@ import ( "fmt" "time" - "github.com/google/certificate-transparency-go/x509" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" @@ -19,8 +19,15 @@ type uniqueSet map[common.SHA256Output]struct{} type uniqueStringSet map[string]struct{} // UpdateDomainEntriesTableUsingCerts: Update the domain entries using the domain certificates -func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts(ctx context.Context, - certs []*x509.Certificate, certChains [][]*x509.Certificate) ([]*db.KeyValuePair, int, error) { +func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts( + ctx context.Context, + certs []*ctx509.Certificate, + certChains [][]*ctx509.Certificate, +) ( + []*db.KeyValuePair, + int, + error, +) { if len(certs) == 0 { return nil, 0, nil @@ -28,20 +35,20 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts(ctx context.Con start := time.Now() // get the unique list of affected domains - affectedDomainsMap, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap( + affectedDomainsSet, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap( certs, certChains) end := time.Now() fmt.Println("(memory) time to process certs: ", end.Sub(start)) // if no domain to update - if len(affectedDomainsMap) == 0 { + if len(affectedDomainsSet) == 0 { return nil, 0, nil } start = time.Now() // retrieve (possibly)affected domain entries from db // It's possible that no records will be changed, because the certs are already recorded. 
- domainEntriesMap, err := mapUpdater.retrieveAffectedDomainFromDB(ctx, affectedDomainsMap) + domainEntriesMap, err := mapUpdater.retrieveAffectedDomainFromDB(ctx, affectedDomainsSet) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | %w", err) } @@ -94,16 +101,16 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts(ctx context.Con // Second return value: "domain name" -> certs. So later, one can look through the map to decide which certs to // // added to which domain. -func GetAffectedDomainAndCertMap(certs []*x509.Certificate, certChains [][]*x509.Certificate) (uniqueSet, - map[string][]*x509.Certificate, map[string][][]*x509.Certificate) { +func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) (uniqueSet, + map[string][]*ctx509.Certificate, map[string][][]*ctx509.Certificate) { // Set with the SHAs of the updated domains. affectedDomainsMap := make(uniqueSet) // Map "domain name" -> cert list (certs to be added to this domain). - domainCertMap := make(map[string][]*x509.Certificate) + domainCertMap := make(map[string][]*ctx509.Certificate) // Analogous to the map above except that we map "domain name" -> cert chains. 
- domainCertChainMap := make(map[string][][]*x509.Certificate) + domainCertChainMap := make(map[string][][]*ctx509.Certificate) // extract the affected domain of every certificates for i, cert := range certs { @@ -111,7 +118,7 @@ func GetAffectedDomainAndCertMap(certs []*x509.Certificate, certChains [][]*x509 certChain := certChains[i] // get unique list of domain names - domains := extractCertDomains(cert) + domains := ExtractCertDomains(cert) if len(domains) == 0 { continue } @@ -132,17 +139,46 @@ func GetAffectedDomainAndCertMap(certs []*x509.Certificate, certChains [][]*x509 domainCertMap[domainName] = append(domainCertMap[domainName], cert) domainCertChainMap[domainName] = append(domainCertChainMap[domainName], certChain) } else { - domainCertMap[domainName] = []*x509.Certificate{cert} - domainCertChainMap[domainName] = [][]*x509.Certificate{certChain} + domainCertMap[domainName] = []*ctx509.Certificate{cert} + domainCertChainMap[domainName] = [][]*ctx509.Certificate{certChain} } } } return affectedDomainsMap, domainCertMap, domainCertChainMap } +// UnfoldCerts takes a slice of certificates and chains with the same length, +// and returns all certificates once, without duplicates, and a pointer to the parent in the +// trust chain, or nil if the certificate is root. +func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( + certificates, parents []*ctx509.Certificate) { + + for len(certs) > 0 { + var pendingCerts []*ctx509.Certificate + var pendingChains [][]*ctx509.Certificate + for i, c := range certs { + certificates = append(certificates, c) + var parent *ctx509.Certificate + if len(chains[i]) > 0 { + // The certificate has a trust chain (it is not root): add the first certificate + // from the chain as the parent. + parent = chains[i][0] + // Add this parent to the back of the certs, plus the corresponding chain entry, + // so that it's processed as a certificate. 
+ pendingCerts = append(pendingCerts, parent) + pendingChains = append(pendingChains, chains[i][1:]) + } + parents = append(parents, parent) + } + certs = pendingCerts + chains = pendingChains + } + return +} + // update domain entries func UpdateDomainEntries(domainEntries map[common.SHA256Output]*mcommon.DomainEntry, - certDomainMap map[string][]*x509.Certificate, certChainDomainMap map[string][][]*x509.Certificate) (uniqueSet, error) { + certDomainMap map[string][]*ctx509.Certificate, certChainDomainMap map[string][][]*ctx509.Certificate) (uniqueSet, error) { updatedDomainHash := make(uniqueSet) // read from previous map @@ -179,7 +215,7 @@ func UpdateDomainEntries(domainEntries map[common.SHA256Output]*mcommon.DomainEn // updateDomainEntry: insert certificate into correct CAEntry // return: if this domain entry is updated -func updateDomainEntry(domainEntry *mcommon.DomainEntry, cert *x509.Certificate, certChain []*x509.Certificate) bool { +func updateDomainEntry(domainEntry *mcommon.DomainEntry, cert *ctx509.Certificate, certChain []*ctx509.Certificate) bool { return domainEntry.AddCert(cert, certChain) } diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index 6c6ec5ae..e7c694eb 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -2,29 +2,30 @@ package updater import ( "bytes" + "fmt" "io/ioutil" "testing" - projectCommon "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/domain" - - "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/domain" + mapCommon 
"github.com/netsec-ethz/fpki/pkg/mapserver/common" ) // TestUpdateDomainEntriesUsingCerts: test UpdateDomainEntriesUsingCerts // This test tests the individual functions of the UpdateDomainEntriesUsingCerts() func TestUpdateDomainEntriesUsingCerts(t *testing.T) { - certs := []*x509.Certificate{} + certs := []*ctx509.Certificate{} // load test certs files, err := ioutil.ReadDir("./testdata/certs/") require.NoError(t, err, "ioutil.ReadDir") - certChains := make([][]*x509.Certificate, len(files)) + certChains := make([][]*ctx509.Certificate, len(files)) for _, file := range files { - cert, err := projectCommon.CTX509CertFromFile("./testdata/certs/" + file.Name()) + cert, err := common.CTX509CertFromFile("./testdata/certs/" + file.Name()) require.NoError(t, err, "projectCommon.CTX509CertFromFile") certs = append(certs, cert) } @@ -36,7 +37,7 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // test if all the certs are correctly added to the affectedDomainsMap and domainCertMap for _, cert := range certs { // get common name and SAN of the certificate - domainNames := extractCertDomains(cert) + domainNames := ExtractCertDomains(cert) // get the valid domain name from domainNames list affectedDomains := domain.ExtractAffectedDomains(domainNames) @@ -47,8 +48,8 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // check the affected domain is correctly added to the affectedDomains for _, affectedDomain := range affectedDomains { - var affectedNameHash projectCommon.SHA256Output - copy(affectedNameHash[:], projectCommon.SHA256Hash([]byte(affectedDomain))) + var affectedNameHash common.SHA256Output + copy(affectedNameHash[:], common.SHA256Hash([]byte(affectedDomain))) _, ok := affectedDomainsMap[affectedNameHash] assert.True(t, ok, "domain not found in affectedDomainsMap") @@ -73,7 +74,7 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { } // empty domainEntriesMap - domainEntriesMap := make(map[projectCommon.SHA256Output]*common.DomainEntry) 
+ domainEntriesMap := make(map[common.SHA256Output]*mapCommon.DomainEntry) updatedDomains, err := UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) require.NoError(t, err, "updateDomainEntries") @@ -81,7 +82,7 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // check if domainEntriesMap is correctly updated for _, cert := range certs { - domainNames := extractCertDomains(cert) + domainNames := ExtractCertDomains(cert) caName := cert.Issuer.String() // check if this cert has valid name @@ -92,8 +93,8 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // check domainEntriesMap for _, domainName := range affectedDomains { - var domainHash projectCommon.SHA256Output - copy(domainHash[:], projectCommon.SHA256Hash([]byte(domainName))) + var domainHash common.SHA256Output + copy(domainHash[:], common.SHA256Hash([]byte(domainName))) domainEntry, ok := domainEntriesMap[domainHash] assert.True(t, ok, "domainEntriesMap error") @@ -129,20 +130,20 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // TestUpdateSameCertTwice: update the same certs twice, number of updates should be zero func TestUpdateSameCertTwice(t *testing.T) { - certs := []*x509.Certificate{} + certs := []*ctx509.Certificate{} // check if files, err := ioutil.ReadDir("./testdata/certs/") require.NoError(t, err, "ioutil.ReadDir") - certChains := make([][]*x509.Certificate, len(files)) + certChains := make([][]*ctx509.Certificate, len(files)) for _, file := range files { - cert, err := projectCommon.CTX509CertFromFile("./testdata/certs/" + file.Name()) + cert, err := common.CTX509CertFromFile("./testdata/certs/" + file.Name()) require.NoError(t, err, "projectCommon.CTX509CertFromFile") certs = append(certs, cert) } _, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap(certs, certChains) - domainEntriesMap := make(map[projectCommon.SHA256Output]*common.DomainEntry) + domainEntriesMap := make(map[common.SHA256Output]*mapCommon.DomainEntry) // update 
domain entry with certs updatedDomains, err := UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) @@ -158,3 +159,38 @@ func TestUpdateSameCertTwice(t *testing.T) { // Now the length of updatedDomains should be zero. assert.Equal(t, 0, len(updatedDomains), "updated domain should be 0") } + +func TestUnfoldCerts(t *testing.T) { + // `a` and `b` are leaves. `a` is root, `b` has `c`->`d` as its trust chain. + a := &ctx509.Certificate{} + b := &ctx509.Certificate{} + c := &ctx509.Certificate{} + d := &ctx509.Certificate{} + certs := []*ctx509.Certificate{ + a, + b, + } + chains := [][]*ctx509.Certificate{ + nil, + {c, d}, + } + allCerts, parents := UnfoldCerts(certs, chains) + + fmt.Printf("[%p %p %p %p]\n", a, b, c, d) + fmt.Printf("%v\n", allCerts) + fmt.Printf("%v\n", parents) + + assert.Len(t, allCerts, 4) + assert.Len(t, parents, 4) + + assert.Equal(t, a, allCerts[0]) + assert.Equal(t, b, allCerts[1]) + assert.Equal(t, c, allCerts[2]) + assert.Equal(t, d, allCerts[3]) + + nilParent := (*ctx509.Certificate)(nil) + assert.Equal(t, nilParent, parents[0], "bad parent at 0") + assert.Equal(t, c, parents[1], "bad parent at 1") + assert.Equal(t, d, parents[2], "bad parent at 2") + assert.Equal(t, nilParent, parents[3], "bad parent at 3") +} diff --git a/pkg/mapserver/updater/dbutil.go b/pkg/mapserver/updater/dbutil.go index 1104952b..5628ad55 100644 --- a/pkg/mapserver/updater/dbutil.go +++ b/pkg/mapserver/updater/dbutil.go @@ -19,12 +19,12 @@ type dbResult struct { // retrieveAffectedDomainFromDB: get affected domain entries from db func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDB(ctx context.Context, - affectedDomainsMap uniqueSet) (map[common.SHA256Output]*mapCommon.DomainEntry, error) { + affectedDomainsSet uniqueSet) (map[common.SHA256Output]*mapCommon.DomainEntry, error) { // XXX(juagargi) review why passing a set (we need to convert it to a slice) // list of domain hashes to fetch the domain entries from db - affectedDomainHashes := 
make([]common.SHA256Output, 0, len(affectedDomainsMap)) - for k := range affectedDomainsMap { + affectedDomainHashes := make([]common.SHA256Output, 0, len(affectedDomainsSet)) + for k := range affectedDomainsSet { affectedDomainHashes = append(affectedDomainHashes, k) } diff --git a/pkg/mapserver/updater/tools.go b/pkg/mapserver/updater/tools.go index b0e2a98b..3f0444ee 100644 --- a/pkg/mapserver/updater/tools.go +++ b/pkg/mapserver/updater/tools.go @@ -8,8 +8,8 @@ import ( "github.com/netsec-ethz/fpki/pkg/mapserver/common" ) -// extractCertDomains: get domain from cert: {Common Name, SANs} -func extractCertDomains(cert *x509.Certificate) []string { +// ExtractCertDomains: get domain from cert: {Common Name, SANs} +func ExtractCertDomains(cert *x509.Certificate) []string { domains := make(uniqueStringSet) if len(cert.Subject.CommonName) != 0 { domains[cert.Subject.CommonName] = struct{}{} diff --git a/pkg/mapserver/updater/tools_test.go b/pkg/mapserver/updater/tools_test.go index f380a70b..1abd49b8 100644 --- a/pkg/mapserver/updater/tools_test.go +++ b/pkg/mapserver/updater/tools_test.go @@ -16,7 +16,7 @@ func TestExtractCertDomains(t *testing.T) { cert, err := common.CTX509CertFromFile("./testdata/certs/adiq.com.br144.cer") require.NoError(t, err, "projectCommon.CTX509CertFromFile") - result := extractCertDomains(cert) + result := ExtractCertDomains(cert) assert.Equal(t, 2, len(result)) assert.Contains(t, result, "*.adiq.com.br") assert.Contains(t, result, "adiq.com.br") diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 17852fb9..eafb96a1 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -87,7 +87,13 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ certChains[i] = append(certChains[i], certChainItem) } } - return mapUpdater.updateCerts(ctx, certs, certChains) + return mapUpdater.updateCerts2(ctx, certs, certChains) +} + +func (m *MapUpdater) 
updateCerts2(ctx context.Context, certs []*ctx509.Certificate, + chains [][]*ctx509.Certificate) error { + + return UpdateCerts(ctx, m.dbConn, certs, chains) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -227,3 +233,30 @@ func (mapUpdater *MapUpdater) GetRoot() []byte { func (mapUpdater *MapUpdater) Close() error { return mapUpdater.smt.Close() } + +func UpdateCerts(ctx context.Context, conn db.Conn, certs []*ctx509.Certificate, + chains [][]*ctx509.Certificate) error { + + certs, parents := UnfoldCerts(certs, chains) + + ids := make([]common.SHA256Output, len(certs)) + payloads := make([][]byte, len(certs)) + parentIds := make([]common.SHA256Output, len(certs)) + for i, c := range certs { + ids[i] = common.SHA256Hash32Bytes(c.Raw) + payloads[i] = c.Raw + if parents[i] != nil { + parentIds[i] = common.SHA256Hash32Bytes(parents[i].Raw) + } + } + + // TODO(juagargi) check first in DB which cert ids are already present and skip sending them + + if err := conn.InsertCerts(context.Background(), ids, payloads, parentIds); err != nil { + panic(err) + } + + // Each cert that has been updated needs an entry in `domains` and `dirty` + // TODO + return nil +} diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index e119ba02..471f704c 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -51,7 +51,7 @@ func TestUpdateCerts(t *testing.T) { // check whether certs are correctly added to the db for _, cert := range certs { - domains := domain.ExtractAffectedDomains(extractCertDomains(cert)) + domains := domain.ExtractAffectedDomains(ExtractCertDomains(cert)) for _, domain := range domains { domainHash := projectCommon.SHA256Hash32Bytes([]byte(domain)) From db4c7b50f4f6929a6b3e9f9085cae59f85855adc Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 2 Feb 2023 10:38:49 +0100 Subject: [PATCH 016/187] add profiling to ingest --- cmd/ingest/main.go | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 857c1ce6..98d8fefe 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -6,6 +6,7 @@ import ( "io/ioutil" "os" "path/filepath" + "runtime/pprof" "github.com/netsec-ethz/fpki/pkg/db" ) @@ -16,12 +17,36 @@ const ( ) func main() { + os.Exit(mainFunction()) +} +func mainFunction() int { flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage:\n%s directory\n", os.Args[0]) + flag.PrintDefaults() } + cpuProfile := flag.String("cpuprofile", "", "write a CPU profile to file") + memProfile := flag.String("memprofile", "", "write a memory profile to file") flag.Parse() if flag.NArg() != 1 { flag.Usage() + return 1 + } + + // Profiling: + if *cpuProfile != "" { + f, err := os.Create(*cpuProfile) + exitIfError(err) + err = pprof.StartCPUProfile(f) + exitIfError(err) + defer pprof.StopCPUProfile() + } + if *memProfile != "" { + defer func() { + f, err := os.Create(*memProfile) + exitIfError(err) + err = pprof.WriteHeapProfile(f) + exitIfError(err) + }() } conn, err := db.Connect(nil) @@ -42,6 +67,7 @@ func main() { // Close DB and check errors. err = conn.Close() exitIfError(err) + return 0 } func listOurFiles(dir string) (gzFiles, csvFiles []string) { From 790edc4d00dedbc0a9347d0c0dd79b847ab566d6 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 2 Feb 2023 10:58:10 +0100 Subject: [PATCH 017/187] catch sigint --- cmd/ingest/main.go | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 98d8fefe..202723a6 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -5,8 +5,10 @@ import ( "fmt" "io/ioutil" "os" + "os/signal" "path/filepath" "runtime/pprof" + "syscall" "github.com/netsec-ethz/fpki/pkg/db" ) @@ -33,21 +35,32 @@ func mainFunction() int { } // Profiling: + stopProfiles := func() { + if *cpuProfile != "" { + pprof.StopCPUProfile() + } + if *memProfile != "" { + f, err := os.Create(*memProfile) + exitIfError(err) + err = pprof.WriteHeapProfile(f) + exitIfError(err) + } + } if *cpuProfile != "" { f, err := os.Create(*cpuProfile) exitIfError(err) err = pprof.StartCPUProfile(f) exitIfError(err) - defer pprof.StopCPUProfile() - } - if *memProfile != "" { - defer func() { - f, err := os.Create(*memProfile) - exitIfError(err) - err = pprof.WriteHeapProfile(f) - exitIfError(err) - }() } + defer stopProfiles() + + // Signals catching: + signals := make(chan os.Signal) + signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) + go func() { + <-signals + stopProfiles() + }() conn, err := db.Connect(nil) exitIfError(err) From aba32b299d2d38b75bea617d937a9fa08a0d2914 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 2 Feb 2023 17:08:19 +0100 Subject: [PATCH 018/187] Specify num readers and parsers --- cmd/ingest/main.go | 6 +++ cmd/ingest/processor.go | 83 ++++++++++++++++++++++++++++------------- 2 files changed, 64 insertions(+), 25 deletions(-) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 202723a6..6a28bff4 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -13,6 +13,11 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" ) +const ( + NumFileReaders = 8 + NumParsers = 64 +) + const ( CertificateColumn = 3 CertChainColumn = 4 @@ -60,6 +65,7 @@ func mainFunction() int { go func() { <-signals stopProfiles() + os.Exit(1) }() conn, err := db.Connect(nil) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 70a87076..3a53a7d5 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -52,28 +52,33 @@ func (p *Processor) start() { // Process files and parse the CSV contents: go func() { wg := sync.WaitGroup{} - for f := range p.incomingFileCh { - f := f - wg.Add(1) + wg.Add(NumFileReaders) + for r := 0; r < NumFileReaders; r++ { go func() { defer wg.Done() - r, err := f.Open() - if err != nil { - p.errorCh <- err - return - } - if err := p.ingestWithCSV(r); err != nil { - p.errorCh <- err - return - } - if err := f.Close(); err != nil { - p.errorCh <- err - return + for f := range p.incomingFileCh { + func() { + r, err := f.Open() + if err != nil { + p.errorCh <- err + return + } + if err := p.ingestWithCSV(r); err != nil { + p.errorCh <- err + return + } + if err := f.Close(); err != nil { + p.errorCh <- err + return + } + fmt.Printf(".") + }() } }() } wg.Wait() - fmt.Println("deleteme done with incoming files, closing parsed data channel") + fmt.Println() + fmt.Println("Done with incoming files, closing parsed data channel.") // Because we are done writing parsed content, close that channel. 
close(p.fromParserCh) }() @@ -85,7 +90,7 @@ func (p *Processor) start() { batch.AddCert(data) if batch.Full() { p.batchProcessor.Process(batch) - fmt.Print(".") + // fmt.Print(".") batch = NewBatch() } } @@ -144,15 +149,8 @@ func (p *Processor) AddCsvFiles(fileNames []string) { func (p *Processor) ingestWithCSV(fileReader io.Reader) error { reader := csv.NewReader(fileReader) reader.FieldsPerRecord = -1 // don't check number of fields - reader.ReuseRecord = true - var err error - var fields []string - for lineNo := 1; err == nil; lineNo++ { - fields, err = reader.Read() - if len(fields) == 0 { // there exist empty lines (e.g. at the end of the gz files) - continue - } + parseFunction := func(fields []string, lineNo int) error { rawBytes, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) if err != nil { return err @@ -182,7 +180,42 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { Cert: cert, CertChain: chain, } + return nil + } + + type lineAndFields struct { + lineNo int + fields []string + } + recordsChan := make(chan *lineAndFields) + + wg := sync.WaitGroup{} + wg.Add(NumParsers) + for r := 0; r < NumParsers; r++ { + go func() { + defer wg.Done() + for x := range recordsChan { + if err := parseFunction(x.fields, x.lineNo); err != nil { + panic(err) + } + } + }() + } + records, err := reader.ReadAll() + if err != nil { + return err + } + for lineNo, fields := range records { + if len(fields) == 0 { // there exist empty lines (e.g. at the end of the gz files) + continue + } + recordsChan <- &lineAndFields{ + lineNo: lineNo, + fields: fields, + } } + close(recordsChan) + wg.Wait() return nil } From 1bb40e66e313dae533a3d0b34bb1967e77f2a147 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 2 Feb 2023 17:23:50 +0100 Subject: [PATCH 019/187] wip less verbose --- cmd/ingest/batch.go | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 93f73c9c..fa9db8d6 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -2,7 +2,6 @@ package main import ( "context" - "fmt" "sync" ctx509 "github.com/google/certificate-transparency-go/x509" @@ -102,12 +101,12 @@ func (p *BatchProcessor) start() { } func (p *BatchProcessor) Wait() { - fmt.Println("deleteme waiting 1") + // fmt.Println("deleteme waiting 1") p.incomingWg.Wait() close(p.incomingCh) - fmt.Println("deleteme waiting 2") + // fmt.Println("deleteme waiting 2") <-p.doneCh - fmt.Println("deleteme waiting 3") + // fmt.Println("deleteme waiting 3") } // Process processes a Batch into the DB. @@ -122,11 +121,14 @@ func (p *BatchProcessor) Process(b *Batch) { func (p *BatchProcessor) wrapBatch(batch *Batch) { defer p.incomingWg.Done() // one less batch to process p.processBatch(batch) - fmt.Println("batch processed") + // fmt.Println("batch processed") } func (p *BatchProcessor) processBatch(batch *Batch) { // Store certificates in DB: + if len(batch.Certs) == 0 { + return + } err := updater.UpdateCerts(context.Background(), p.conn, batch.Certs, batch.Chains) if err != nil { panic(err) From aadf2b00b230c3e833457a175357f0fc5db049e5 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Tue, 7 Feb 2023 17:02:06 +0100 Subject: [PATCH 020/187] update mysql driver --- go.mod | 2 +- go.sum | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index 8fd8af71..8c99a7bb 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/netsec-ethz/fpki go 1.17 require ( - github.com/go-sql-driver/mysql v1.6.0 + github.com/go-sql-driver/mysql v1.7.0 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b github.com/google/certificate-transparency-go v1.1.3 github.com/google/trillian v1.4.1 diff --git a/go.sum b/go.sum index 45d84c20..ebbaa41c 100644 --- a/go.sum +++ b/go.sum @@ -289,8 +289,9 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= From b6fb9f486408e1a2d5e0d596afb4b6ac5ce16dfd Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Tue, 7 Feb 2023 17:07:38 +0100 Subject: [PATCH 021/187] Use MyISAM --- tools/create_schema.sh | 32 ++++++++------------------------ 1 file changed, 8 insertions(+), 24 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 257c7587..12ac5ce6 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -24,34 +24,18 @@ EOF echo "$CMD" | mysql -u root -# CMD=$(cat < Date: Tue, 7 Feb 2023 17:35:27 +0100 Subject: [PATCH 022/187] Limit DB connections. Too many connections to DB while buffering writes make the go routines stall and create even more connections to DB, which allocates vast amounts of data. --- pkg/db/init.go | 46 +++++++++++++---------- pkg/db/mysql.go | 51 +++++++------------------- pkg/mapserver/updater/certs_updater.go | 8 ++++ pkg/mapserver/updater/updater.go | 33 +++++++---------- 4 files changed, 61 insertions(+), 77 deletions(-) diff --git a/pkg/db/init.go b/pkg/db/init.go index d5bebec8..7512a2c1 100644 --- a/pkg/db/init.go +++ b/pkg/db/init.go @@ -5,7 +5,6 @@ import ( "fmt" "net/url" "os" - "time" _ "github.com/go-sql-driver/mysql" ) @@ -22,7 +21,12 @@ type Configuration struct { // set to influence the connection. The defaults are set to yield "root@tcp(localhost)/fpki" as // the DSN. 
func ConfigFromEnvironment() *Configuration { - env := map[string]string{"MYSQL_USER": "root", "MYSQL_PASSWORD": "", "MYSQL_HOST": "localhost", "MYSQL_PORT": ""} + env := map[string]string{ + "MYSQL_USER": "root", + "MYSQL_PASSWORD": "", + "MYSQL_HOST": "127.0.0.1", + "MYSQL_PORT": "", + } for k := range env { v, exists := os.LookupEnv(k) if exists { @@ -54,29 +58,18 @@ func Connect(config *Configuration) (Conn, error) { if config == nil { config = ConfigFromEnvironment() } - dsn, err := url.Parse(config.Dsn) - if err != nil { - return nil, fmt.Errorf("bad connection string: %w", err) - } - uri := dsn.Query() - for k, v := range config.Values { - uri.Add(k, v) - } - dsn.RawQuery = uri.Encode() - db, err := sql.Open("mysql", dsn.String()) + + db, err := connect(config) if err != nil { return nil, fmt.Errorf("cannot open DB: %w", err) } - // value set higher could trigger issues in the system - maxConnections := 2048 + // Set a very small number of concurrent connections per sql.DB . + // This avoids routines creating connections to the DB and holding vast amounts of + // data (which impact the heap), and forcing to slow down the pipelines until the existing + // DB connections complete their work. 
+ maxConnections := 8 db.SetMaxOpenConns(maxConnections) - db.SetMaxIdleConns(maxConnections) - db.SetConnMaxLifetime(-1) // don't close them - db.SetConnMaxIdleTime(1 * time.Minute) // don't close them - if _, err = db.Exec("SET GLOBAL max_connections = ?", maxConnections); err != nil { - return nil, err - } // check schema if config.CheckSchema { @@ -87,6 +80,19 @@ func Connect(config *Configuration) (Conn, error) { return NewMysqlDB(db) } +func connect(config *Configuration) (*sql.DB, error) { + dsn, err := url.Parse(config.Dsn) + if err != nil { + return nil, fmt.Errorf("bad connection string: %w", err) + } + uri := dsn.Query() + for k, v := range config.Values { + uri.Add(k, v) + } + dsn.RawQuery = uri.Encode() + return sql.Open("mysql", dsn.String()) +} + func checkSchema(c *sql.DB) error { _, err := c.Query("SELECT COUNT(*) FROM nodes") if err != nil { diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index d86ec20b..ec40f497 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -6,7 +6,6 @@ import ( "fmt" "strings" - "github.com/go-sql-driver/mysql" _ "github.com/go-sql-driver/mysql" "github.com/netsec-ethz/fpki/pkg/common" ) @@ -156,45 +155,21 @@ func (c *mysqlDB) EnableIndexing(table string) error { func (c *mysqlDB) InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, parents []common.SHA256Output) error { - for tryNumber := 0; tryNumber < 2; tryNumber++ { - // TODO(juagargi) set a prepared statement in constructor - str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) - insertCerts, err := c.db.Prepare(str) - if err != nil { - return err - } - - data := make([]interface{}, 3*len(ids)) - for i := range ids { - data[i*3] = ids[i][:] - data[i*3+1] = payloads[1] - data[i*3+2] = parents[i][:] - } + // TODO(juagargi) set a prepared statement in constructor + // str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) + str := "INSERT into certs (id, 
payload, parent) values " + repeatStmt(len(ids), 3) + data := make([]interface{}, 3*len(ids)) + for i := range ids { + data[i*3] = ids[i][:] + data[i*3+1] = payloads[i] + data[i*3+2] = parents[i][:] + } + _, err := c.db.Exec(str, data...) + if err != nil { - res, err := insertCerts.ExecContext(ctx, data...) - if err != nil { - if myErr, ok := err.(*mysql.MySQLError); ok { - if myErr.Number == 1213 { - // TODO(juagargi) find out why so many deadlocks occur and fix the situation - fmt.Println("deleteme deadlock") - - // XXX(juagargi) retrying seems to be around 50% more expensive than if - // we had no deadlock. - // break - continue - } - } - fmt.Printf("type %T\n", err) - panic(err) - return err - } - n, err := res.RowsAffected() - if err != nil { - return err - } - fmt.Printf("inserted %d certificates\n", n) - break + return err } + return nil } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 56311652..6d769078 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -150,6 +150,8 @@ func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ct // UnfoldCerts takes a slice of certificates and chains with the same length, // and returns all certificates once, without duplicates, and a pointer to the parent in the // trust chain, or nil if the certificate is root. +// The parents returned slice has the same elements as the certificates returned slice. +// When a certificate is root, it's corresponding parents entry is nil. 
func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( certificates, parents []*ctx509.Certificate) { @@ -176,6 +178,12 @@ func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( return } +func UnfoldCert(cert *ctx509.Certificate, chain []*ctx509.Certificate) ( + certificates, parents []*ctx509.Certificate) { + + return UnfoldCerts([]*ctx509.Certificate{cert}, [][]*ctx509.Certificate{chain}) +} + // update domain entries func UpdateDomainEntries(domainEntries map[common.SHA256Output]*mcommon.DomainEntry, certDomainMap map[string][]*ctx509.Certificate, certChainDomainMap map[string][][]*ctx509.Certificate) (uniqueSet, error) { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index eafb96a1..5f3d7f1f 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -69,31 +69,28 @@ func (u *MapUpdater) UpdateNextBatch(ctx context.Context) (int, error) { // UpdateCertsLocally: add certs (in the form of asn.1 encoded byte arrays) directly without querying log func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [][]byte, certChainList [][][]byte) error { - certs := []*ctx509.Certificate{} - certChains := [][]*ctx509.Certificate{} + names := make([][]string, 0, len(certList)) // Set of names per certificate + certs := make([]*ctx509.Certificate, 0, len(certList)) + certChains := make([][]*ctx509.Certificate, 0, len(certList)) for i, certRaw := range certList { cert, err := ctx509.ParseCertificate(certRaw) if err != nil { return err } certs = append(certs, cert) + names = append(names, ExtractCertDomains(cert)) - certChains = append(certChains, []*ctx509.Certificate{}) - for _, certChainItemRaw := range certChainList[i] { - certChainItem, err := ctx509.ParseCertificate(certChainItemRaw) + chain := make([]*ctx509.Certificate, len(certChainList[i])) + for i, certChainItemRaw := range certChainList[i] { + chain[i], err = 
ctx509.ParseCertificate(certChainItemRaw) if err != nil { return err } - certChains[i] = append(certChains[i], certChainItem) } + certChains = append(certChains, chain) } - return mapUpdater.updateCerts2(ctx, certs, certChains) -} - -func (m *MapUpdater) updateCerts2(ctx context.Context, certs []*ctx509.Certificate, - chains [][]*ctx509.Certificate) error { - - return UpdateCerts(ctx, m.dbConn, certs, chains) + certs, parents := UnfoldCerts(certs, certChains) + return UpdateCerts(ctx, mapUpdater.dbConn, names, certs, parents) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -234,10 +231,8 @@ func (mapUpdater *MapUpdater) Close() error { return mapUpdater.smt.Close() } -func UpdateCerts(ctx context.Context, conn db.Conn, certs []*ctx509.Certificate, - chains [][]*ctx509.Certificate) error { - - certs, parents := UnfoldCerts(certs, chains) +func UpdateCerts(ctx context.Context, conn db.Conn, names [][]string, + certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { ids := make([]common.SHA256Output, len(certs)) payloads := make([][]byte, len(certs)) @@ -253,10 +248,10 @@ func UpdateCerts(ctx context.Context, conn db.Conn, certs []*ctx509.Certificate, // TODO(juagargi) check first in DB which cert ids are already present and skip sending them if err := conn.InsertCerts(context.Background(), ids, payloads, parentIds); err != nil { - panic(err) + return err } // Each cert that has been updated needs an entry in `domains` and `dirty` - // TODO + // TODO(juagargi) return nil } From a1cc69b1b7d62023830893e9d5d3b84f0977f7a5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 8 Feb 2023 10:03:42 +0100 Subject: [PATCH 023/187] Pipeline overhaul. New pipeline has better error management. Pipeline prepares batches of certificate nodes (with parent). Some cleanup. Added more comments. 
--- cmd/ingest/batch.go | 186 ++++++++++++++++++++------------------ cmd/ingest/main.go | 13 ++- cmd/ingest/processor.go | 129 ++++++++++++++------------ cmd/ingest/smt_updater.go | 22 +++-- 4 files changed, 196 insertions(+), 154 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index fa9db8d6..e6e25cb2 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -2,136 +2,148 @@ package main import ( "context" + "fmt" "sync" + "time" ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "go.uber.org/atomic" ) -const BatchSize = 1000 +type CertificateNode struct { + Names []string // collection of names per certificate + Cert *ctx509.Certificate + Parent *ctx509.Certificate +} -type Batch struct { - Certs []*ctx509.Certificate - Chains [][]*ctx509.Certificate +// CertBatch is an unwrapped collection of Certificate. +// All slices must have the same size. +type CertBatch struct { + Names [][]string // collection of names per certificate + Certs []*ctx509.Certificate + Parents []*ctx509.Certificate } -func NewBatch() *Batch { - return &Batch{ - Certs: make([]*ctx509.Certificate, 0, BatchSize), - Chains: make([][]*ctx509.Certificate, 0, BatchSize), +func NewCertificateBatch() *CertBatch { + return &CertBatch{ + Names: make([][]string, 0, BatchSize), + Certs: make([]*ctx509.Certificate, 0, BatchSize), + Parents: make([]*ctx509.Certificate, 0, BatchSize), } } -// AddCert pushed the cert data into the batch. 
-func (b *Batch) AddCert(d *CertData) { - b.Certs = append(b.Certs, d.Cert) - b.Chains = append(b.Chains, d.CertChain) +func (b *CertBatch) AddCertificate(c *CertificateNode) { + b.Names = append(b.Names, c.Names) + b.Certs = append(b.Certs, c.Cert) + b.Parents = append(b.Parents, c.Parent) } -func (b *Batch) Full() bool { +func (b *CertBatch) IsFull() bool { return len(b.Certs) == BatchSize } -type BatchProcessor struct { +// CertificateProcessor processes the insertion of certificate nodes into the DB. +// This is the most expensive stage, and as such, the processor prints the statistics about +// number of certificates and megabytes per second being inserted into the DB. +type CertificateProcessor struct { conn db.Conn - incomingCh chan *Batch - incomingWg sync.WaitGroup - doneCh chan struct{} + incomingCh chan *CertificateNode // From the previous processor + incomingBatch chan *CertBatch // Ready to be inserted + doneCh chan struct{} + // Statistics: + writtenCerts atomic.Int64 + writtenBytes atomic.Int64 } -func NewBatchProcessor(conn db.Conn) *BatchProcessor { - p := &BatchProcessor{ - conn: conn, - incomingCh: make(chan *Batch), - doneCh: make(chan struct{}), +func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode) *CertificateProcessor { + p := &CertificateProcessor{ + conn: conn, + incomingCh: incoming, + incomingBatch: make(chan *CertBatch), + doneCh: make(chan struct{}), } p.start() return p } -func (p *BatchProcessor) start() { - db := p.conn.DB() - _ = db - ini := func() { - // _, err := db.Exec("LOCK TABLES certs WRITE;") - // if err != nil { - // panic(err) - // } - // if _, err := db.Exec("SET autocommit=0"); err != nil { - // panic(err) - // } - // if _, err := db.Exec("ALTER TABLE certs DISABLE KEYS"); err != nil { - // panic(err) - // } - if _, err := db.Exec("ALTER TABLE certs DROP INDEX id"); err != nil { - panic(err) - } - } - end := func() { - - fmt.Println("deleteme before enabling keys") - if _, err := db.Exec("ALTER TABLE 
certs ADD UNIQUE INDEX id (id ASC)"); err != nil { - panic(err) +// start starts the pipeline. +// Two stages in this processor: from certificate node to batch, and from batch to DB. +func (p *CertificateProcessor) start() { + go func() { + batch := NewCertificateBatch() + for c := range p.incomingCh { + batch.AddCertificate(c) + if batch.IsFull() { + p.incomingBatch <- batch + batch = NewCertificateBatch() + } } - // if _, err := db.Exec("ALTER TABLE certs ENABLE KEYS"); err != nil { - // panic(err) - // } - fmt.Println("deleteme keys enabled.") - - fmt.Println("deleteme about to commit all changes") - // if _, err := db.Exec("COMMIT"); err != nil { - // panic(err) - // } - fmt.Println("deleteme commit succeeded.") - // if _, err := db.Exec("UNLOCK TABLES"); err != nil { - // panic(err) - // } - } + // Because the stage is finished, close the output channel: + close(p.incomingBatch) + }() - ini() go func() { - for batch := range p.incomingCh { - go p.wrapBatch(batch) + wg := sync.WaitGroup{} + wg.Add(NumDBWriters) + for w := 0; w < NumDBWriters; w++ { + go func() { + defer wg.Done() + for batch := range p.incomingBatch { + p.processBatch(batch) + } + }() } - end() + wg.Wait() + // This stage is finished, indicate so. p.doneCh <- struct{}{} }() -} -func (p *BatchProcessor) Wait() { - // fmt.Println("deleteme waiting 1") - p.incomingWg.Wait() - close(p.incomingCh) - // fmt.Println("deleteme waiting 2") - <-p.doneCh - // fmt.Println("deleteme waiting 3") -} - -// Process processes a Batch into the DB. -func (p *BatchProcessor) Process(b *Batch) { - p.incomingWg.Add(1) // one more batch to process + // Statistics. 
+ ticker := time.NewTicker(2 * time.Second) + startTime := time.Now() go func() { - p.incomingCh <- b + for { + select { + case <-ticker.C: + case <-p.doneCh: + return + } + writtenCerts := p.writtenCerts.Load() + writtenBytes := p.writtenBytes.Load() + secondsSinceStart := float64(time.Since(startTime).Seconds()) + fmt.Printf("%.0f Certs / second, %.1f Mb/s\n", + float64(writtenCerts)/secondsSinceStart, + float64(writtenBytes)/1024./1024./secondsSinceStart, + ) + } }() } -// wrapBatch protects the processing of a batch. -func (p *BatchProcessor) wrapBatch(batch *Batch) { - defer p.incomingWg.Done() // one less batch to process - p.processBatch(batch) - // fmt.Println("batch processed") +func (p *CertificateProcessor) Wait() { + <-p.doneCh } -func (p *BatchProcessor) processBatch(batch *Batch) { +func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: - if len(batch.Certs) == 0 { - return - } - err := updater.UpdateCerts(context.Background(), p.conn, batch.Certs, batch.Chains) + err := updater.UpdateCerts(context.Background(), p.conn, batch.Names, batch.Certs, batch.Parents) if err != nil { panic(err) } + p.writtenCerts.Add(int64(len(batch.Certs))) + bytesInBatch := 0 + for i := range batch.Certs { + bytesInBatch += len(batch.Certs[i].Raw) + bytesInBatch += common.SHA256Size + if batch.Parents[i] != nil { + bytesInBatch += len(batch.Parents[i].Raw) + bytesInBatch += common.SHA256Size + } + } + p.writtenBytes.Add(int64(bytesInBatch)) + // TODO(juagargi) push entries to the dirty table } diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 6a28bff4..6fba0db3 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -16,6 +16,9 @@ import ( const ( NumFileReaders = 8 NumParsers = 64 + NumDBWriters = 32 + + BatchSize = 1000 // # of certificates inserted at once. 
) const ( @@ -23,6 +26,10 @@ const ( CertChainColumn = 4 ) +// Times gathered at jupiter, 64 gz files, no CSV +// InnoDB: 8m 17s +// MyISAM: 1m 33s + func main() { os.Exit(mainFunction()) } @@ -68,9 +75,13 @@ func mainFunction() int { os.Exit(1) }() - conn, err := db.Connect(nil) + // Connect to DB via local socket, should be faster. + config := db.ConfigFromEnvironment() + config.Dsn = "root@unix(/var/run/mysqld/mysqld.sock)/fpki" + conn, err := db.Connect(config) exitIfError(err) + // All GZ and CSV files found under the directory of the argument. gzFiles, csvFiles := listOurFiles(flag.Arg(0)) fmt.Printf("# gzFiles: %d, # csvFiles: %d\n", len(gzFiles), len(csvFiles)) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 3a53a7d5..a1c947e4 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -14,32 +14,34 @@ import ( "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) +// Processor is the pipeline that takes file names and process them into certificates +// inside the DB and SMT. It is composed of several different stages, +// described in the `start` method. type Processor struct { - BatchSize int - Conn db.Conn - - incomingFileCh chan File // indicates new file(s) with certificates to be ingested - fromParserCh chan *CertData // parser data to be sent to SMT and DB - batchProcessor *BatchProcessor - - errorCh chan error // errors accumulate here - doneCh chan error // the aggregation of all errors. 
Signals Processor is done + Conn db.Conn + incomingFileCh chan File // New files with certificates to be ingested + certWithChainChan chan *CertWithChainData // After parsing files + nodeChan chan *CertificateNode // After finding parents, to be sent to DB and SMT + batchProcessor *CertificateProcessor // Processes certificate nodes (with parent pointer) + + errorCh chan error // Errors accumulate here + doneCh chan error // Signals Processor is done } -type CertData struct { +type CertWithChainData struct { DomainNames []string Cert *ctx509.Certificate CertChain []*ctx509.Certificate } func NewProcessor(conn db.Conn) *Processor { + nodeChan := make(chan *CertificateNode) p := &Processor{ - BatchSize: 1000, - Conn: conn, - - incomingFileCh: make(chan File), - fromParserCh: make(chan *CertData), - batchProcessor: NewBatchProcessor(conn), + Conn: conn, + incomingFileCh: make(chan File), + certWithChainChan: make(chan *CertWithChainData), + nodeChan: nodeChan, + batchProcessor: NewBatchProcessor(conn, nodeChan), errorCh: make(chan error), doneCh: make(chan error), @@ -48,68 +50,54 @@ func NewProcessor(conn db.Conn) *Processor { return p } +// start starts the pipeline. The pipeline consists on the following transformations: +// - File to rows. +// - Row to certificate with chain. +// - Certificate with chain to certificate with immediate parent. +// This pipeline ends here, and it's picked up by other processor. +// Each stage (transformation) is represented by a goroutine spawned in this start function. +// Each stage reads from the previous channel and outputs to the next channel. +// Each stage closes the channel it outputs to. func (p *Processor) start() { // Process files and parse the CSV contents: go func() { + // Spawn a fixed number of file readers. 
wg := sync.WaitGroup{} wg.Add(NumFileReaders) for r := 0; r < NumFileReaders; r++ { go func() { defer wg.Done() for f := range p.incomingFileCh { - func() { - r, err := f.Open() - if err != nil { - p.errorCh <- err - return - } - if err := p.ingestWithCSV(r); err != nil { - p.errorCh <- err - return - } - if err := f.Close(); err != nil { - p.errorCh <- err - return - } - fmt.Printf(".") - }() + p.processFile(f) } }() } wg.Wait() fmt.Println() fmt.Println("Done with incoming files, closing parsed data channel.") - // Because we are done writing parsed content, close that channel. - close(p.fromParserCh) + // Because we are done writing parsed content, close this stage's output channel: + close(p.certWithChainChan) }() - // Process the parsed content into the DB: + // Process the parsed content into the DB, and from DB into SMT: go func() { - batch := NewBatch() - for data := range p.fromParserCh { - batch.AddCert(data) - if batch.Full() { - p.batchProcessor.Process(batch) - // fmt.Print(".") - batch = NewBatch() + for data := range p.certWithChainChan { + certs, parents := updater.UnfoldCert(data.Cert, data.CertChain) + for i := range certs { + p.nodeChan <- &CertificateNode{ + Names: data.DomainNames, + Cert: certs[i], + Parent: parents[i], + } } } - // Process last batch, which may have zero size. - p.batchProcessor.Process(batch) - fmt.Println() - p.batchProcessor.Wait() - - fmt.Printf("\ndeleteme done ingesting the certificates. 
SMT still to go\n\n\n\n") + // This stage has finished, close the output channel: + close(p.nodeChan) - if 4%5 != 0 { // deleteme - close(p.errorCh) - return - } // Now start processing the changed domains into the SMT: smtProcessor := NewSMTUpdater(p.Conn, nil, 32) smtProcessor.Start() if err := smtProcessor.Wait(); err != nil { - fmt.Printf("deleteme error found in SMT processing: %s\n", err) p.errorCh <- err } @@ -126,26 +114,51 @@ func (p *Processor) start() { func (p *Processor) Wait() error { // Close the parsing and incoming channels: - fmt.Println("deleteme closing incomingFileCh") close(p.incomingFileCh) // Wait until all data has been processed. - fmt.Println("deleteme waiting for done signal") return <-p.doneCh } +// AddGzFiles adds a CSV .gz file to the initial stage. +// It blocks until it is accepted. func (p *Processor) AddGzFiles(fileNames []string) { for _, filename := range fileNames { p.incomingFileCh <- (&GzFile{}).WithFile(filename) } } +// AddGzFiles adds a .csv file to the initial stage. +// It blocks until it is accepted. func (p *Processor) AddCsvFiles(fileNames []string) { for _, filename := range fileNames { p.incomingFileCh <- (&CsvFile{}).WithFile(filename) } } +// processFile processes any File. +// This stage is responsible of parsing the data into X509 certificates and chains. +func (p *Processor) processFile(f File) { + r, err := f.Open() + if err != nil { + p.errorCh <- err + return + } + // ingestWithCSV will send data to the cert with chain channel + if err := p.ingestWithCSV(r); err != nil { + p.errorCh <- err + return + } + if err := f.Close(); err != nil { + p.errorCh <- err + return + } +} + +// ingestWithCSV spawns as many goroutines as specified by the constant `NumParsers`, +// that divide the CSV rows and parse them. +// For efficiency reasons, the whole file is read at once in memory, and its rows divided +// from there. 
func (p *Processor) ingestWithCSV(fileReader io.Reader) error { reader := csv.NewReader(fileReader) reader.FieldsPerRecord = -1 // don't check number of fields @@ -175,7 +188,7 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) } } - p.fromParserCh <- &CertData{ + p.certWithChainChan <- &CertWithChainData{ DomainNames: domainNames, Cert: cert, CertChain: chain, @@ -219,14 +232,14 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { return nil } +// processErrorChannel outputs the errors it encounters in the errors channel. +// Returns with error if any is found, or nil if no error. func (p *Processor) processErrorChannel() error { var errorsFound bool - fmt.Println("deleteme processing error channel") for err := range p.errorCh { if err == nil { continue } - fmt.Println("deleteme errors found") errorsFound = true fmt.Fprintf(os.Stderr, "%s\n", err) } diff --git a/cmd/ingest/smt_updater.go b/cmd/ingest/smt_updater.go index dd59b7e8..3ace9fe8 100644 --- a/cmd/ingest/smt_updater.go +++ b/cmd/ingest/smt_updater.go @@ -34,7 +34,16 @@ func NewSMTUpdater(conn db.Conn, root []byte, cacheHeight int) *SMTUpdater { } func (u *SMTUpdater) Start() { - fmt.Println("deleteme starting SMT updater") + fmt.Println("Starting SMT updater") + + // Start processing the error channel. + go u.processErrorChannel() + + if 4%5 != 0 { // deleteme + close(u.errorCh) + return + } + // Read batches of updated nodes from `updates`: go func() { domainsCh, errorCh := u.Store.UpdatedDomains() @@ -56,25 +65,22 @@ func (u *SMTUpdater) Start() { // Nothing else to process, close error channel. 
close(u.errorCh) }() - go u.processErrorChannel() + } func (u *SMTUpdater) Wait() error { - fmt.Println("deleteme waiting for SMT updater to finish") return <-u.doneCh } func (u *SMTUpdater) processErrorChannel() { var withErrors bool for err := range u.errorCh { - if err == nil { - continue + if err != nil { + withErrors = true + fmt.Printf("SMT update, error: %s\n", err) } - withErrors = true - fmt.Printf("SMT update, error: %s\n", err) } if withErrors { - fmt.Println("deleteme errors found") u.doneCh <- fmt.Errorf("errors found") } else { u.doneCh <- nil From e655262fd61cd03f333c5789e99370c8bcae541b Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 9 Feb 2023 09:17:38 +0100 Subject: [PATCH 024/187] Certs table has id as UNIQUE primary key. Check existing certificates before sending them to the DB, this should save time of transmission and insertion. --- pkg/db/db.go | 8 ++- pkg/db/mysql.go | 56 +++++++++++++-- pkg/mapserver/internal/mockdb_for_testing.go | 13 +++- pkg/mapserver/updater/updater.go | 72 ++++++++++++++++++-- pkg/mapserver/updater/updater_test.go | 42 ++++++++++++ tools/create_schema.sh | 6 +- 6 files changed, 180 insertions(+), 17 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index b9779fb3..8218c3df 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -31,8 +31,12 @@ type Conn interface { // DisableIndexing starts the indexing in the table. EnableIndexing(table string) error - InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, - parents []common.SHA256Output) error + // CheckCertsExist returns a slice of true/false values. Each value indicates if + // the corresponding certificate identified by its ID is already present in the DB. 
+ CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) + + InsertCerts(ctx context.Context, ids []*common.SHA256Output, payloads [][]byte, + parents []*common.SHA256Output) error // ************************************************************ // Function for Tree table diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index ec40f497..6a2ce6f5 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -152,17 +152,63 @@ func (c *mysqlDB) EnableIndexing(table string) error { return err } -func (c *mysqlDB) InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, - parents []common.SHA256Output) error { +// CheckCertsExist returns a slice of true/false values. Each value indicates if +// the corresponding certificate identified by its ID is already present in the DB. +func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { + // Slice to be used in the SQL query: + data := make([]interface{}, len(ids)) + for i, id := range ids { + data[i] = id[:] + } + + // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. + elems := make([]string, len(data)) + for i := range elems { + elems[i] = "SELECT ? AS id" + } + + // The query means: join two tables, one with the values I am passing as arguments (those + // are the ids) and the certs table, and for those that exist write a 1, otherwise a 0. + // Finally, group_concat all rows into just one field of type string. 
+ str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + + "SELECT (CASE WHEN certs.id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + + strings.Join(elems, " UNION ALL ") + + ") AS request left JOIN ( SELECT id FROM certs ) AS certs ON certs.id = request.id" + + ") AS t" + + // Return slice of booleans: + present := make([]bool, len(ids)) + + var value string + if err := c.db.QueryRowContext(ctx, str, data...).Scan(&value); err != nil { + return nil, err + } + for i, c := range value { + if c == '1' { + present[i] = true + } + } + + return present, nil +} +func (c *mysqlDB) InsertCerts(ctx context.Context, ids []*common.SHA256Output, payloads [][]byte, + parents []*common.SHA256Output) error { + + if len(ids) == 0 { + return nil + } // TODO(juagargi) set a prepared statement in constructor - // str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) - str := "INSERT into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) + // Because the primary key is the SHA256 of the payload, if there is a clash, it must + // be that the certificates are identical. Thus always replace. + str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) data := make([]interface{}, 3*len(ids)) for i := range ids { data[i*3] = ids[i][:] data[i*3+1] = payloads[i] - data[i*3+2] = parents[i][:] + if parents[i] != nil { + data[i*3+2] = parents[i][:] + } } _, err := c.db.Exec(str, data...) 
if err != nil { diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index adb773bc..9be7d54f 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -2,6 +2,7 @@ package internal import ( "context" + "database/sql" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" @@ -27,6 +28,10 @@ func NewMockDB() *MockDB { } } +func (d *MockDB) DB() *sql.DB { + return nil +} + // Close closes the connection. func (d *MockDB) Close() error { return nil } @@ -36,8 +41,12 @@ func (d *MockDB) DisableIndexing(table string) error { return nil } func (d *MockDB) EnableIndexing(table string) error { return nil } -func (d *MockDB) InsertCerts(ctx context.Context, ids []common.SHA256Output, payloads [][]byte, - parents []common.SHA256Output) error { +func (d *MockDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { + return make([]bool, len(ids)), nil +} + +func (d *MockDB) InsertCerts(ctx context.Context, ids []*common.SHA256Output, payloads [][]byte, + parents []*common.SHA256Output) error { return nil } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 5f3d7f1f..891916f0 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -231,23 +231,25 @@ func (mapUpdater *MapUpdater) Close() error { return mapUpdater.smt.Close() } -func UpdateCerts(ctx context.Context, conn db.Conn, names [][]string, +func updateCertsOldMethodDeleteme(ctx context.Context, conn db.Conn, names [][]string, certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { - ids := make([]common.SHA256Output, len(certs)) + ids := make([]*common.SHA256Output, len(certs)) payloads := make([][]byte, len(certs)) - parentIds := make([]common.SHA256Output, len(certs)) + parentIds := make([]*common.SHA256Output, len(certs)) for i, c := range certs { - ids[i] = 
common.SHA256Hash32Bytes(c.Raw) + id := common.SHA256Hash32Bytes(c.Raw) + ids[i] = &id payloads[i] = c.Raw if parents[i] != nil { - parentIds[i] = common.SHA256Hash32Bytes(parents[i].Raw) + id = common.SHA256Hash32Bytes(parents[i].Raw) + parentIds[i] = &id } } // TODO(juagargi) check first in DB which cert ids are already present and skip sending them - if err := conn.InsertCerts(context.Background(), ids, payloads, parentIds); err != nil { + if err := conn.InsertCerts(ctx, ids, payloads, parentIds); err != nil { return err } @@ -255,3 +257,61 @@ func UpdateCerts(ctx context.Context, conn db.Conn, names [][]string, // TODO(juagargi) return nil } + +func UpdateCerts(ctx context.Context, conn db.Conn, names [][]string, + certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { + + ids := make([]*common.SHA256Output, len(certs)) + for i, c := range certs { + id := common.SHA256Hash32Bytes(c.Raw) + ids[i] = &id + } + + // First check which certificates are already present in the DB. + mask, err := conn.CheckCertsExist(ctx, ids) + if err != nil { + return err + } + payloads := make([][]byte, 0, len(certs)) + parentIds := make([]*common.SHA256Output, 0, len(certs)) + // Prepare new parents, IDs and payloads skipping those certificates already in the DB. + runWhenFalse(mask, func(to, from int) { + if to != from { // probably unnecessary check, as swapping with itself would be okay + ids[to] = ids[from] + } + payloads = append(payloads, certs[from].Raw) + var parent *common.SHA256Output + if parents[from] != nil { + id := common.SHA256Hash32Bytes(parents[from].Raw) + parent = &id + } + parentIds = append(parentIds, parent) + }) + // deleteme We expect only 1320 unique certificates from the ~ 100 Million certificates in DB + // DELETEME but when inserting only with the primary key I see 6,392,902 rows. + // DELETEME but with primary key and unique key in id, I see 6,392,913 rows (not the same!) 
+ // if len(ids) != len(payloads) { + // panic(fmt.Sprintf("different sizes original %d != new %d", len(ids), len(payloads))) + // } + // Trim the end of the original ID slice, as it contains values from the unmasked certificates. + ids = ids[:len(payloads)] + + // Only insert those certificates that are not in the mask. + if err := conn.InsertCerts(ctx, ids, payloads, parentIds); err != nil { + return err + } + + // Each cert that has been updated needs an entry in `domains` and `dirty` + // TODO(juagargi) + return nil +} + +func runWhenFalse(mask []bool, fcn func(to, from int)) { + to := 0 + for from, condition := range mask { + if !condition { + fcn(to, from) + to++ + } + } +} diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 471f704c..2f44354c 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -171,6 +171,48 @@ func TestFetchUpdatedDomainHash(t *testing.T) { assert.Equal(t, 0, len(updaterDB.UpdatesTable)) } +func TestRunWhenFalse(t *testing.T) { + cases := map[string]struct { + presence []bool + fromParams []int + toParams []int + }{ + "empty": { + fromParams: []int{}, + toParams: []int{}, + }, + "one": { + presence: []bool{false}, + fromParams: []int{0}, + toParams: []int{0}, + }, + "one_true": { + presence: []bool{true}, + fromParams: []int{}, + toParams: []int{}, + }, + "010": { + presence: []bool{false, true, false}, + fromParams: []int{0, 2}, + toParams: []int{0, 1}, + }, + } + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + gotTo := make([]int, 0) + gotFrom := make([]int, 0) + runWhenFalse(tc.presence, func(to, from int) { + gotTo = append(gotTo, to) + gotFrom = append(gotFrom, from) + }) + assert.Equal(t, tc.fromParams, gotFrom) + assert.Equal(t, tc.toParams, gotTo) + }) + } +} + func getRandomHash() projectCommon.SHA256Output { return projectCommon.SHA256Hash32Bytes(generateRandomBytes(50)) } diff --git 
a/tools/create_schema.sh b/tools/create_schema.sh index 12ac5ce6..4cf6d707 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -27,9 +27,11 @@ echo "$CMD" | mysql -u root CMD=$(cat < Date: Thu, 9 Feb 2023 09:31:22 +0100 Subject: [PATCH 025/187] Finish all pipelines cleanly. Block until the batch processor pipeline has finished. Signal again done if that signal is intercepted. --- cmd/ingest/batch.go | 3 ++- cmd/ingest/main.go | 2 +- cmd/ingest/processor.go | 3 +++ 3 files changed, 6 insertions(+), 2 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index e6e25cb2..41cd3b05 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -98,7 +98,7 @@ func (p *CertificateProcessor) start() { }() } wg.Wait() - // This stage is finished, indicate so. + // This pipeline is finished, signal it. p.doneCh <- struct{}{} }() @@ -110,6 +110,7 @@ func (p *CertificateProcessor) start() { select { case <-ticker.C: case <-p.doneCh: + p.doneCh <- struct{}{} // signal again return } writtenCerts := p.writtenCerts.Load() diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 6fba0db3..42a5b4aa 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -28,7 +28,7 @@ const ( // Times gathered at jupiter, 64 gz files, no CSV // InnoDB: 8m 17s -// MyISAM: 1m 33s +// MyISAM: 1m 33s 374 Mb/s func main() { os.Exit(mainFunction()) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index a1c947e4..29cbc4b2 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -94,6 +94,9 @@ func (p *Processor) start() { // This stage has finished, close the output channel: close(p.nodeChan) + // Wait for the next stage to finish + p.batchProcessor.Wait() + // Now start processing the changed domains into the SMT: smtProcessor := NewSMTUpdater(p.Conn, nil, 32) smtProcessor.Start() From bcc5077613e8a4f5f7ee7e05415754089e6524e0 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Mon, 13 Feb 2023 22:03:29 +0100 Subject: [PATCH 026/187] Bump to go 1.18. Utils like delve don't like anything lower. --- go.mod | 8 ++++---- go.sum | 6 ------ 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 8c99a7bb..eecf012f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/netsec-ethz/fpki -go 1.17 +go 1.18 require ( github.com/go-sql-driver/mysql v1.7.0 @@ -11,8 +11,9 @@ require ( github.com/minio/sha256-simd v1.0.0 github.com/stretchr/testify v1.7.4 github.com/transparency-dev/merkle v0.0.1 - golang.org/x/net v0.7.0 - golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 + go.uber.org/atomic v1.9.0 + golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c google.golang.org/grpc v1.47.0 google.golang.org/protobuf v1.28.0 ) @@ -101,7 +102,6 @@ require ( go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect go.opentelemetry.io/otel/trace v0.20.0 // indirect go.opentelemetry.io/proto/otlp v0.7.0 // indirect - go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect diff --git a/go.sum b/go.sum index ebbaa41c..2c38700a 100644 --- a/go.sum +++ b/go.sum @@ -166,7 +166,6 @@ github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054 h1:uH66TXeswKn5PW5zdZ39xEwfS9an067BirqA+P4QaLI= github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod 
h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= @@ -203,14 +202,12 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -408,7 +405,6 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ 
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= -github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -707,7 +703,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= -github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -816,7 +811,6 @@ go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod 
h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= From b9660094cf0b3557f5a41757749b547d7c5d371a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 13 Feb 2023 22:09:38 +0100 Subject: [PATCH 027/187] Allow two update strategies: overwrite and keep. Overwrite always sends the certificates to DB, even when they exist. Keep queries for their existence before sending them. --- cmd/ingest/batch.go | 94 ++++++++++++++++++++++++++++++-- cmd/ingest/main.go | 24 +++++++- cmd/ingest/processor.go | 4 +- pkg/db/mysql.go | 2 +- pkg/mapserver/updater/updater.go | 31 ++--------- tools/create_schema.sh | 3 +- 6 files changed, 119 insertions(+), 39 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 41cd3b05..b8800974 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -51,6 +51,9 @@ func (b *CertBatch) IsFull() bool { type CertificateProcessor struct { conn db.Conn + updateCertBatch UpdateCertificateFunction // update strategy dependent method + strategy CertificateUpdateStrategy + incomingCh chan *CertificateNode // From the previous processor incomingBatch chan *CertBatch // Ready to be inserted doneCh chan struct{} @@ -59,13 +62,39 @@ type CertificateProcessor struct { writtenBytes atomic.Int64 } -func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode) *CertificateProcessor { +type CertificateUpdateStrategy int + +const ( + CertificateUpdateOverwrite CertificateUpdateStrategy = 0 + CertificateUpdateKeepExisting CertificateUpdateStrategy = 1 +) + +type UpdateCertificateFunction func( + context.Context, db.Conn, [][]string, []*ctx509.Certificate, []*ctx509.Certificate) error + +func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode, + strategy CertificateUpdateStrategy) *CertificateProcessor { + + // Select the update certificate method 
depending on the strategy: + var updateFcn UpdateCertificateFunction + switch strategy { + case CertificateUpdateOverwrite: + updateFcn = updater.UpdateCertsWithOverwrite + case CertificateUpdateKeepExisting: + updateFcn = updater.UpdateCertsWithKeepExisting + default: + panic(fmt.Errorf("invalid strategy %v", strategy)) + } + p := &CertificateProcessor{ - conn: conn, - incomingCh: incoming, - incomingBatch: make(chan *CertBatch), - doneCh: make(chan struct{}), + conn: conn, + updateCertBatch: updateFcn, + strategy: strategy, + incomingCh: incoming, + incomingBatch: make(chan *CertBatch), + doneCh: make(chan struct{}), } + p.start() return p } @@ -73,6 +102,10 @@ func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode) *Certificat // start starts the pipeline. // Two stages in this processor: from certificate node to batch, and from batch to DB. func (p *CertificateProcessor) start() { + // Prepare DB for certificate update. + p.PrepareDB() + + // Start pipeline. go func() { batch := NewCertificateBatch() for c := range p.incomingCh { @@ -98,6 +131,8 @@ func (p *CertificateProcessor) start() { }() } wg.Wait() + // Leave the DB ready again. + p.ConsolidateDB() // This pipeline is finished, signal it. p.doneCh <- struct{}{} }() @@ -128,9 +163,55 @@ func (p *CertificateProcessor) Wait() { <-p.doneCh } +// PrepareDB prepares the DB for certificate insertion. This could imply dropping keys, +// disabling indices, etc. depending on the update strategy. +// Before the DB is functional again, it needs a call to ConsolidateDB. +func (p *CertificateProcessor) PrepareDB() { + switch p.strategy { + case CertificateUpdateOverwrite: + // Try to remove unique index `id` and primary key. They may not exist. + if _, err := p.conn.DB().Exec("ALTER TABLE certs DROP PRIMARY KEY"); err != nil { + panic(fmt.Errorf("disabling keys: %s", err)) + } + } +} + +// ConsolidateDB finishes the certificate update process and leaves the DB ready again. 
+func (p *CertificateProcessor) ConsolidateDB() { + switch p.strategy { + case CertificateUpdateOverwrite: + // Reenable keys: + fmt.Println("Reenabling keys in DB.certs ... ") + str := "DROP TABLE IF EXISTS certs_aux_tmp" + if _, err := p.conn.DB().Exec(str); err != nil { + panic(fmt.Errorf("reenabling keys: %s", err)) + } + str = "CREATE TABLE certs_aux_tmp LIKE certs;" + if _, err := p.conn.DB().Exec(str); err != nil { + panic(fmt.Errorf("reenabling keys: %s", err)) + } + str = "ALTER TABLE certs_aux_tmp ADD PRIMARY KEY (id)" + if _, err := p.conn.DB().Exec(str); err != nil { + panic(fmt.Errorf("reenabling keys: %s", err)) + } + str = "INSERT IGNORE INTO certs_aux_tmp SELECT * FROM certs" + if _, err := p.conn.DB().Exec(str); err != nil { + panic(fmt.Errorf("reenabling keys: %s", err)) + } + str = "DROP TABLE certs" + if _, err := p.conn.DB().Exec(str); err != nil { + panic(fmt.Errorf("reenabling keys: %s", err)) + } + str = "ALTER TABLE certs_aux_tmp RENAME TO certs" + if _, err := p.conn.DB().Exec(str); err != nil { + panic(fmt.Errorf("reenabling keys: %s", err)) + } + } +} + func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: - err := updater.UpdateCerts(context.Background(), p.conn, batch.Names, batch.Certs, batch.Parents) + err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Certs, batch.Parents) if err != nil { panic(err) } @@ -146,5 +227,6 @@ func (p *CertificateProcessor) processBatch(batch *CertBatch) { } p.writtenBytes.Add(int64(bytesInBatch)) + // Each cert that has been updated needs an entry in `domains` and `dirty` // TODO(juagargi) push entries to the dirty table } diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 42a5b4aa..49508347 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -27,8 +27,10 @@ const ( ) // Times gathered at jupiter, 64 gz files, no CSV -// InnoDB: 8m 17s -// MyISAM: 1m 33s 374 Mb/s +// InnoDB: 8m 17s +// MyISAM overwrite, no pk (invalid DB): 1m 
33s 374 Mb/s +// MyISAM overwrite, afterwards pk: 3m 22s 175.9 Mb/s +// MyISAM keep, already with pk: 2m 26s 241.0 Mb/s func main() { os.Exit(mainFunction()) @@ -40,12 +42,28 @@ func mainFunction() int { } cpuProfile := flag.String("cpuprofile", "", "write a CPU profile to file") memProfile := flag.String("memprofile", "", "write a memory profile to file") + certUpdateStrategy := flag.String("strategy", "keep", "strategy to update certificates\n"+ + "\"overwrite\": always send certificates to DB, even if they exist already\n"+ + "\"keep\": first check if each certificate exists already in DB before sending it\n"+ + `If data transfer to DB is expensive, "keep" is recommended.`) flag.Parse() + if flag.NArg() != 1 { flag.Usage() return 1 } + // Update strategy. + var strategy CertificateUpdateStrategy + switch *certUpdateStrategy { + case "overwrite": + strategy = CertificateUpdateOverwrite + case "keep": + strategy = CertificateUpdateKeepExisting + default: + panic(fmt.Errorf("bad update strategy: %v", *certUpdateStrategy)) + } + // Profiling: stopProfiles := func() { if *cpuProfile != "" { @@ -89,7 +107,7 @@ func mainFunction() int { exitIfError(conn.TruncateAllTables()) // Update certificates and chains. 
- proc := NewProcessor(conn) + proc := NewProcessor(conn, strategy) proc.AddGzFiles(gzFiles) proc.AddCsvFiles(csvFiles) exitIfError(proc.Wait()) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 29cbc4b2..da8c73c4 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -34,14 +34,14 @@ type CertWithChainData struct { CertChain []*ctx509.Certificate } -func NewProcessor(conn db.Conn) *Processor { +func NewProcessor(conn db.Conn, certUpdateStrategy CertificateUpdateStrategy) *Processor { nodeChan := make(chan *CertificateNode) p := &Processor{ Conn: conn, incomingFileCh: make(chan File), certWithChainChan: make(chan *CertWithChainData), nodeChan: nodeChan, - batchProcessor: NewBatchProcessor(conn, nodeChan), + batchProcessor: NewBatchProcessor(conn, nodeChan, certUpdateStrategy), errorCh: make(chan error), doneCh: make(chan error), diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index 6a2ce6f5..a523469b 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -200,7 +200,7 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids []*common.SHA256Output, p } // TODO(juagargi) set a prepared statement in constructor // Because the primary key is the SHA256 of the payload, if there is a clash, it must - // be that the certificates are identical. Thus always replace. + // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) data := make([]interface{}, 3*len(ids)) for i := range ids { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 891916f0..91191915 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -90,7 +90,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ certChains = append(certChains, chain) } certs, parents := UnfoldCerts(certs, certChains) - return UpdateCerts(ctx, mapUpdater.dbConn, names, certs, parents) + return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, certs, parents) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -231,7 +231,7 @@ func (mapUpdater *MapUpdater) Close() error { return mapUpdater.smt.Close() } -func updateCertsOldMethodDeleteme(ctx context.Context, conn db.Conn, names [][]string, +func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { ids := make([]*common.SHA256Output, len(certs)) @@ -246,19 +246,10 @@ func updateCertsOldMethodDeleteme(ctx context.Context, conn db.Conn, names [][]s parentIds[i] = &id } } - - // TODO(juagargi) check first in DB which cert ids are already present and skip sending them - - if err := conn.InsertCerts(ctx, ids, payloads, parentIds); err != nil { - return err - } - - // Each cert that has been updated needs an entry in `domains` and `dirty` - // TODO(juagargi) - return nil + return conn.InsertCerts(ctx, ids, payloads, parentIds) } -func UpdateCerts(ctx context.Context, conn db.Conn, names [][]string, +func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { ids := make([]*common.SHA256Output, len(certs)) @@ -287,23 +278,13 @@ func UpdateCerts(ctx context.Context, conn db.Conn, names [][]string, } 
parentIds = append(parentIds, parent) }) - // deleteme We expect only 1320 unique certificates from the ~ 100 Million certificates in DB - // DELETEME but when inserting only with the primary key I see 6,392,902 rows. - // DELETEME but with primary key and unique key in id, I see 6,392,913 rows (not the same!) - // if len(ids) != len(payloads) { - // panic(fmt.Sprintf("different sizes original %d != new %d", len(ids), len(payloads))) - // } + // Trim the end of the original ID slice, as it contains values from the unmasked certificates. ids = ids[:len(payloads)] // Only insert those certificates that are not in the mask. - if err := conn.InsertCerts(ctx, ids, payloads, parentIds); err != nil { - return err - } + return conn.InsertCerts(ctx, ids, payloads, parentIds) - // Each cert that has been updated needs an entry in `domains` and `dirty` - // TODO(juagargi) - return nil } func runWhenFalse(mask []bool, fcn func(to, from int)) { diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 4cf6d707..31bdcb0a 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -30,8 +30,7 @@ CREATE TABLE certs ( id VARBINARY(32) NOT NULL, payload LONGBLOB, parent VARBINARY(32) DEFAULT NULL, - PRIMARY KEY(id), - UNIQUE KEY(id) + PRIMARY KEY(id) ) ENGINE=MyISAM CHARSET=binary COLLATE=binary; EOF ) From c2b90c8d9b0345919f22bed9fcec1da9bd03aa50 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 13 Feb 2023 22:22:36 +0100 Subject: [PATCH 028/187] Fix bug sending wrong certificate ID sometimes. When the certificate has a parent, it overwrites its ID by not setting the parent's ID to a different variable with a different address. 
--- pkg/mapserver/updater/updater.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 91191915..6e3b0871 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -242,7 +242,7 @@ func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]strin ids[i] = &id payloads[i] = c.Raw if parents[i] != nil { - id = common.SHA256Hash32Bytes(parents[i].Raw) + id := common.SHA256Hash32Bytes(parents[i].Raw) parentIds[i] = &id } } From 43b3feec274d51078bb6d20ef8faef5b37244815 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 14 Feb 2023 13:53:27 +0100 Subject: [PATCH 029/187] Add domain_certs, and more fields to certs. Add expiration and payload_hash to table certs. Add table domain_certs, only with cert_id and domain_id. Remove cert_id from table domains. --- cmd/ingest/batch.go | 25 +++++++++++--------- cmd/ingest/processor.go | 13 ++++------ pkg/db/db.go | 5 ++-- pkg/db/mysql.go | 17 +++++++------ pkg/mapserver/internal/mockdb_for_testing.go | 5 ++-- pkg/mapserver/updater/updater.go | 12 ++++++---- tools/create_schema.sh | 18 +++++++++++--- 7 files changed, 56 insertions(+), 39 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index b8800974..84854e36 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -14,7 +14,6 @@ import ( ) type CertificateNode struct { - Names []string // collection of names per certificate Cert *ctx509.Certificate Parent *ctx509.Certificate } @@ -22,21 +21,24 @@ type CertificateNode struct { // CertBatch is an unwrapped collection of Certificate. // All slices must have the same size. 
type CertBatch struct { - Names [][]string // collection of names per certificate - Certs []*ctx509.Certificate - Parents []*ctx509.Certificate + Names [][]string // collection of names per certificate + Expirations []*time.Time + Certs []*ctx509.Certificate + Parents []*ctx509.Certificate } func NewCertificateBatch() *CertBatch { return &CertBatch{ - Names: make([][]string, 0, BatchSize), - Certs: make([]*ctx509.Certificate, 0, BatchSize), - Parents: make([]*ctx509.Certificate, 0, BatchSize), + Names: make([][]string, 0, BatchSize), + Expirations: make([]*time.Time, 0, BatchSize), + Certs: make([]*ctx509.Certificate, 0, BatchSize), + Parents: make([]*ctx509.Certificate, 0, BatchSize), } } func (b *CertBatch) AddCertificate(c *CertificateNode) { - b.Names = append(b.Names, c.Names) + b.Names = append(b.Names, updater.ExtractCertDomains(c.Cert)) + b.Expirations = append(b.Expirations, &c.Cert.NotAfter) b.Certs = append(b.Certs, c.Cert) b.Parents = append(b.Parents, c.Parent) } @@ -69,8 +71,8 @@ const ( CertificateUpdateKeepExisting CertificateUpdateStrategy = 1 ) -type UpdateCertificateFunction func( - context.Context, db.Conn, [][]string, []*ctx509.Certificate, []*ctx509.Certificate) error +type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, + []*ctx509.Certificate, []*ctx509.Certificate) error func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { @@ -211,7 +213,8 @@ func (p *CertificateProcessor) ConsolidateDB() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: - err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Certs, batch.Parents) + err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, + batch.Certs, batch.Parents) if err != nil { panic(err) } diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index da8c73c4..35773607 100644 --- 
a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -29,9 +29,8 @@ type Processor struct { } type CertWithChainData struct { - DomainNames []string - Cert *ctx509.Certificate - CertChain []*ctx509.Certificate + Cert *ctx509.Certificate + CertChain []*ctx509.Certificate } func NewProcessor(conn db.Conn, certUpdateStrategy CertificateUpdateStrategy) *Processor { @@ -85,7 +84,6 @@ func (p *Processor) start() { certs, parents := updater.UnfoldCert(data.Cert, data.CertChain) for i := range certs { p.nodeChan <- &CertificateNode{ - Names: data.DomainNames, Cert: certs[i], Parent: parents[i], } @@ -176,8 +174,6 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { return err } - domainNames := updater.ExtractCertDomains(cert) - // The certificate chain is a list of base64 strings separated by semicolon (;). strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) @@ -192,9 +188,8 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { } } p.certWithChainChan <- &CertWithChainData{ - DomainNames: domainNames, - Cert: cert, - CertChain: chain, + Cert: cert, + CertChain: chain, } return nil } diff --git a/pkg/db/db.go b/pkg/db/db.go index 8218c3df..5708f8d0 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -3,6 +3,7 @@ package db import ( "context" "database/sql" + "time" "github.com/netsec-ethz/fpki/pkg/common" ) @@ -35,8 +36,8 @@ type Conn interface { // the corresponding certificate identified by its ID is already present in the DB. 
CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) - InsertCerts(ctx context.Context, ids []*common.SHA256Output, payloads [][]byte, - parents []*common.SHA256Output) error + InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, + payloads [][]byte) error // ************************************************************ // Function for Tree table diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index a523469b..13983e94 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -5,6 +5,7 @@ import ( "database/sql" "fmt" "strings" + "time" _ "github.com/go-sql-driver/mysql" "github.com/netsec-ethz/fpki/pkg/common" @@ -192,8 +193,8 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu return present, nil } -func (c *mysqlDB) InsertCerts(ctx context.Context, ids []*common.SHA256Output, payloads [][]byte, - parents []*common.SHA256Output) error { +func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error { if len(ids) == 0 { return nil @@ -201,14 +202,16 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids []*common.SHA256Output, p // TODO(juagargi) set a prepared statement in constructor // Because the primary key is the SHA256 of the payload, if there is a clash, it must // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
- str := "REPLACE into certs (id, payload, parent) values " + repeatStmt(len(ids), 3) - data := make([]interface{}, 3*len(ids)) + const numFields = 4 + str := "REPLACE into certs (id, parent, expiration, payload) values " + repeatStmt(len(ids), numFields) + data := make([]interface{}, numFields*len(ids)) for i := range ids { - data[i*3] = ids[i][:] - data[i*3+1] = payloads[i] + data[i*numFields] = ids[i][:] if parents[i] != nil { - data[i*3+2] = parents[i][:] + data[i*numFields+1] = parents[i][:] } + data[i*numFields+2] = expirations[i] + data[i*numFields+3] = payloads[i] } _, err := c.db.Exec(str, data...) if err != nil { diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index 9be7d54f..e38d19ed 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -3,6 +3,7 @@ package internal import ( "context" "database/sql" + "time" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" @@ -45,8 +46,8 @@ func (d *MockDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output return make([]bool, len(ids)), nil } -func (d *MockDB) InsertCerts(ctx context.Context, ids []*common.SHA256Output, payloads [][]byte, - parents []*common.SHA256Output) error { +func (d *MockDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error { return nil } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 6e3b0871..81401c5f 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -70,6 +70,7 @@ func (u *MapUpdater) UpdateNextBatch(ctx context.Context) (int, error) { // UpdateCertsLocally: add certs (in the form of asn.1 encoded byte arrays) directly without querying log func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [][]byte, certChainList [][][]byte) error { names := 
make([][]string, 0, len(certList)) // Set of names per certificate + expirations := make([]*time.Time, 0, len(certList)) certs := make([]*ctx509.Certificate, 0, len(certList)) certChains := make([][]*ctx509.Certificate, 0, len(certList)) for i, certRaw := range certList { @@ -79,6 +80,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ } certs = append(certs, cert) names = append(names, ExtractCertDomains(cert)) + expirations = append(expirations, &cert.NotAfter) chain := make([]*ctx509.Certificate, len(certChainList[i])) for i, certChainItemRaw := range certChainList[i] { @@ -90,7 +92,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ certChains = append(certChains, chain) } certs, parents := UnfoldCerts(certs, certChains) - return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, certs, parents) + return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, parents) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -232,7 +234,7 @@ func (mapUpdater *MapUpdater) Close() error { } func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, - certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { + expirations []*time.Time, certs, parents []*ctx509.Certificate) error { ids := make([]*common.SHA256Output, len(certs)) payloads := make([][]byte, len(certs)) @@ -246,11 +248,11 @@ func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]strin parentIds[i] = &id } } - return conn.InsertCerts(ctx, ids, payloads, parentIds) + return conn.InsertCerts(ctx, ids, parentIds, expirations, payloads) } func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, - certs []*ctx509.Certificate, parents []*ctx509.Certificate) error { + expirations []*time.Time, certs, parents []*ctx509.Certificate) error { ids := make([]*common.SHA256Output, len(certs)) for i, c := range certs { 
@@ -283,7 +285,7 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st ids = ids[:len(payloads)] // Only insert those certificates that are not in the mask. - return conn.InsertCerts(ctx, ids, payloads, parentIds) + return conn.InsertCerts(ctx, ids, parentIds, expirations, payloads) } diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 31bdcb0a..dd57bbd3 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -28,8 +28,9 @@ CMD=$(cat < Date: Tue, 14 Feb 2023 21:01:23 +0100 Subject: [PATCH 030/187] Added comment about aio-max-nr in sysctl.conf. --- tools/README.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tools/README.md b/tools/README.md index b9c037b1..77a365ec 100644 --- a/tools/README.md +++ b/tools/README.md @@ -22,6 +22,13 @@ To allow mysqld reading from /tmp/ (load data infile) in ubuntu: - Reload apparmor `sudo systemctl reload apparmor.service` +## System + +Don't forget to change the kernel parameters to allow a higher number of AIO operations. +In `/etc/sysctl.conf` add the line `fs.aio-max-nr = 1048576`. + +And look at the `fpki.cnf` file in this repository to copy those values inside `/etc/mysql/conf.d/`. + ## Analyze performance DESCRIBE SELECT * FROM nodes WHERE id=1234; From ac25da3e792cafb949ad69f399226864239ee32e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 14 Feb 2023 21:07:24 +0100 Subject: [PATCH 031/187] Remove recently added domain_certs table. Not needed. Reinstate the domain_name in the domains table. Update domains after pushing new certificates. Update dirty table as well. 
--- cmd/ingest/batch.go | 3 -- pkg/db/db.go | 5 ++ pkg/db/mysql.go | 48 ++++++++++++++++---- pkg/mapserver/internal/mockdb_for_testing.go | 6 +++ pkg/mapserver/updater/updater.go | 35 ++++++++++++-- tools/create_schema.sh | 18 ++------ 6 files changed, 86 insertions(+), 29 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 84854e36..3c757a6a 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -229,7 +229,4 @@ func (p *CertificateProcessor) processBatch(batch *CertBatch) { } } p.writtenBytes.Add(int64(bytesInBatch)) - - // Each cert that has been updated needs an entry in `domains` and `dirty` - // TODO(juagargi) push entries to the dirty table } diff --git a/pkg/db/db.go b/pkg/db/db.go index 5708f8d0..5b2b4c85 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -39,6 +39,11 @@ type Conn interface { InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error + // UpdateDomainsWithCerts updates the domains and dirty tables with entries that are + // _probably_ not present there. + UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, + domainNames []string) error + // ************************************************************ // Function for Tree table // ************************************************************ diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index 13983e94..0a9ec232 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -202,26 +202,58 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256 // TODO(juagargi) set a prepared statement in constructor // Because the primary key is the SHA256 of the payload, if there is a clash, it must // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
- const numFields = 4 - str := "REPLACE into certs (id, parent, expiration, payload) values " + repeatStmt(len(ids), numFields) - data := make([]interface{}, numFields*len(ids)) + const N = 4 + str := "REPLACE INTO certs (id, parent, expiration, payload) VALUES " + repeatStmt(len(ids), N) + data := make([]interface{}, N*len(ids)) for i := range ids { - data[i*numFields] = ids[i][:] + data[i*N] = ids[i][:] if parents[i] != nil { - data[i*numFields+1] = parents[i][:] + data[i*N+1] = parents[i][:] } - data[i*numFields+2] = expirations[i] - data[i*numFields+3] = payloads[i] + data[i*N+2] = expirations[i] + data[i*N+3] = payloads[i] } _, err := c.db.Exec(str, data...) if err != nil { - return err } return nil } +// UpdateDomainsWithCerts updates both the domains and the dirty tables. +func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, + domainNames []string) error { + + if len(certIDs) == 0 { + return nil + } + // First insert into domains: + const N = 3 + str := "INSERT IGNORE INTO domains (cert_id,domain_id,domain_name) VALUES " + + repeatStmt(len(certIDs), N) + data := make([]interface{}, N*len(certIDs)) + for i := range certIDs { + data[i*N] = certIDs[i][:] + data[i*N+1] = domainIDs[i][:] + data[i*N+2] = domainNames[i] + } + _, err := c.db.Exec(str, data...) + if err != nil { + return err + } + + // Now insert into dirty. + str = "REPLACE INTO dirty (domain_id) VALUES " + repeatStmt(len(domainIDs), 1) + data = make([]interface{}, len(domainIDs)) + for i, id := range domainIDs { + data[i] = id[:] + } + _, err = c.db.Exec(str, data...) + + return err +} + // repeatStmt returns ( (?,..inner..,?), ...outer... 
) func repeatStmt(outer int, inner int) string { components := make([]string, inner) diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index e38d19ed..f0eaca08 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -52,6 +52,12 @@ func (d *MockDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256O return nil } +func (d *MockDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, + domainNames []string) error { + + return nil +} + func (d *MockDB) RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ([]byte, error) { return d.TreeTable[id], nil } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 81401c5f..96889d54 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -248,7 +248,7 @@ func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]strin parentIds[i] = &id } } - return conn.InsertCerts(ctx, ids, parentIds, expirations, payloads) + return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads) } func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, @@ -271,6 +271,7 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st runWhenFalse(mask, func(to, from int) { if to != from { // probably unnecessary check, as swapping with itself would be okay ids[to] = ids[from] + names[to] = names[from] } payloads = append(payloads, certs[from].Raw) var parent *common.SHA256Output @@ -283,12 +284,40 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st // Trim the end of the original ID slice, as it contains values from the unmasked certificates. ids = ids[:len(payloads)] + names = names[:len(payloads)] - // Only insert those certificates that are not in the mask. 
- return conn.InsertCerts(ctx, ids, parentIds, expirations, payloads) + // Only update those certificates that are not in the mask. + return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads) } +func insertCerts(ctx context.Context, conn db.Conn, names [][]string, + ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { + + // Send hash, parent hash, expiration and payload to the certs table. + if err := conn.InsertCerts(ctx, ids, parents, expirations, payloads); err != nil { + return fmt.Errorf("inserting certificates: %w", err) + } + + // Add all new entries from names into the domains table (with ignore) + newNames := make([]string, 0, len(ids)) + newIDs := make([]*common.SHA256Output, 0, len(ids)) + domainIDs := make([]*common.SHA256Output, 0, len(ids)) + for i, names := range names { + for _, name := range names { + newNames = append(newNames, name) + newIDs = append(newIDs, ids[i]) + domainID := common.SHA256Hash32Bytes([]byte(name)) + domainIDs = append(domainIDs, &domainID) + } + } + if err := conn.UpdateDomainsWithCerts(ctx, newIDs, domainIDs, newNames); err != nil { + return fmt.Errorf("updating domains: %w", err) + } + + return nil +} + func runWhenFalse(mask []bool, fcn func(to, from int)) { to := 0 for from, condition := range mask { diff --git a/tools/create_schema.sh b/tools/create_schema.sh index dd57bbd3..8e3170d1 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -41,21 +41,9 @@ echo "$CMD" | mysql -u root CMD=$(cat < Date: Tue, 21 Feb 2023 05:13:52 +0100 Subject: [PATCH 032/187] domains table indexed by domain, domain_payloads id is a hash. 
--- tools/create_schema.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 8e3170d1..8e0820d3 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -44,7 +44,8 @@ CREATE TABLE domains ( cert_id VARBINARY(32) NOT NULL, domain_id VARBINARY(32) NOT NULL, domain_name VARCHAR(300) COLLATE ascii_bin DEFAULT NULL, - PRIMARY KEY (cert_id,domain_id) + PRIMARY KEY (cert_id,domain_id), + INDEX domain_id (domain_id) ) ENGINE=MyISAM CHARSET=binary COLLATE=binary; EOF ) @@ -54,7 +55,7 @@ echo "$CMD" | $MYSQLCMD CMD=$(cat < Date: Tue, 21 Feb 2023 05:50:29 +0100 Subject: [PATCH 033/187] Only leaves inserted in domains. Only the leaf certificates are inserted in the domains and domain_payloads tables. --- cmd/ingest/batch.go | 8 +++-- cmd/ingest/processor.go | 1 + pkg/mapserver/updater/certs_updater.go | 1 + pkg/mapserver/updater/updater.go | 42 +++++++++++++++++--------- 4 files changed, 35 insertions(+), 17 deletions(-) diff --git a/cmd/ingest/batch.go b/cmd/ingest/batch.go index 3c757a6a..6c7ab6ed 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/batch.go @@ -16,6 +16,7 @@ import ( type CertificateNode struct { Cert *ctx509.Certificate Parent *ctx509.Certificate + IsLeaf bool } // CertBatch is an unwrapped collection of Certificate. 
@@ -25,6 +26,7 @@ type CertBatch struct { Expirations []*time.Time Certs []*ctx509.Certificate Parents []*ctx509.Certificate + AreLeaves []bool } func NewCertificateBatch() *CertBatch { @@ -33,6 +35,7 @@ func NewCertificateBatch() *CertBatch { Expirations: make([]*time.Time, 0, BatchSize), Certs: make([]*ctx509.Certificate, 0, BatchSize), Parents: make([]*ctx509.Certificate, 0, BatchSize), + AreLeaves: make([]bool, 0, BatchSize), } } @@ -41,6 +44,7 @@ func (b *CertBatch) AddCertificate(c *CertificateNode) { b.Expirations = append(b.Expirations, &c.Cert.NotAfter) b.Certs = append(b.Certs, c.Cert) b.Parents = append(b.Parents, c.Parent) + b.AreLeaves = append(b.AreLeaves, c.IsLeaf) } func (b *CertBatch) IsFull() bool { @@ -72,7 +76,7 @@ const ( ) type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, - []*ctx509.Certificate, []*ctx509.Certificate) error + []*ctx509.Certificate, []*ctx509.Certificate, []bool) error func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { @@ -214,7 +218,7 @@ func (p *CertificateProcessor) ConsolidateDB() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, batch.Parents) + batch.Certs, batch.Parents, batch.AreLeaves) if err != nil { panic(err) } diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 35773607..19ae047c 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -86,6 +86,7 @@ func (p *Processor) start() { p.nodeChan <- &CertificateNode{ Cert: certs[i], Parent: parents[i], + IsLeaf: i == 0, // Only the first certificate is a leaf. 
} } } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 6d769078..fc4f9cf0 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -152,6 +152,7 @@ func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ct // trust chain, or nil if the certificate is root. // The parents returned slice has the same elements as the certificates returned slice. // When a certificate is root, it's corresponding parents entry is nil. +// The leaf certificates are always returned at the head of the slice. func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( certificates, parents []*ctx509.Certificate) { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 96889d54..6b010231 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -92,7 +92,14 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ certChains = append(certChains, chain) } certs, parents := UnfoldCerts(certs, certChains) - return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, parents) + areLeaves := make([]bool, 0, len(certs)) + // The leaves are always at the head of the returned slice: just flag all leaves for the + // length of the original certificate list. 
+ for i := range certList { + areLeaves[i] = true + } + return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, parents, + areLeaves) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -234,7 +241,7 @@ func (mapUpdater *MapUpdater) Close() error { } func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, certs, parents []*ctx509.Certificate) error { + expirations []*time.Time, certs, parents []*ctx509.Certificate, areLeaves []bool) error { ids := make([]*common.SHA256Output, len(certs)) payloads := make([][]byte, len(certs)) @@ -248,11 +255,11 @@ func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]strin parentIds[i] = &id } } - return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads) + return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads, areLeaves) } func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, certs, parents []*ctx509.Certificate) error { + expirations []*time.Time, certs, parents []*ctx509.Certificate, areLeaves []bool) error { ids := make([]*common.SHA256Output, len(certs)) for i, c := range certs { @@ -287,28 +294,33 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st names = names[:len(payloads)] // Only update those certificates that are not in the mask. - return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads) + return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads, areLeaves) } func insertCerts(ctx context.Context, conn db.Conn, names [][]string, - ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { + ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte, + areLeaves []bool) error { // Send hash, parent hash, expiration and payload to the certs table. 
if err := conn.InsertCerts(ctx, ids, parents, expirations, payloads); err != nil { return fmt.Errorf("inserting certificates: %w", err) } - // Add all new entries from names into the domains table (with ignore) - newNames := make([]string, 0, len(ids)) - newIDs := make([]*common.SHA256Output, 0, len(ids)) - domainIDs := make([]*common.SHA256Output, 0, len(ids)) + // Add new entries from names into the domains table iff they are leaves. + estimatedSize := len(ids) * 2 // Number of IDs / 3 ~~ is the number of leaves. 6 names per leaf. + newNames := make([]string, 0, estimatedSize) + newIDs := make([]*common.SHA256Output, 0, estimatedSize) + domainIDs := make([]*common.SHA256Output, 0, estimatedSize) for i, names := range names { - for _, name := range names { - newNames = append(newNames, name) - newIDs = append(newIDs, ids[i]) - domainID := common.SHA256Hash32Bytes([]byte(name)) - domainIDs = append(domainIDs, &domainID) + if areLeaves[i] { + // If the certificate is a leaf certificate, insert one entry per name. + for _, name := range names { + newNames = append(newNames, name) + newIDs = append(newIDs, ids[i]) + domainID := common.SHA256Hash32Bytes([]byte(name)) + domainIDs = append(domainIDs, &domainID) + } } } if err := conn.UpdateDomainsWithCerts(ctx, newIDs, domainIDs, newNames); err != nil { From f3d80df485359a2fb67efc7e37fd071a9911764b Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 05:58:31 +0100 Subject: [PATCH 034/187] Rename files. 
--- cmd/ingest/{batch.go => certProcessor.go} | 2 +- cmd/ingest/processor.go | 2 +- cmd/ingest/{smt_updater.go => smtUpdater.go} | 0 3 files changed, 2 insertions(+), 2 deletions(-) rename cmd/ingest/{batch.go => certProcessor.go} (98%) rename cmd/ingest/{smt_updater.go => smtUpdater.go} (100%) diff --git a/cmd/ingest/batch.go b/cmd/ingest/certProcessor.go similarity index 98% rename from cmd/ingest/batch.go rename to cmd/ingest/certProcessor.go index 6c7ab6ed..0bc531ab 100644 --- a/cmd/ingest/batch.go +++ b/cmd/ingest/certProcessor.go @@ -78,7 +78,7 @@ const ( type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, []*ctx509.Certificate, []*ctx509.Certificate, []bool) error -func NewBatchProcessor(conn db.Conn, incoming chan *CertificateNode, +func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { // Select the update certificate method depending on the strategy: diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 19ae047c..b82657a8 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -40,7 +40,7 @@ func NewProcessor(conn db.Conn, certUpdateStrategy CertificateUpdateStrategy) *P incomingFileCh: make(chan File), certWithChainChan: make(chan *CertWithChainData), nodeChan: nodeChan, - batchProcessor: NewBatchProcessor(conn, nodeChan, certUpdateStrategy), + batchProcessor: NewCertProcessor(conn, nodeChan, certUpdateStrategy), errorCh: make(chan error), doneCh: make(chan error), diff --git a/cmd/ingest/smt_updater.go b/cmd/ingest/smtUpdater.go similarity index 100% rename from cmd/ingest/smt_updater.go rename to cmd/ingest/smtUpdater.go From 782b6191575b6695bfe796167e73f8e56764a926 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 06:47:19 +0100 Subject: [PATCH 035/187] Introduce a LRU cache in batch processor. 
--- cmd/ingest/certProcessor.go | 53 +++++++++++++++++++++++++++----- cmd/ingest/main.go | 3 +- go.mod | 1 + go.sum | 1 + pkg/mapserver/updater/updater.go | 28 +++++++++-------- 5 files changed, 65 insertions(+), 21 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 0bc531ab..db6844d7 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -7,6 +7,7 @@ import ( "time" ctx509 "github.com/google/certificate-transparency-go/x509" + lru "github.com/hashicorp/golang-lru" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" @@ -55,7 +56,8 @@ func (b *CertBatch) IsFull() bool { // This is the most expensive stage, and as such, the processor prints the statistics about // number of certificates and megabytes per second being inserted into the DB. type CertificateProcessor struct { - conn db.Conn + conn db.Conn + cache *lru.TwoQueueCache // IDs of certificates pushed to DB. 
updateCertBatch UpdateCertificateFunction // update strategy dependent method strategy CertificateUpdateStrategy @@ -64,8 +66,9 @@ type CertificateProcessor struct { incomingBatch chan *CertBatch // Ready to be inserted doneCh chan struct{} // Statistics: - writtenCerts atomic.Int64 - writtenBytes atomic.Int64 + writtenCerts atomic.Int64 + writtenBytes atomic.Int64 + uncachedCerts atomic.Int64 } type CertificateUpdateStrategy int @@ -76,11 +79,15 @@ const ( ) type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, - []*ctx509.Certificate, []*ctx509.Certificate, []bool) error + []*ctx509.Certificate, []*common.SHA256Output, []*ctx509.Certificate, []bool) error func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { + cache, err := lru.New2Q(LruCacheSize) + if err != nil { + panic(err) + } // Select the update certificate method depending on the strategy: var updateFcn UpdateCertificateFunction switch strategy { @@ -94,6 +101,7 @@ func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, p := &CertificateProcessor{ conn: conn, + cache: cache, updateCertBatch: updateFcn, strategy: strategy, incomingCh: incoming, @@ -156,9 +164,11 @@ func (p *CertificateProcessor) start() { } writtenCerts := p.writtenCerts.Load() writtenBytes := p.writtenBytes.Load() + newCerts := p.uncachedCerts.Load() secondsSinceStart := float64(time.Since(startTime).Seconds()) - fmt.Printf("%.0f Certs / second, %.1f Mb/s\n", + fmt.Printf("%.0f Certs / second (%.0f new), %.1f Mb/s\n", float64(writtenCerts)/secondsSinceStart, + float64(newCerts)/secondsSinceStart, float64(writtenBytes)/1024./1024./secondsSinceStart, ) } @@ -216,13 +226,42 @@ func (p *CertificateProcessor) ConsolidateDB() { } func (p *CertificateProcessor) processBatch(batch *CertBatch) { + // Compute the ID of the certs, and prepare the slices holding all the data. 
+ ids := updater.ComputeCertIDs(batch.Certs) + names := make([][]string, 0, len(ids)) + expirations := make([]*time.Time, 0, len(ids)) + newIds := make([]*common.SHA256Output, 0, len(ids)) + certs := make([]*ctx509.Certificate, 0, len(ids)) + parents := make([]*ctx509.Certificate, 0, len(ids)) + areLeaves := make([]bool, 0, len(ids)) + + // Check if the certificate has been already pushed to DB: + for i, id := range ids { + if !p.cache.Contains(*id) { + // If the cache doesn't contain the certificate, we cannot skip it. + names = append(names, batch.Names[i]) + expirations = append(expirations, batch.Expirations[i]) + newIds = append(newIds, ids[i]) + certs = append(certs, batch.Certs[i]) + parents = append(parents, batch.Parents[i]) + areLeaves = append(areLeaves, batch.AreLeaves[i]) + } + } // Store certificates in DB: - err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, batch.Parents, batch.AreLeaves) + err := p.updateCertBatch(context.Background(), p.conn, names, expirations, + certs, newIds, parents, areLeaves) if err != nil { panic(err) } + + // Update cache. + for _, id := range ids { + p.cache.Add(*id, nil) + } + + // Update statistics. p.writtenCerts.Add(int64(len(batch.Certs))) + p.uncachedCerts.Add(int64(len(newIds))) bytesInBatch := 0 for i := range batch.Certs { bytesInBatch += len(batch.Certs[i].Raw) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 49508347..f76979fe 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -18,7 +18,8 @@ const ( NumParsers = 64 NumDBWriters = 32 - BatchSize = 1000 // # of certificates inserted at once. + BatchSize = 1000 // # of certificates inserted at once. + LruCacheSize = 10 * 1000 * 1000 // Keep track of the 10 million most seen certificates. 
) const ( diff --git a/go.mod b/go.mod index eecf012f..9205012c 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( github.com/google/certificate-transparency-go v1.1.3 github.com/google/trillian v1.4.1 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 + github.com/hashicorp/golang-lru v0.5.1 github.com/minio/sha256-simd v1.0.0 github.com/stretchr/testify v1.7.4 github.com/transparency-dev/merkle v0.0.1 diff --git a/go.sum b/go.sum index 2c38700a..1a34f551 100644 --- a/go.sum +++ b/go.sum @@ -458,6 +458,7 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 6b010231..c161b995 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -98,8 +98,8 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ for i := range certList { areLeaves[i] = true } - return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, parents, - areLeaves) + return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, + ComputeCertIDs(certs), parents, areLeaves) } // updateCerts: update the tables and SMT (in memory) 
using certificates @@ -241,14 +241,12 @@ func (mapUpdater *MapUpdater) Close() error { } func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, certs, parents []*ctx509.Certificate, areLeaves []bool) error { + expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, + parents []*ctx509.Certificate, areLeaves []bool) error { - ids := make([]*common.SHA256Output, len(certs)) payloads := make([][]byte, len(certs)) parentIds := make([]*common.SHA256Output, len(certs)) for i, c := range certs { - id := common.SHA256Hash32Bytes(c.Raw) - ids[i] = &id payloads[i] = c.Raw if parents[i] != nil { id := common.SHA256Hash32Bytes(parents[i].Raw) @@ -259,13 +257,8 @@ func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]strin } func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, certs, parents []*ctx509.Certificate, areLeaves []bool) error { - - ids := make([]*common.SHA256Output, len(certs)) - for i, c := range certs { - id := common.SHA256Hash32Bytes(c.Raw) - ids[i] = &id - } + expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, + parents []*ctx509.Certificate, areLeaves []bool) error { // First check which certificates are already present in the DB. mask, err := conn.CheckCertsExist(ctx, ids) @@ -298,6 +291,15 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st } +func ComputeCertIDs(certs []*ctx509.Certificate) []*common.SHA256Output { + ids := make([]*common.SHA256Output, len(certs)) + for i, c := range certs { + id := common.SHA256Hash32Bytes(c.Raw) + ids[i] = &id + } + return ids +} + func insertCerts(ctx context.Context, conn db.Conn, names [][]string, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte, areLeaves []bool) error { From 815c42bea7aa85f3a29b0440ab0b9e96f4840ed4 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Tue, 21 Feb 2023 09:43:35 +0100 Subject: [PATCH 036/187] Reading single certs checks cache and is done in parallel. --- cmd/ingest/certProcessor.go | 66 +++++++++++++++++-------------------- 1 file changed, 31 insertions(+), 35 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index db6844d7..2e246f0e 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -121,14 +121,15 @@ func (p *CertificateProcessor) start() { // Start pipeline. go func() { - batch := NewCertificateBatch() - for c := range p.incomingCh { - batch.AddCertificate(c) - if batch.IsFull() { - p.incomingBatch <- batch - batch = NewCertificateBatch() - } + wg := sync.WaitGroup{} + wg.Add(NumDBWriters) + for w := 0; w < NumDBWriters; w++ { + go func() { + defer wg.Done() + p.createBatches() + }() } + wg.Wait() // Because the stage is finished, close the output channel: close(p.incomingBatch) }() @@ -225,43 +226,38 @@ func (p *CertificateProcessor) ConsolidateDB() { } } -func (p *CertificateProcessor) processBatch(batch *CertBatch) { - // Compute the ID of the certs, and prepare the slices holding all the data. - ids := updater.ComputeCertIDs(batch.Certs) - names := make([][]string, 0, len(ids)) - expirations := make([]*time.Time, 0, len(ids)) - newIds := make([]*common.SHA256Output, 0, len(ids)) - certs := make([]*ctx509.Certificate, 0, len(ids)) - parents := make([]*ctx509.Certificate, 0, len(ids)) - areLeaves := make([]bool, 0, len(ids)) - - // Check if the certificate has been already pushed to DB: - for i, id := range ids { - if !p.cache.Contains(*id) { - // If the cache doesn't contain the certificate, we cannot skip it. 
- names = append(names, batch.Names[i]) - expirations = append(expirations, batch.Expirations[i]) - newIds = append(newIds, ids[i]) - certs = append(certs, batch.Certs[i]) - parents = append(parents, batch.Parents[i]) - areLeaves = append(areLeaves, batch.AreLeaves[i]) +// createBatches reads CertificateNodes from the incoming channel and sends them in batches +// to processing. +func (p *CertificateProcessor) createBatches() { + batch := NewCertificateBatch() + for c := range p.incomingCh { + // Check cache. + id := common.SHA256Hash32Bytes(c.Cert.Raw) + if !p.cache.Contains(id) { + batch.AddCertificate(c) + if batch.IsFull() { + p.incomingBatch <- batch + batch = NewCertificateBatch() + } + p.uncachedCerts.Inc() } + // Add to cache. + p.cache.Add(id, nil) } + // Last batch (might be empty). + p.incomingBatch <- batch +} + +func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: - err := p.updateCertBatch(context.Background(), p.conn, names, expirations, - certs, newIds, parents, areLeaves) + err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, + batch.Certs, updater.ComputeCertIDs(batch.Certs), batch.Parents, batch.AreLeaves) if err != nil { panic(err) } - // Update cache. - for _, id := range ids { - p.cache.Add(*id, nil) - } - // Update statistics. p.writtenCerts.Add(int64(len(batch.Certs))) - p.uncachedCerts.Add(int64(len(newIds))) bytesInBatch := 0 for i := range batch.Certs { bytesInBatch += len(batch.Certs[i].Raw) From b80823ed50bbe5dc701cdff08afe3fe1c6ea10f9 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 10:08:34 +0100 Subject: [PATCH 037/187] Bugfix: presence checking in DB of nothing is ok. 
--- pkg/db/mysql.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index 0a9ec232..86c3f31f 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -156,6 +156,10 @@ func (c *mysqlDB) EnableIndexing(table string) error { // CheckCertsExist returns a slice of true/false values. Each value indicates if // the corresponding certificate identified by its ID is already present in the DB. func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { + if len(ids) == 0 { + // If empty, return empty. + return nil, nil + } // Slice to be used in the SQL query: data := make([]interface{}, len(ids)) for i, id := range ids { From 201fcbcff8e4433900e40f3b73421af11ba2335a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 10:49:26 +0100 Subject: [PATCH 038/187] First version of the presence cache. Extremely slow. --- cmd/ingest/certProcessor.go | 24 +++++++------- cmd/ingest/presenceCache.go | 65 +++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 13 deletions(-) create mode 100644 cmd/ingest/presenceCache.go diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 2e246f0e..a334b187 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -7,7 +7,6 @@ import ( "time" ctx509 "github.com/google/certificate-transparency-go/x509" - lru "github.com/hashicorp/golang-lru" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" @@ -26,6 +25,7 @@ type CertBatch struct { Names [][]string // collection of names per certificate Expirations []*time.Time Certs []*ctx509.Certificate + IDs []*common.SHA256Output Parents []*ctx509.Certificate AreLeaves []bool } @@ -35,15 +35,17 @@ func NewCertificateBatch() *CertBatch { Names: make([][]string, 0, BatchSize), Expirations: make([]*time.Time, 0, BatchSize), Certs: 
make([]*ctx509.Certificate, 0, BatchSize), + IDs: make([]*common.SHA256Output, 0, BatchSize), Parents: make([]*ctx509.Certificate, 0, BatchSize), AreLeaves: make([]bool, 0, BatchSize), } } -func (b *CertBatch) AddCertificate(c *CertificateNode) { +func (b *CertBatch) AddCertificate(c *CertificateNode, id *common.SHA256Output) { b.Names = append(b.Names, updater.ExtractCertDomains(c.Cert)) b.Expirations = append(b.Expirations, &c.Cert.NotAfter) b.Certs = append(b.Certs, c.Cert) + b.IDs = append(b.IDs, id) b.Parents = append(b.Parents, c.Parent) b.AreLeaves = append(b.AreLeaves, c.IsLeaf) } @@ -57,7 +59,7 @@ func (b *CertBatch) IsFull() bool { // number of certificates and megabytes per second being inserted into the DB. type CertificateProcessor struct { conn db.Conn - cache *lru.TwoQueueCache // IDs of certificates pushed to DB. + cache *PresenceCache // IDs of certificates pushed to DB. updateCertBatch UpdateCertificateFunction // update strategy dependent method strategy CertificateUpdateStrategy @@ -84,10 +86,6 @@ type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*tim func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { - cache, err := lru.New2Q(LruCacheSize) - if err != nil { - panic(err) - } // Select the update certificate method depending on the strategy: var updateFcn UpdateCertificateFunction switch strategy { @@ -101,7 +99,7 @@ func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, p := &CertificateProcessor{ conn: conn, - cache: cache, + cache: NewPresenceCache(), updateCertBatch: updateFcn, strategy: strategy, incomingCh: incoming, @@ -167,7 +165,7 @@ func (p *CertificateProcessor) start() { writtenBytes := p.writtenBytes.Load() newCerts := p.uncachedCerts.Load() secondsSinceStart := float64(time.Since(startTime).Seconds()) - fmt.Printf("%.0f Certs / second (%.0f new), %.1f Mb/s\n", + fmt.Printf("%.0f Certs / second (%.0f uncached), %.1f 
Mb/s\n", float64(writtenCerts)/secondsSinceStart, float64(newCerts)/secondsSinceStart, float64(writtenBytes)/1024./1024./secondsSinceStart, @@ -233,16 +231,16 @@ func (p *CertificateProcessor) createBatches() { for c := range p.incomingCh { // Check cache. id := common.SHA256Hash32Bytes(c.Cert.Raw) - if !p.cache.Contains(id) { - batch.AddCertificate(c) + if !p.cache.Contains(&id) { + batch.AddCertificate(c, &id) if batch.IsFull() { + // Add to cache. + p.cache.AddIDs(batch.IDs) p.incomingBatch <- batch batch = NewCertificateBatch() } p.uncachedCerts.Inc() } - // Add to cache. - p.cache.Add(id, nil) } // Last batch (might be empty). p.incomingBatch <- batch diff --git a/cmd/ingest/presenceCache.go b/cmd/ingest/presenceCache.go new file mode 100644 index 00000000..a1000fc6 --- /dev/null +++ b/cmd/ingest/presenceCache.go @@ -0,0 +1,65 @@ +package main + +import ( + "sync" + "unsafe" + + "github.com/netsec-ethz/fpki/pkg/common" + "go.uber.org/atomic" +) + +const initialNumberOfElements = 1000000 // 1 million + +// PresenceCache is, for now, just a set. It will consume memory unstoppably. +type PresenceCache struct { + ptr atomic.UnsafePointer // Pointer to the data. + addingMu sync.Mutex +} + +type set map[common.SHA256Output]struct{} + +func NewPresenceCache() *PresenceCache { + set := make(set, initialNumberOfElements) + return &PresenceCache{ + ptr: *atomic.NewUnsafePointer(unsafe.Pointer(&set)), + } +} + +func (c *PresenceCache) Contains(id *common.SHA256Output) bool { + s := *c.getSet() + _, ok := s[*id] + return ok +} + +// AddIDs is thread safe. This function does the following: +// 1. Copy the set to a local variable. +// 2. Modify the local copy. +// 3. Thread-safely modify the pointer to the set. +func (c *PresenceCache) AddIDs(ids []*common.SHA256Output) { + c.addingMu.Lock() + defer c.addingMu.Unlock() + + // Copy the local contents. + newSet := *c.cloneSet() + // Add the batch. 
+ for _, id := range ids { + newSet[*id] = struct{}{} + } + // Modify the pointer to the set. + ptr := unsafe.Pointer(&newSet) + c.ptr.Swap(ptr) +} + +func (c *PresenceCache) getSet() *set { + ptr := c.ptr.Load() + return (*set)(ptr) +} + +func (c *PresenceCache) cloneSet() *set { + s := *c.getSet() + clone := make(set, len(s)) + for k, v := range s { + clone[k] = v + } + return &clone +} From 2db2d46c215ae160c0444070a70020ffe56d557a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 11:22:46 +0100 Subject: [PATCH 039/187] XXX this code is buggy. WIP. Add a maybe faster version of the presence cache. This version maintains two sets in memory, works with one of them, and whenever new data is added, it goes first to the secondary copy (the shadow), and then to the main copy. --- cmd/ingest/presenceCache.go | 75 ++++++++++++++++++++++++------------- 1 file changed, 48 insertions(+), 27 deletions(-) diff --git a/cmd/ingest/presenceCache.go b/cmd/ingest/presenceCache.go index a1000fc6..4ac6de69 100644 --- a/cmd/ingest/presenceCache.go +++ b/cmd/ingest/presenceCache.go @@ -2,7 +2,6 @@ package main import ( "sync" - "unsafe" "github.com/netsec-ethz/fpki/pkg/common" "go.uber.org/atomic" @@ -12,54 +11,76 @@ const initialNumberOfElements = 1000000 // 1 million // PresenceCache is, for now, just a set. It will consume memory unstoppably. type PresenceCache struct { - ptr atomic.UnsafePointer // Pointer to the data. + sets [2]set // A regular set and its "shadow" (always a copy) + currentIdx atomic.Uint32 // The index of the current set. 
+ readerCount [2]atomic.Int32 // How many routines reading from sets[0] + addingMu sync.Mutex } type set map[common.SHA256Output]struct{} func NewPresenceCache() *PresenceCache { - set := make(set, initialNumberOfElements) + + sets := [...]set{ + make(set, initialNumberOfElements), + make(set, initialNumberOfElements), + } return &PresenceCache{ - ptr: *atomic.NewUnsafePointer(unsafe.Pointer(&set)), + sets: sets, + // currentIdx: *atomic.NewUint32(0), } } func (c *PresenceCache) Contains(id *common.SHA256Output) bool { - s := *c.getSet() + idx := c.currentIdx.Load() + c.readerCount[idx].Inc() + defer c.readerCount[idx].Dec() + s := c.sets[int(idx)] _, ok := s[*id] return ok } -// AddIDs is thread safe. This function does the following: -// 1. Copy the set to a local variable. -// 2. Modify the local copy. -// 3. Thread-safely modify the pointer to the set. +// AddIDs is thread safe. func (c *PresenceCache) AddIDs(ids []*common.SHA256Output) { c.addingMu.Lock() defer c.addingMu.Unlock() - // Copy the local contents. - newSet := *c.cloneSet() - // Add the batch. + // Futex until all the readers have left the shadow (should almost always be noop). + for { + if c.readerCount[1].Load() == 0 { + break + } + } + // Copy the local contents to the shadow. for _, id := range ids { - newSet[*id] = struct{}{} + c.sets[1][*id] = struct{}{} } // Modify the pointer to the set. - ptr := unsafe.Pointer(&newSet) - c.ptr.Swap(ptr) + c.currentIdx.Store(1) + // Futex until all the readers have left current. + for { + if c.readerCount[0].Load() == 0 { + break + } + } + // Copy to current. + for _, id := range ids { + c.sets[0][*id] = struct{}{} + } + // Point back current. 
+ c.currentIdx.Store(0) } -func (c *PresenceCache) getSet() *set { - ptr := c.ptr.Load() - return (*set)(ptr) -} +// func (c *PresenceCache) getSet() *set { +// return &c.sets[int(c.currentIdx.Load())] +// } -func (c *PresenceCache) cloneSet() *set { - s := *c.getSet() - clone := make(set, len(s)) - for k, v := range s { - clone[k] = v - } - return &clone -} +// func (c *PresenceCache) cloneSet() *set { +// s := *c.getSet() +// clone := make(set, len(s)) +// for k, v := range s { +// clone[k] = v +// } +// return &clone +// } From 7ac2e4b80567da71c778a64d9f460aa55124ce51 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 16:07:56 +0100 Subject: [PATCH 040/187] Change the cache to a simple RWLock cache. --- cmd/ingest/presenceCache.go | 61 +++++-------------------------------- 1 file changed, 8 insertions(+), 53 deletions(-) diff --git a/cmd/ingest/presenceCache.go b/cmd/ingest/presenceCache.go index 4ac6de69..65acf9c7 100644 --- a/cmd/ingest/presenceCache.go +++ b/cmd/ingest/presenceCache.go @@ -4,40 +4,29 @@ import ( "sync" "github.com/netsec-ethz/fpki/pkg/common" - "go.uber.org/atomic" ) const initialNumberOfElements = 1000000 // 1 million // PresenceCache is, for now, just a set. It will consume memory unstoppably. type PresenceCache struct { - sets [2]set // A regular set and its "shadow" (always a copy) - currentIdx atomic.Uint32 // The index of the current set. 
- readerCount [2]atomic.Int32 // How many routines reading from sets[0] - - addingMu sync.Mutex + set set + addingMu sync.RWMutex } type set map[common.SHA256Output]struct{} func NewPresenceCache() *PresenceCache { - - sets := [...]set{ - make(set, initialNumberOfElements), - make(set, initialNumberOfElements), - } return &PresenceCache{ - sets: sets, - // currentIdx: *atomic.NewUint32(0), + set: make(set, initialNumberOfElements), } } func (c *PresenceCache) Contains(id *common.SHA256Output) bool { - idx := c.currentIdx.Load() - c.readerCount[idx].Inc() - defer c.readerCount[idx].Dec() - s := c.sets[int(idx)] - _, ok := s[*id] + c.addingMu.RLock() + defer c.addingMu.RUnlock() + + _, ok := c.set[*id] return ok } @@ -46,41 +35,7 @@ func (c *PresenceCache) AddIDs(ids []*common.SHA256Output) { c.addingMu.Lock() defer c.addingMu.Unlock() - // Futex until all the readers have left the shadow (should almost always be noop). - for { - if c.readerCount[1].Load() == 0 { - break - } - } - // Copy the local contents to the shadow. for _, id := range ids { - c.sets[1][*id] = struct{}{} + c.set[*id] = struct{}{} } - // Modify the pointer to the set. - c.currentIdx.Store(1) - // Futex until all the readers have left current. - for { - if c.readerCount[0].Load() == 0 { - break - } - } - // Copy to current. - for _, id := range ids { - c.sets[0][*id] = struct{}{} - } - // Point back current. - c.currentIdx.Store(0) } - -// func (c *PresenceCache) getSet() *set { -// return &c.sets[int(c.currentIdx.Load())] -// } - -// func (c *PresenceCache) cloneSet() *set { -// s := *c.getSet() -// clone := make(set, len(s)) -// for k, v := range s { -// clone[k] = v -// } -// return &clone -// } From 2ab0383a78fe19feac7c0cb34a53562a00ce3050 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Feb 2023 16:28:18 +0100 Subject: [PATCH 041/187] Again the principal/shadow cache, no errors. 
--- cmd/ingest/certProcessor.go | 24 ++++---------- cmd/ingest/presenceCache.go | 65 ++++++++++++++++++++++++++++++++----- 2 files changed, 63 insertions(+), 26 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index a334b187..203e5b1c 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -163,11 +163,11 @@ func (p *CertificateProcessor) start() { } writtenCerts := p.writtenCerts.Load() writtenBytes := p.writtenBytes.Load() - newCerts := p.uncachedCerts.Load() + uncachedCerts := p.uncachedCerts.Load() secondsSinceStart := float64(time.Since(startTime).Seconds()) - fmt.Printf("%.0f Certs / second (%.0f uncached), %.1f Mb/s\n", + fmt.Printf("%.0f Certs/s (%.0f%% uncached), %.1f Mb/s\n", float64(writtenCerts)/secondsSinceStart, - float64(newCerts)/secondsSinceStart, + float64(uncachedCerts)*100./float64(writtenCerts), float64(writtenBytes)/1024./1024./secondsSinceStart, ) } @@ -241,6 +241,9 @@ func (p *CertificateProcessor) createBatches() { } p.uncachedCerts.Inc() } + // Update statistics. + p.writtenCerts.Inc() + p.writtenBytes.Add(int64(len(c.Cert.Raw))) } // Last batch (might be empty). p.incomingBatch <- batch @@ -249,21 +252,8 @@ func (p *CertificateProcessor) createBatches() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, updater.ComputeCertIDs(batch.Certs), batch.Parents, batch.AreLeaves) + batch.Certs, batch.IDs, batch.Parents, batch.AreLeaves) if err != nil { panic(err) } - - // Update statistics. 
- p.writtenCerts.Add(int64(len(batch.Certs))) - bytesInBatch := 0 - for i := range batch.Certs { - bytesInBatch += len(batch.Certs[i].Raw) - bytesInBatch += common.SHA256Size - if batch.Parents[i] != nil { - bytesInBatch += len(batch.Parents[i].Raw) - bytesInBatch += common.SHA256Size - } - } - p.writtenBytes.Add(int64(bytesInBatch)) } diff --git a/cmd/ingest/presenceCache.go b/cmd/ingest/presenceCache.go index 65acf9c7..b224c2ab 100644 --- a/cmd/ingest/presenceCache.go +++ b/cmd/ingest/presenceCache.go @@ -4,30 +4,50 @@ import ( "sync" "github.com/netsec-ethz/fpki/pkg/common" + "go.uber.org/atomic" ) const initialNumberOfElements = 1000000 // 1 million // PresenceCache is, for now, just a set. It will consume memory unstoppably. type PresenceCache struct { - set set - addingMu sync.RWMutex + sets [2]set // A regular set and its "shadow" (always a copy) + currentIdx atomic.Uint32 // The index of the current set. + readerCount [2]atomic.Int32 // How many routines reading from sets[0] + + addingMu sync.Mutex } type set map[common.SHA256Output]struct{} func NewPresenceCache() *PresenceCache { + + sets := [...]set{ + make(set, initialNumberOfElements), + make(set, initialNumberOfElements), + } return &PresenceCache{ - set: make(set, initialNumberOfElements), + sets: sets, + // currentIdx: *atomic.NewUint32(0), } } func (c *PresenceCache) Contains(id *common.SHA256Output) bool { - c.addingMu.RLock() - defer c.addingMu.RUnlock() - - _, ok := c.set[*id] - return ok + // To avoid race conditions, we must double check that the index didn't change before we were + // able to increment the reader counter. If it changed, repeat the operation. + for { + idx := c.currentIdx.Load() + c.readerCount[idx].Inc() + if c.currentIdx.Load() != idx { + // The writting routine won the race: unroll increment and repeat operation. 
+ c.readerCount[idx].Dec() + continue + } + defer c.readerCount[idx].Dec() + s := c.sets[int(idx)] + _, ok := s[*id] + return ok + } } // AddIDs is thread safe. @@ -35,7 +55,34 @@ func (c *PresenceCache) AddIDs(ids []*common.SHA256Output) { c.addingMu.Lock() defer c.addingMu.Unlock() + // Futex until all the readers have left the shadow (should almost always be noop). + for { + if c.readerCount[1].Load() == 0 { + break + } + // fmt.Println("shadow busy") + } + // Copy the local contents to the shadow. for _, id := range ids { - c.set[*id] = struct{}{} + c.sets[1][*id] = struct{}{} } + // Modify the pointer to the set. + c.currentIdx.Store(1) + // Futex until all the readers have left current. + for { + if c.readerCount[0].Load() == 0 { + break + } + // fmt.Println("principal busy") + } + // Copy to current. + for _, id := range ids { + c.sets[0][*id] = struct{}{} + } + // Point back current. + c.currentIdx.Store(0) +} + +func (c *PresenceCache) Size() int { + return len(c.sets[0]) } From ebc6c83432a1c5851abb105aadfe4dae0ea7f0b8 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 23 Feb 2023 11:48:50 +0100 Subject: [PATCH 042/187] Principal/Shadow cache in CSV ingest. Avoids parsing the ancestry if it's already in DB. --- cmd/ingest/certProcessor.go | 55 +++++++++++--------------- cmd/ingest/processor.go | 52 ++++++++++++++++++------ pkg/mapserver/updater/certs_updater.go | 49 +++++++++++++++++++++-- 3 files changed, 109 insertions(+), 47 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 203e5b1c..7a053245 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -14,9 +14,11 @@ import ( ) type CertificateNode struct { - Cert *ctx509.Certificate - Parent *ctx509.Certificate - IsLeaf bool + CertID *common.SHA256Output + Cert *ctx509.Certificate + ParentID *common.SHA256Output + Parent *ctx509.Certificate + IsLeaf bool } // CertBatch is an unwrapped collection of Certificate. 
@@ -25,8 +27,9 @@ type CertBatch struct { Names [][]string // collection of names per certificate Expirations []*time.Time Certs []*ctx509.Certificate - IDs []*common.SHA256Output + CertIDs []*common.SHA256Output Parents []*ctx509.Certificate + ParentIDs []*common.SHA256Output AreLeaves []bool } @@ -35,18 +38,20 @@ func NewCertificateBatch() *CertBatch { Names: make([][]string, 0, BatchSize), Expirations: make([]*time.Time, 0, BatchSize), Certs: make([]*ctx509.Certificate, 0, BatchSize), - IDs: make([]*common.SHA256Output, 0, BatchSize), + CertIDs: make([]*common.SHA256Output, 0, BatchSize), Parents: make([]*ctx509.Certificate, 0, BatchSize), + ParentIDs: make([]*common.SHA256Output, 0, BatchSize), AreLeaves: make([]bool, 0, BatchSize), } } -func (b *CertBatch) AddCertificate(c *CertificateNode, id *common.SHA256Output) { +func (b *CertBatch) AddCertificate(c *CertificateNode) { b.Names = append(b.Names, updater.ExtractCertDomains(c.Cert)) b.Expirations = append(b.Expirations, &c.Cert.NotAfter) b.Certs = append(b.Certs, c.Cert) - b.IDs = append(b.IDs, id) + b.CertIDs = append(b.CertIDs, c.CertID) b.Parents = append(b.Parents, c.Parent) + b.ParentIDs = append(b.ParentIDs, c.ParentID) b.AreLeaves = append(b.AreLeaves, c.IsLeaf) } @@ -58,8 +63,7 @@ func (b *CertBatch) IsFull() bool { // This is the most expensive stage, and as such, the processor prints the statistics about // number of certificates and megabytes per second being inserted into the DB. type CertificateProcessor struct { - conn db.Conn - cache *PresenceCache // IDs of certificates pushed to DB. 
+ conn db.Conn updateCertBatch UpdateCertificateFunction // update strategy dependent method strategy CertificateUpdateStrategy @@ -68,9 +72,9 @@ type CertificateProcessor struct { incomingBatch chan *CertBatch // Ready to be inserted doneCh chan struct{} // Statistics: - writtenCerts atomic.Int64 - writtenBytes atomic.Int64 - uncachedCerts atomic.Int64 + WrittenCerts atomic.Int64 + WrittenBytes atomic.Int64 + UncachedCerts atomic.Int64 } type CertificateUpdateStrategy int @@ -99,7 +103,6 @@ func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, p := &CertificateProcessor{ conn: conn, - cache: NewPresenceCache(), updateCertBatch: updateFcn, strategy: strategy, incomingCh: incoming, @@ -161,9 +164,9 @@ func (p *CertificateProcessor) start() { p.doneCh <- struct{}{} // signal again return } - writtenCerts := p.writtenCerts.Load() - writtenBytes := p.writtenBytes.Load() - uncachedCerts := p.uncachedCerts.Load() + writtenCerts := p.WrittenCerts.Load() + writtenBytes := p.WrittenBytes.Load() + uncachedCerts := p.UncachedCerts.Load() secondsSinceStart := float64(time.Since(startTime).Seconds()) fmt.Printf("%.0f Certs/s (%.0f%% uncached), %.1f Mb/s\n", float64(writtenCerts)/secondsSinceStart, @@ -229,21 +232,11 @@ func (p *CertificateProcessor) ConsolidateDB() { func (p *CertificateProcessor) createBatches() { batch := NewCertificateBatch() for c := range p.incomingCh { - // Check cache. - id := common.SHA256Hash32Bytes(c.Cert.Raw) - if !p.cache.Contains(&id) { - batch.AddCertificate(c, &id) - if batch.IsFull() { - // Add to cache. - p.cache.AddIDs(batch.IDs) - p.incomingBatch <- batch - batch = NewCertificateBatch() - } - p.uncachedCerts.Inc() + batch.AddCertificate(c) + if batch.IsFull() { + p.incomingBatch <- batch + batch = NewCertificateBatch() } - // Update statistics. - p.writtenCerts.Inc() - p.writtenBytes.Add(int64(len(c.Cert.Raw))) } // Last batch (might be empty). 
p.incomingBatch <- batch @@ -252,7 +245,7 @@ func (p *CertificateProcessor) createBatches() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, batch.IDs, batch.Parents, batch.AreLeaves) + batch.Certs, batch.CertIDs, batch.Parents, batch.AreLeaves) if err != nil { panic(err) } diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index b82657a8..0ef34934 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -10,6 +10,7 @@ import ( "sync" ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) @@ -18,7 +19,9 @@ import ( // inside the DB and SMT. It is composed of several different stages, // described in the `start` method. type Processor struct { - Conn db.Conn + Conn db.Conn + cache *PresenceCache // IDs of certificates pushed to DB. + incomingFileCh chan File // New files with certificates to be ingested certWithChainChan chan *CertWithChainData // After parsing files nodeChan chan *CertificateNode // After finding parents, to be sent to DB and SMT @@ -29,14 +32,17 @@ type Processor struct { } type CertWithChainData struct { - Cert *ctx509.Certificate - CertChain []*ctx509.Certificate + CertID *common.SHA256Output // The ID (the SHA256) of the certificate. + Cert *ctx509.Certificate // The payload of the certificate. + ChainIDs []*common.SHA256Output // The trust chain of the certificate. + ChainPayloads []*ctx509.Certificate // The payloads of the chain. Is nil if already cached. 
} func NewProcessor(conn db.Conn, certUpdateStrategy CertificateUpdateStrategy) *Processor { nodeChan := make(chan *CertificateNode) p := &Processor{ Conn: conn, + cache: NewPresenceCache(), incomingFileCh: make(chan File), certWithChainChan: make(chan *CertWithChainData), nodeChan: nodeChan, @@ -81,12 +87,15 @@ func (p *Processor) start() { // Process the parsed content into the DB, and from DB into SMT: go func() { for data := range p.certWithChainChan { - certs, parents := updater.UnfoldCert(data.Cert, data.CertChain) + certs, certIDs, parents, parentIDs := updater.UnfoldCert(data.Cert, data.CertID, + data.ChainPayloads, data.ChainIDs) for i := range certs { p.nodeChan <- &CertificateNode{ - Cert: certs[i], - Parent: parents[i], - IsLeaf: i == 0, // Only the first certificate is a leaf. + CertID: certIDs[i], + Cert: certs[i], + ParentID: parentIDs[i], + Parent: parents[i], + IsLeaf: i == 0, // Only the first certificate is a leaf. } } } @@ -170,27 +179,46 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { if err != nil { return err } + certID := common.SHA256Hash32Bytes(rawBytes) cert, err := ctx509.ParseCertificate(rawBytes) if err != nil { return err } + // Update statistics. + p.batchProcessor.WrittenBytes.Add(int64(len(rawBytes))) + p.batchProcessor.WrittenCerts.Inc() + p.batchProcessor.UncachedCerts.Inc() // The certificate chain is a list of base64 strings separated by semicolon (;). strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) + chainIDs := make([]*common.SHA256Output, len(strs)) for i, s := range strs { rawBytes, err = base64.StdEncoding.DecodeString(s) if err != nil { return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) } - chain[i], err = ctx509.ParseCertificate(rawBytes) - if err != nil { - return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) + // Update statistics. 
+ p.batchProcessor.WrittenBytes.Add(int64(len(rawBytes))) + p.batchProcessor.WrittenCerts.Inc() + // Check if the parent certificate is in the cache. + id := common.SHA256Hash32Bytes(rawBytes) + if !p.cache.Contains(&id) { + // Not seen before, push it to the DB. + chain[i], err = ctx509.ParseCertificate(rawBytes) + if err != nil { + return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) + } + p.cache.AddIDs([]*common.SHA256Output{&id}) + p.batchProcessor.UncachedCerts.Inc() } + chainIDs[i] = &id } p.certWithChainChan <- &CertWithChainData{ - Cert: cert, - CertChain: chain, + Cert: cert, + CertID: &certID, + ChainPayloads: chain, + ChainIDs: chainIDs, } return nil } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index fc4f9cf0..5e99f98c 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -179,10 +179,51 @@ func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( return } -func UnfoldCert(cert *ctx509.Certificate, chain []*ctx509.Certificate) ( - certificates, parents []*ctx509.Certificate) { - - return UnfoldCerts([]*ctx509.Certificate{cert}, [][]*ctx509.Certificate{chain}) +// UnfoldCert takes a certificate with its trust chain and returns a ready-to-insert-in-DB +// collection of IDs and payloads for the certificate and its ancestry. +// Additionally, if the payload of any of the ancestors of the certificate is nil, this function +// interprets it as the ancestor is already present in the DB, and thus will omit returning it +// and any posterior ancestors. 
+func UnfoldCert(cert *ctx509.Certificate, certID *common.SHA256Output, + chain []*ctx509.Certificate, chainIDs []*common.SHA256Output, +) (certPayloads []*ctx509.Certificate, certIDs []*common.SHA256Output, + parentPayloads []*ctx509.Certificate, parentIDs []*common.SHA256Output) { + + // return UnfoldCerts([]*ctx509.Certificate{cert}, [][]*ctx509.Certificate{chain}) + + // todo: do not add parents that have their payload to nil, because they must be in DB already + + certPayloads = make([]*ctx509.Certificate, 0, len(parentPayloads)+1) + certIDs = make([]*common.SHA256Output, 0, len(parentPayloads)+1) + parentPayloads = make([]*ctx509.Certificate, 0, len(parentPayloads)+1) + parentIDs = make([]*common.SHA256Output, 0, len(parentPayloads)+1) + + // Always add the leaf certificate. + certPayloads = append(certPayloads, cert) + certIDs = append(certIDs, certID) + parentPayloads = append(parentPayloads, chain[0]) + parentIDs = append(parentIDs, chainIDs[0]) + // Add the intermediate certs iff their payload is not nil. + i := 0 + for ; i < len(chain)-1; i++ { + if chain[i] == nil { + // This parent has been inserted already in DB. + // Its parent must have been inserted as well. There are no more parents to insert. + return + } + certPayloads = append(certPayloads, chain[i]) + certIDs = append(certIDs, chainIDs[i]) + parentPayloads = append(parentPayloads, chain[i+1]) + parentIDs = append(parentIDs, chainIDs[i+1]) + } + // Add the root certificate (no parent) iff we haven't inserted it yet. + if chain[i] != nil { + certPayloads = append(certPayloads, chain[i]) + certIDs = append(certIDs, chainIDs[i]) + parentPayloads = append(parentPayloads, nil) + parentIDs = append(parentIDs, nil) + } + return } // update domain entries From ca24cd17e2473e572864065c76b68cb46fac99db Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 16 Mar 2023 15:46:37 +0100 Subject: [PATCH 043/187] Skip expired certificates. 
If the certificate is not expired, we assume that its chain is also not expired. If the certificate is expired, skip its ingestion and its chain ingestion as well. --- cmd/ingest/certProcessor.go | 8 ++++---- cmd/ingest/processor.go | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 7a053245..9a6ccdba 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -72,8 +72,8 @@ type CertificateProcessor struct { incomingBatch chan *CertBatch // Ready to be inserted doneCh chan struct{} // Statistics: - WrittenCerts atomic.Int64 - WrittenBytes atomic.Int64 + ReadCerts atomic.Int64 + ReadBytes atomic.Int64 UncachedCerts atomic.Int64 } @@ -164,8 +164,8 @@ func (p *CertificateProcessor) start() { p.doneCh <- struct{}{} // signal again return } - writtenCerts := p.WrittenCerts.Load() - writtenBytes := p.WrittenBytes.Load() + writtenCerts := p.ReadCerts.Load() + writtenBytes := p.ReadBytes.Load() uncachedCerts := p.UncachedCerts.Load() secondsSinceStart := float64(time.Since(startTime).Seconds()) fmt.Printf("%.0f Certs/s (%.0f%% uncached), %.1f Mb/s\n", diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 0ef34934..64ca7ace 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -8,6 +8,7 @@ import ( "os" "strings" "sync" + "time" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" @@ -21,6 +22,7 @@ import ( type Processor struct { Conn db.Conn cache *PresenceCache // IDs of certificates pushed to DB. 
+ now time.Time incomingFileCh chan File // New files with certificates to be ingested certWithChainChan chan *CertWithChainData // After parsing files @@ -43,6 +45,7 @@ func NewProcessor(conn db.Conn, certUpdateStrategy CertificateUpdateStrategy) *P p := &Processor{ Conn: conn, cache: NewPresenceCache(), + now: time.Now(), incomingFileCh: make(chan File), certWithChainChan: make(chan *CertWithChainData), nodeChan: nodeChan, @@ -184,11 +187,16 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { if err != nil { return err } + // Update statistics. - p.batchProcessor.WrittenBytes.Add(int64(len(rawBytes))) - p.batchProcessor.WrittenCerts.Inc() + p.batchProcessor.ReadBytes.Add(int64(len(rawBytes))) + p.batchProcessor.ReadCerts.Inc() p.batchProcessor.UncachedCerts.Inc() + if p.now.After(cert.NotAfter) { + return nil + } + // The certificate chain is a list of base64 strings separated by semicolon (;). strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) @@ -199,8 +207,8 @@ func (p *Processor) ingestWithCSV(fileReader io.Reader) error { return fmt.Errorf("at line %d: %s\n%s", lineNo, err, fields[CertChainColumn]) } // Update statistics. - p.batchProcessor.WrittenBytes.Add(int64(len(rawBytes))) - p.batchProcessor.WrittenCerts.Inc() + p.batchProcessor.ReadBytes.Add(int64(len(rawBytes))) + p.batchProcessor.ReadCerts.Inc() // Check if the parent certificate is in the cache. id := common.SHA256Hash32Bytes(rawBytes) if !p.cache.Contains(&id) { From 955a61f0e9f3842be9cc377daf0701f432e1445d Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 16 Mar 2023 16:04:36 +0100 Subject: [PATCH 044/187] New caches package for ingest. It contains the principal/shadow and the nocache caches. 
--- cmd/ingest/cache/cache.go | 12 ++++++++++++ cmd/ingest/cache/noCache.go | 13 +++++++++++++ cmd/ingest/{ => cache}/presenceCache.go | 2 +- cmd/ingest/processor.go | 6 ++++-- 4 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 cmd/ingest/cache/cache.go create mode 100644 cmd/ingest/cache/noCache.go rename cmd/ingest/{ => cache}/presenceCache.go (99%) diff --git a/cmd/ingest/cache/cache.go b/cmd/ingest/cache/cache.go new file mode 100644 index 00000000..2671a68d --- /dev/null +++ b/cmd/ingest/cache/cache.go @@ -0,0 +1,12 @@ +package cache + +import "github.com/netsec-ethz/fpki/pkg/common" + +type Cache interface { + Contains(*common.SHA256Output) bool + AddIDs([]*common.SHA256Output) +} + +func NewNoCache() NoCache { + return NoCache{} +} diff --git a/cmd/ingest/cache/noCache.go b/cmd/ingest/cache/noCache.go new file mode 100644 index 00000000..1c768da0 --- /dev/null +++ b/cmd/ingest/cache/noCache.go @@ -0,0 +1,13 @@ +package cache + +import "github.com/netsec-ethz/fpki/pkg/common" + +type NoCache struct{} + +// Contains always returns false (the item is never in cache). +func (NoCache) Contains(*common.SHA256Output) bool { + return false +} + +// AddIDs doesn't do anything. 
+func (NoCache) AddIDs([]*common.SHA256Output) {} diff --git a/cmd/ingest/presenceCache.go b/cmd/ingest/cache/presenceCache.go similarity index 99% rename from cmd/ingest/presenceCache.go rename to cmd/ingest/cache/presenceCache.go index b224c2ab..d08b736c 100644 --- a/cmd/ingest/presenceCache.go +++ b/cmd/ingest/cache/presenceCache.go @@ -1,4 +1,4 @@ -package main +package cache import ( "sync" diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 64ca7ace..dfddb795 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -11,6 +11,8 @@ import ( "time" ctx509 "github.com/google/certificate-transparency-go/x509" + + "github.com/netsec-ethz/fpki/cmd/ingest/cache" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" @@ -21,7 +23,7 @@ import ( // described in the `start` method. type Processor struct { Conn db.Conn - cache *PresenceCache // IDs of certificates pushed to DB. + cache cache.Cache // IDs of certificates pushed to DB. now time.Time incomingFileCh chan File // New files with certificates to be ingested @@ -44,7 +46,7 @@ func NewProcessor(conn db.Conn, certUpdateStrategy CertificateUpdateStrategy) *P nodeChan := make(chan *CertificateNode) p := &Processor{ Conn: conn, - cache: NewPresenceCache(), + cache: cache.NewNoCache(), now: time.Now(), incomingFileCh: make(chan File), certWithChainChan: make(chan *CertWithChainData), From 15e28f8badd5ea9e329a5e3d4dfb5c3bc72528d7 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 17 Mar 2023 12:41:30 +0100 Subject: [PATCH 045/187] Coalesce payloads in one entry per domain. For all domains that have been modified, get their certificates, sort their payloads and fuse them together in one entry, so that the retrieval phase can quickly get it. Compute the SHA256 of the coalesced payload as well. 
--- cmd/ingest/coalescePayloads.go | 81 ++++++++++++++++++++++++++++++++++ cmd/ingest/main.go | 43 ++++++++++-------- tools/create_schema.sh | 48 +++++++++++++++++++- 3 files changed, 153 insertions(+), 19 deletions(-) create mode 100644 cmd/ingest/coalescePayloads.go diff --git a/cmd/ingest/coalescePayloads.go b/cmd/ingest/coalescePayloads.go new file mode 100644 index 00000000..52852ab1 --- /dev/null +++ b/cmd/ingest/coalescePayloads.go @@ -0,0 +1,81 @@ +package main + +import ( + "database/sql" + "fmt" + "sync" + + "github.com/netsec-ethz/fpki/pkg/common" +) + +func CoalescePayloadsForDirtyDomains(db *sql.DB) { + // Get all dirty domain IDs. + str := "SELECT domain_id FROM dirty" + rows, err := db.Query(str) + if err != nil { + panic(fmt.Errorf("error querying dirty domains: %w", err)) + } + domainIDs := make([]*common.SHA256Output, 0) + for rows.Next() { + var domainId []byte + err = rows.Scan(&domainId) + if err != nil { + panic(fmt.Errorf("error scanning domain ID: %w", err)) + } + ptr := (*common.SHA256Output)(domainId) + domainIDs = append(domainIDs, ptr) + } + + // Start NumDBWriters workers. + fmt.Printf("Starting %d workers coalescing payloads for modified domains\n", NumDBWriters) + ch := make(chan []*common.SHA256Output) + wg := sync.WaitGroup{} + wg.Add(NumDBWriters) + for i := 0; i < NumDBWriters; i++ { + go func() { + defer wg.Done() + for ids := range ch { + // We receive ids as a slice of IDs. We ought to build a long slice of bytes + // with all the bytes concatenated. + param := make([]byte, len(ids)*common.SHA256Size) + for i, id := range ids { + copy(param[i*common.SHA256Size:], id[:]) + } + // Now call the stored procedure with this parameter. 
+ str := "CALL calc_several_domain_payloads(?)" + _, err := db.Exec(str, param) + if err != nil { + panic(fmt.Errorf("error coalescing payload for domains: %w", err)) + } + } + }() + } + + // Split the dirty domain ID list in NumDBWriters + batchSize := len(domainIDs) / NumDBWriters + // First workers handle one more ID than the rest, to take into account also the remainder. + for i := 0; i < len(domainIDs)%NumDBWriters; i++ { + b := domainIDs[i*(batchSize+1) : (i+1)*(batchSize+1)] + ch <- b + } + // The rest of the workers will do a batchSize-sized item. + restOfWorkersCount := NumDBWriters - (len(domainIDs) % NumDBWriters) + domainIDs = domainIDs[(len(domainIDs)%NumDBWriters)*(batchSize+1):] + for i := 0; i < restOfWorkersCount; i++ { + b := domainIDs[i*batchSize : (i+1)*batchSize] + ch <- b + } + + // Close the batches channel. + close(ch) + // And wait for all workers to finish. + wg.Wait() + + // Remove all entries from the dirty table. + str = "TRUNCATE dirty" + _, err = db.Exec(str) + if err != nil { + panic(fmt.Errorf("error truncating dirty table: %w", err)) + } + fmt.Println("Done coalescing.") +} diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index f76979fe..5c9fd1c5 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -44,26 +44,29 @@ func mainFunction() int { cpuProfile := flag.String("cpuprofile", "", "write a CPU profile to file") memProfile := flag.String("memprofile", "", "write a memory profile to file") certUpdateStrategy := flag.String("strategy", "keep", "strategy to update certificates\n"+ - "\"overwrite\": always send certificates to DB, even if they exist already\n"+ - "\"keep\": first check if each certificate exists already in DB before sending it\n"+ + "\"overwrite\": always send certificates to DB, even if they exist already.\n"+ + "\"keep\": first check if each certificate exists already in DB before sending it.\n"+ + "\"coalesce\": only coalesce payloads of domains in the dirty table.\n"+ `If data transfer to DB is 
expensive, "keep" is recommended.`) flag.Parse() - if flag.NArg() != 1 { - flag.Usage() - return 1 - } - // Update strategy. var strategy CertificateUpdateStrategy + var coalesceOnly bool switch *certUpdateStrategy { case "overwrite": strategy = CertificateUpdateOverwrite case "keep": strategy = CertificateUpdateKeepExisting + case "coalesce": + coalesceOnly = true default: panic(fmt.Errorf("bad update strategy: %v", *certUpdateStrategy)) } + if !coalesceOnly && flag.NArg() != 1 { + flag.Usage() + return 1 + } // Profiling: stopProfiles := func() { @@ -86,7 +89,7 @@ func mainFunction() int { defer stopProfiles() // Signals catching: - signals := make(chan os.Signal) + signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM) go func() { <-signals @@ -100,18 +103,22 @@ func mainFunction() int { conn, err := db.Connect(config) exitIfError(err) - // All GZ and CSV files found under the directory of the argument. - gzFiles, csvFiles := listOurFiles(flag.Arg(0)) - fmt.Printf("# gzFiles: %d, # csvFiles: %d\n", len(gzFiles), len(csvFiles)) + if !coalesceOnly { + // All GZ and CSV files found under the directory of the argument. + gzFiles, csvFiles := listOurFiles(flag.Arg(0)) + fmt.Printf("# gzFiles: %d, # csvFiles: %d\n", len(gzFiles), len(csvFiles)) - // Truncate DB. - exitIfError(conn.TruncateAllTables()) + // Truncate DB. + exitIfError(conn.TruncateAllTables()) - // Update certificates and chains. - proc := NewProcessor(conn, strategy) - proc.AddGzFiles(gzFiles) - proc.AddCsvFiles(csvFiles) - exitIfError(proc.Wait()) + // Update certificates and chains. + proc := NewProcessor(conn, strategy) + proc.AddGzFiles(gzFiles) + proc.AddCsvFiles(csvFiles) + exitIfError(proc.Wait()) + } + // Coalesce the payloads of all modified domains. + CoalescePayloadsForDirtyDomains(conn.DB()) // Close DB and check errors. 
err = conn.Close() diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 8e0820d3..a80aefa1 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -57,7 +57,7 @@ USE fpki; CREATE TABLE domain_payloads ( id VARBINARY(32) NOT NULL, payload LONGBLOB, - payload_hash VARBINARY(32) DEFAULT NULL, + payload_id VARBINARY(32) DEFAULT NULL, PRIMARY KEY (id) ) ENGINE=MyISAM CHARSET=binary COLLATE=binary; EOF @@ -140,3 +140,49 @@ EOF ) echo "$CMD" | $MYSQLCMD + +CMD=$(cat < 0 DO + SET ID = LEFT(IDS,32); + CALL calc_domain_payload(ID); + SET IDS = RIGHT(IDS,LENGTH(IDS)-32); + END WHILE; + END$$ +DELIMITER ; +EOF +) +echo "$CMD" | mysql -u root From 1c00e63c3fcb7ff200ccc37c81831f95f8a17c2f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 17 Mar 2023 12:57:26 +0100 Subject: [PATCH 046/187] Don't truncate tables on ingest. --- cmd/ingest/main.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 5c9fd1c5..7a6520bd 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -108,9 +108,6 @@ func mainFunction() int { gzFiles, csvFiles := listOurFiles(flag.Arg(0)) fmt.Printf("# gzFiles: %d, # csvFiles: %d\n", len(gzFiles), len(csvFiles)) - // Truncate DB. - exitIfError(conn.TruncateAllTables()) - // Update certificates and chains. proc := NewProcessor(conn, strategy) proc.AddGzFiles(gzFiles) From e12ef44af7fe351841d3af21bcbea29327c14b6f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 17 Mar 2023 12:57:42 +0100 Subject: [PATCH 047/187] Fix TruncateAllTables with the correct table names. 
--- pkg/db/mysql.go | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index 86c3f31f..82707485 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -131,12 +131,15 @@ func (c *mysqlDB) Close() error { func (c *mysqlDB) TruncateAllTables() error { tables := []string{ - "domainEntries", "tree", - "updates", + "root", + "certs", + "domains", + "domain_payloads", + "dirty", } for _, t := range tables { - if _, err := c.db.Exec(fmt.Sprintf("DELETE FROM %s", t)); err != nil { + if _, err := c.db.Exec(fmt.Sprintf("TRUNCATE %s", t)); err != nil { return err } } From fe0f417ecbe892cdfe31852f5b5851e9fe764a32 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 17 Mar 2023 22:43:21 +0100 Subject: [PATCH 048/187] Adapt SMT updater to the new design. --- cmd/ingest/coalescePayloads.go | 28 +---- cmd/ingest/main.go | 18 ++- cmd/ingest/processor.go | 7 -- cmd/ingest/smtUpdater.go | 48 ++++---- pkg/db/db.go | 6 +- pkg/db/mysql.go | 9 +- pkg/db/read.go | 111 ++++++++++++------- pkg/db/write.go | 61 +++++++++- pkg/mapserver/internal/mockdb_for_testing.go | 2 + pkg/mapserver/trie/trie.go | 13 +-- pkg/mapserver/trie/trie_cache.go | 20 ++-- pkg/mapserver/updater/dbutil.go | 9 +- pkg/mapserver/updater/deleteme.go | 9 +- pkg/mapserver/updater/updater.go | 5 +- 14 files changed, 212 insertions(+), 134 deletions(-) diff --git a/cmd/ingest/coalescePayloads.go b/cmd/ingest/coalescePayloads.go index 52852ab1..c8e33d06 100644 --- a/cmd/ingest/coalescePayloads.go +++ b/cmd/ingest/coalescePayloads.go @@ -1,29 +1,19 @@ package main import ( - "database/sql" + "context" "fmt" "sync" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" ) -func CoalescePayloadsForDirtyDomains(db *sql.DB) { +func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) { // Get all dirty domain IDs. 
- str := "SELECT domain_id FROM dirty" - rows, err := db.Query(str) + domainIDs, err := conn.UpdatedDomains(ctx) if err != nil { - panic(fmt.Errorf("error querying dirty domains: %w", err)) - } - domainIDs := make([]*common.SHA256Output, 0) - for rows.Next() { - var domainId []byte - err = rows.Scan(&domainId) - if err != nil { - panic(fmt.Errorf("error scanning domain ID: %w", err)) - } - ptr := (*common.SHA256Output)(domainId) - domainIDs = append(domainIDs, ptr) + panic(err) } // Start NumDBWriters workers. @@ -43,7 +33,7 @@ func CoalescePayloadsForDirtyDomains(db *sql.DB) { } // Now call the stored procedure with this parameter. str := "CALL calc_several_domain_payloads(?)" - _, err := db.Exec(str, param) + _, err := conn.DB().Exec(str, param) if err != nil { panic(fmt.Errorf("error coalescing payload for domains: %w", err)) } @@ -71,11 +61,5 @@ func CoalescePayloadsForDirtyDomains(db *sql.DB) { // And wait for all workers to finish. wg.Wait() - // Remove all entries from the dirty table. - str = "TRUNCATE dirty" - _, err = db.Exec(str) - if err != nil { - panic(fmt.Errorf("error truncating dirty table: %w", err)) - } fmt.Println("Done coalescing.") } diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 7a6520bd..d33ec301 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -1,6 +1,7 @@ package main import ( + "context" "flag" "fmt" "io/ioutil" @@ -37,6 +38,8 @@ func main() { os.Exit(mainFunction()) } func mainFunction() int { + ctx := context.Background() + flag.Usage = func() { fmt.Fprintf(os.Stderr, "Usage:\n%s directory\n", os.Args[0]) flag.PrintDefaults() @@ -115,9 +118,20 @@ func mainFunction() int { exitIfError(proc.Wait()) } // Coalesce the payloads of all modified domains. 
- CoalescePayloadsForDirtyDomains(conn.DB()) + CoalescePayloadsForDirtyDomains(ctx, conn) + + // Now start processing the changed domains into the SMT: + // conn.LoadRoot deleteme TODO load root + smtProcessor := NewSMTUpdater(conn, nil, 32) + smtProcessor.Start(ctx) + err = smtProcessor.Wait() + exitIfError(err) + + // Cleanup dirty entries. + err = conn.CleanupDirty(ctx) + exitIfError(err) - // Close DB and check errors. + // Close DB. err = conn.Close() exitIfError(err) return 0 diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index dfddb795..431ee8b4 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -110,13 +110,6 @@ func (p *Processor) start() { // Wait for the next stage to finish p.batchProcessor.Wait() - // Now start processing the changed domains into the SMT: - smtProcessor := NewSMTUpdater(p.Conn, nil, 32) - smtProcessor.Start() - if err := smtProcessor.Wait(); err != nil { - p.errorCh <- err - } - // There is no more processing to do, close the errors channel and allow the // error processor to finish. close(p.errorCh) diff --git a/cmd/ingest/smtUpdater.go b/cmd/ingest/smtUpdater.go index 3ace9fe8..02727201 100644 --- a/cmd/ingest/smtUpdater.go +++ b/cmd/ingest/smtUpdater.go @@ -3,7 +3,6 @@ package main import ( "context" "fmt" - "sync" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" @@ -12,7 +11,7 @@ import ( ) type SMTUpdater struct { - Store db.Conn + conn db.Conn smtTrie *trie.Trie errorCh chan error @@ -26,46 +25,40 @@ func NewSMTUpdater(conn db.Conn, root []byte, cacheHeight int) *SMTUpdater { } smtTrie.CacheHeightLimit = cacheHeight return &SMTUpdater{ - Store: conn, + conn: conn, smtTrie: smtTrie, errorCh: make(chan error), doneCh: make(chan error), } } -func (u *SMTUpdater) Start() { +func (u *SMTUpdater) Start(ctx context.Context) { fmt.Println("Starting SMT updater") // Start processing the error channel. 
go u.processErrorChannel() - if 4%5 != 0 { // deleteme - close(u.errorCh) - return - } - // Read batches of updated nodes from `updates`: go func() { - domainsCh, errorCh := u.Store.UpdatedDomains() - wg := sync.WaitGroup{} - for batch := range domainsCh { - // Process the batches concurrently. - batch := batch - wg.Add(1) - go func() { - defer wg.Done() - u.processBatch(batch) - }() - } - for err := range errorCh { + // This is the last and only processing function. After it finishes, there is nothing + // else to process, close error channel on exiting. + defer close(u.errorCh) + + domains, err := u.conn.UpdatedDomains(ctx) + if err != nil { u.errorCh <- err + return } - wg.Wait() + u.processBatch(ctx, domains) - // Nothing else to process, close error channel. - close(u.errorCh) + // Save root value: + err = u.conn.SaveRoot(ctx, (*common.SHA256Output)(u.smtTrie.Root)) + if err != nil { + u.errorCh <- err + return + } + fmt.Println("Done SMT updater.") }() - } func (u *SMTUpdater) Wait() error { @@ -88,9 +81,9 @@ func (u *SMTUpdater) processErrorChannel() { close(u.doneCh) } -func (u *SMTUpdater) processBatch(batch []common.SHA256Output) { +func (u *SMTUpdater) processBatch(ctx context.Context, batch []*common.SHA256Output) { // Read those certificates: - entries, err := u.Store.RetrieveDomainEntries(context.Background(), batch) + entries, err := u.conn.RetrieveDomainEntries(ctx, batch) if err != nil { u.errorCh <- err return @@ -113,5 +106,4 @@ func (u *SMTUpdater) processBatch(batch []common.SHA256Output) { u.errorCh <- err return } - fmt.Printf("deleteme SMT processed batch of %d elements\n", len(batch)) } diff --git a/pkg/db/db.go b/pkg/db/db.go index 5b2b4c85..0f4413ba 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -65,7 +65,7 @@ type Conn interface { RetrieveDomainEntry(ctx context.Context, id common.SHA256Output) ([]byte, error) // RetrieveDomainEntries: Retrieve a list of domain entries table - RetrieveDomainEntries(ctx context.Context, id 
[]common.SHA256Output) ([]*KeyValuePair, error) + RetrieveDomainEntries(ctx context.Context, id []*common.SHA256Output) ([]*KeyValuePair, error) // UpdateDomainEntries: Update a list of key-value pairs in domain entries table UpdateDomainEntries(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) @@ -91,5 +91,7 @@ type Conn interface { // A batch will have a implementation dependent size. // Each updated domain represents the SHA256 of the textual domain that was updated and // present in the `updates` table. - UpdatedDomains() (chan []common.SHA256Output, chan error) + UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, error) + CleanupDirty(ctx context.Context) error + SaveRoot(ctx context.Context, root *common.SHA256Output) error } diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index 82707485..bec26b70 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -261,12 +261,13 @@ func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs return err } -// repeatStmt returns ( (?,..inner..,?), ...outer... ) -func repeatStmt(outer int, inner int) string { - components := make([]string, inner) +// repeatStmt returns ( (?,..dimensions..,?), ...elemCount... ) +// Use it like repeatStmt(1, len(IDs)) to obtain (?,?,...) +func repeatStmt(elemCount int, dimensions int) string { + components := make([]string, dimensions) for i := 0; i < len(components); i++ { components[i] = "?" } toRepeat := "(" + strings.Join(components, ",") + ")" - return strings.Repeat(toRepeat+",", outer-1) + toRepeat + return strings.Repeat(toRepeat+",", elemCount-1) + toRepeat } diff --git a/pkg/db/read.go b/pkg/db/read.go index 213a7d40..1186092b 100644 --- a/pkg/db/read.go +++ b/pkg/db/read.go @@ -14,9 +14,22 @@ type readKeyResult struct { Err error } +func (c *mysqlDB) RetrieveTreeNode(ctx context.Context, key common.SHA256Output) ([]byte, error) { + var value []byte + str := "SELECT value FROM tree WHERE key32 = ?" 
+ err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&value) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, fmt.Errorf("error retrieving node from tree: %w", err) + } + return value, nil +} + // RetrieveTreeNode retrieves one single key-value pair from tree table // Return sql.ErrNoRows if no row is round -func (c *mysqlDB) RetrieveTreeNode(ctx context.Context, key common.SHA256Output) ([]byte, error) { +func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Output) ([]byte, error) { c.getProofLimiter <- struct{}{} defer func() { <-c.getProofLimiter }() @@ -46,14 +59,41 @@ func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Outp // RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table // No sql.ErrNoRows will be thrown, if some records does not exist. Check the length of result -func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, key []common.SHA256Output) ( +func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, keys []*common.SHA256Output) ( []*KeyValuePair, error) { - return c.retrieveDomainEntries(ctx, key) + return c.retrieveDomainEntries(ctx, keys) +} + +func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common.SHA256Output, +) ([]*KeyValuePair, error) { + + str := "SELECT id,payload FROM domain_payloads WHERE id IN " + repeatStmt(1, len(domainIDs)) + params := make([]interface{}, len(domainIDs)) + for i, id := range domainIDs { + params[i] = (*id)[:] + } + rows, err := c.db.QueryContext(ctx, str, params...) 
+ if err != nil { + return nil, fmt.Errorf("error obtaining payloads for domains: %w", err) + } + pairs := make([]*KeyValuePair, 0, len(domainIDs)) + for rows.Next() { + var id, payload []byte + err := rows.Scan(&id, &payload) + if err != nil { + return nil, fmt.Errorf("error scanning domain ID and its payload") + } + pairs = append(pairs, &KeyValuePair{ + Key: *(*common.SHA256Output)(id), + Value: payload, + }) + } + return pairs, nil } // used for retrieving key value pair -func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, keys []common.SHA256Output) ( +func (c *mysqlDB) retrieveDomainEntriesOld(ctx context.Context, keys []*common.SHA256Output) ( []*KeyValuePair, error) { str := "SELECT `key`, `value` FROM domainEntries WHERE `key` IN " + repeatStmt(1, len(keys)) args := make([]interface{}, len(keys)) @@ -147,44 +187,35 @@ func (c *mysqlDB) RetrieveUpdatedDomains(ctx context.Context, perQueryLimit int) return keys, nil } -// UpdatedDomains reads the updates table, which was written by e.g. AddUpdatedDomains. -func (c *mysqlDB) UpdatedDomains() (chan []common.SHA256Output, chan error) { - domainsCh := make(chan []common.SHA256Output) - errorCh := make(chan error) - go func() { - defer close(errorCh) - rows, err := c.prepGetUpdatedDomains.Query() +// UpdatedDomains returns the domain IDs that are still dirty, i.e. modified certificates for +// that domain, but not yet coalesced and ingested by the SMT. 
+func (c *mysqlDB) UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, error) { + str := "SELECT domain_id FROM dirty" + rows, err := c.db.QueryContext(ctx, str) + if err != nil { + return nil, fmt.Errorf("error querying dirty domains: %w", err) + } + domainIDs := make([]*common.SHA256Output, 0) + for rows.Next() { + var domainId []byte + err = rows.Scan(&domainId) if err != nil { - close(domainsCh) - errorCh <- err - return - } - defer rows.Close() - for { - batch := make([]common.SHA256Output, 0, batchSize) - for i := 0; i < batchSize && rows.Next(); i++ { - var key []byte - if err := rows.Scan(&key); err != nil { - close(domainsCh) - errorCh <- err - return - } - batch = append(batch, *(*common.SHA256Output)(key[:common.SHA256Size])) - } - if err := rows.Err(); err != nil { - close(domainsCh) - errorCh <- err - return - } - - if len(batch) == 0 { - break - } - domainsCh <- batch + return nil, fmt.Errorf("error scanning domain ID: %w", err) } - close(domainsCh) - }() - return domainsCh, errorCh + ptr := (*common.SHA256Output)(domainId) + domainIDs = append(domainIDs, ptr) + } + return domainIDs, nil +} + +func (c *mysqlDB) CleanupDirty(ctx context.Context) error { + // Remove all entries from the dirty table. 
+ str := "TRUNCATE dirty" + _, err := c.db.ExecContext(ctx, str) + if err != nil { + return fmt.Errorf("error truncating dirty table: %w", err) + } + return nil } func retrieveValue(ctx context.Context, stmt *sql.Stmt, key common.SHA256Output) ([]byte, error) { diff --git a/pkg/db/write.go b/pkg/db/write.go index 48059439..780b1038 100644 --- a/pkg/db/write.go +++ b/pkg/db/write.go @@ -12,17 +12,38 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" ) +func (c *mysqlDB) UpdateDomainEntries(ctx context.Context, pairs []*KeyValuePair) (int, error) { + panic("not available") +} + // UpdateDomainEntries: Update a list of key-value store -func (c *mysqlDB) UpdateDomainEntries(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { - numOfUpdatedRecords, err := c.doUpdatePairs(ctx, keyValuePairs, c.getDomainEntriesUpdateStmts, "domainEntries") +func (c *mysqlDB) UpdateDomainEntriesOLD(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { + numOfUpdatedRecords, err := c.doUpdatePairs(ctx, keyValuePairs, c.getDomainEntriesUpdateStmts) if err != nil { return 0, fmt.Errorf("UpdateDomainEntries | %w", err) } return numOfUpdatedRecords, nil } -// DeleteTreeNodes deletes a list of key-value stored in the tree table. func (c *mysqlDB) DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) { + str := "DELETE FROM tree WHERE key32 IN " + repeatStmt(1, len(keys)) + params := make([]interface{}, len(keys)) + for i, k := range keys { + params[i] = k[:] + } + res, err := c.db.ExecContext(ctx, str, params...) + if err != nil { + return 0, fmt.Errorf("error deleting keys from tree: %w", err) + } + n, err := res.RowsAffected() + if err != nil { + panic(fmt.Errorf("unsupported retrieving number of rows affected: %w", err)) + } + return int(n), nil +} + +// DeleteTreeNodes deletes a list of key-value stored in the tree table. 
+func (c *mysqlDB) DeleteTreeNodesOLD(ctx context.Context, keys []common.SHA256Output) (int, error) { n, err := c.doUpdateKeys(ctx, keys, c.getTreeDeleteStmts) if err != nil { return 0, fmt.Errorf("DeleteTreeNodes | %w", err) @@ -31,9 +52,30 @@ func (c *mysqlDB) DeleteTreeNodes(ctx context.Context, keys []common.SHA256Outpu return n, nil } -// UpdateTreeNodes: Update a list of key-value store func (c *mysqlDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { - numOfUpdatedPairs, err := c.doUpdatePairs(ctx, keyValuePairs, c.getTreeStructureUpdateStmts, "tree") + if len(keyValuePairs) == 0 { + return 0, nil + } + str := "REPLACE INTO tree (key32,value) VALUES " + repeatStmt(len(keyValuePairs), 2) + params := make([]interface{}, 2*len(keyValuePairs)) + for i, pair := range keyValuePairs { + params[i*2] = pair.Key[:] + params[i*2+1] = pair.Value + } + res, err := c.db.ExecContext(ctx, str, params...) + if err != nil { + return 0, fmt.Errorf("error inserting key-values into tree: %w", err) + } + n, err := res.RowsAffected() + if err != nil { + panic(fmt.Errorf("unsupported retrieving number of rows affected: %w", err)) + } + return int(n), nil +} + +// UpdateTreeNodes: Update a list of key-value store +func (c *mysqlDB) UpdateTreeNodesOLD(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { + numOfUpdatedPairs, err := c.doUpdatePairs(ctx, keyValuePairs, c.getTreeStructureUpdateStmts) if err != nil { return 0, fmt.Errorf("UpdateTreeNodes | %w", err) } @@ -62,6 +104,15 @@ func (c *mysqlDB) RemoveAllUpdatedDomains(ctx context.Context) error { return nil } +func (c *mysqlDB) SaveRoot(ctx context.Context, root *common.SHA256Output) error { + str := "REPLACE INTO root (key32) VALUES (?)" + _, err := c.db.ExecContext(ctx, str, (*root)[:]) + if err != nil { + return fmt.Errorf("error inserting root ID: %w", err) + } + return nil +} + // ******************************************************************** // // Common diff --git 
a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index f0eaca08..0bc3e9aa 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -141,3 +141,5 @@ func (d *MockDB) RemoveAllUpdatedDomains(ctx context.Context) error { } func (d *MockDB) UpdatedDomains() (chan []common.SHA256Output, chan error) { return nil, nil } + +func (*MockDB) CleanupDirty(ctx context.Context) error { return nil } diff --git a/pkg/mapserver/trie/trie.go b/pkg/mapserver/trie/trie.go index bc6ff320..15c1378f 100644 --- a/pkg/mapserver/trie/trie.go +++ b/pkg/mapserver/trie/trie.go @@ -68,7 +68,7 @@ func NewTrie(root []byte, hash func(data ...[]byte) []byte, store db.Conn) (*Tri } func (s *Trie) PrintCacheSize() { - fmt.Println(len(s.db.liveCache)) + fmt.Println(s.db.GetLiveCacheSize()) } func (s *Trie) Close() error { @@ -81,15 +81,12 @@ func (s *Trie) Close() error { // If Update is called multiple times, only the state after the last update // is committed. 
func (s *Trie) Update(ctx context.Context, keys, values [][]byte) ([]byte, error) { - if len(keys) == 0 { - return nil, nil + if len(keys) != len(values) { + return nil, fmt.Errorf("key value size does not match") } - if len(values) == 0 { + if len(keys) == 0 { return nil, nil } - if len(keys) != len(values) { - return nil, fmt.Errorf("key value size does not mathc") - } s.lock.Lock() defer s.lock.Unlock() @@ -520,7 +517,7 @@ func (s *Trie) loadBatch(ctx context.Context, root []byte, height int) ([][]byte // Added: add the newly fetched nodes, and cache them into memory resultBytes := parseBatch(value) if height >= s.CacheHeightLimit && height%4 == 0 { - s.db.liveCache[rootCopy] = resultBytes + s.db.updateLiveCache(rootCopy, resultBytes) } return resultBytes, nil } diff --git a/pkg/mapserver/trie/trie_cache.go b/pkg/mapserver/trie/trie_cache.go index bae44968..e1f14c6b 100644 --- a/pkg/mapserver/trie/trie_cache.go +++ b/pkg/mapserver/trie/trie_cache.go @@ -98,7 +98,7 @@ func (cacheDB *CacheDB) getValueLimit(ctx context.Context, key []byte) ([]byte, } func (cacheDB *CacheDB) getValueLockFree(ctx context.Context, key []byte) ([]byte, error) { - value, err := cacheDB.Store.RetrieveTreeNode(ctx, *(*[32]byte)(key)) + value, err := cacheDB.Store.RetrieveTreeNode(ctx, *(*common.SHA256Output)(key)) if err != nil { return nil, fmt.Errorf("getValue | RetrieveOneKeyValuePair | %w", err) } @@ -121,11 +121,15 @@ func serializeBatch(batch [][]byte) []byte { return serialized } -//************************************************** -// functions for live cache -//************************************************** +// ************************************************** +// +// functions for live cache +// +// ************************************************** // GetLiveCacheSize: get current size of live cache func (db *CacheDB) GetLiveCacheSize() int { + db.liveMux.RLock() + defer db.liveMux.RUnlock() return len(db.liveCache) } @@ -151,9 +155,11 @@ func (db *CacheDB) 
getLiveCache(node common.SHA256Output) ([][]byte, bool) { return val, exists } -//************************************************** -// functions for updated nodes -//************************************************** +// ************************************************** +// +// functions for updated nodes +// +// ************************************************** // getUpdatedNodes: get one node from updated nodes func (db *CacheDB) getUpdatedNodes(node common.SHA256Output) ([][]byte, bool) { db.updatedMux.RLock() diff --git a/pkg/mapserver/updater/dbutil.go b/pkg/mapserver/updater/dbutil.go index 5628ad55..af56e302 100644 --- a/pkg/mapserver/updater/dbutil.go +++ b/pkg/mapserver/updater/dbutil.go @@ -28,10 +28,11 @@ func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDB(ctx context.Context, affectedDomainHashes = append(affectedDomainHashes, k) } - work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) { - domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) - resultChan <- dbResult{pairs: domainEntries, err: err} - } + // work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) { + // domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) + // resultChan <- dbResult{pairs: domainEntries, err: err} + // } + work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) {} resultChan := make(chan dbResult) diff --git a/pkg/mapserver/updater/deleteme.go b/pkg/mapserver/updater/deleteme.go index d53fa928..e644e09d 100644 --- a/pkg/mapserver/updater/deleteme.go +++ b/pkg/mapserver/updater/deleteme.go @@ -197,10 +197,11 @@ func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDBReturnReadDomains(ctx affectedDomainHashes = append(affectedDomainHashes, k) } - work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) { - domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) - resultChan <- dbResult{pairs: 
domainEntries, err: err} - } + // work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) { + // domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) + // resultChan <- dbResult{pairs: domainEntries, err: err} + // } + work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) {} resultChan := make(chan dbResult) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index c161b995..d13aefd9 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -204,7 +204,10 @@ func KeyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ([][]byte, [][]byte updateInput := make([]UpdateInput, 0, len(keyValuePair)) for _, pair := range keyValuePair { - updateInput = append(updateInput, UpdateInput{Key: pair.Key, Value: common.SHA256Hash(pair.Value)}) + updateInput = append(updateInput, UpdateInput{ + Key: pair.Key, + Value: common.SHA256Hash(pair.Value), + }) } sort.Slice(updateInput, func(i, j int) bool { From 438c93c4b301f5c5b67914a7abf7e4cc24909c51 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 17 Mar 2023 14:08:02 +0100 Subject: [PATCH 049/187] WIP new responder and DB functions. Added LoadRoot. Changed signature of some DB functions. Moved responder to OldMapResponder. 
--- pkg/db/db.go | 10 +- pkg/db/mysql.go | 18 +- pkg/grpc/grpcserver/grpcserver.go | 4 +- pkg/mapserver/internal/mockdb_for_testing.go | 6 +- pkg/mapserver/responder/deleteme.go | 2 +- pkg/mapserver/responder/old_responder.go | 247 ++++++++++++++++++ ...esponder_test.go => old_responder_test.go} | 4 +- pkg/mapserver/responder/responder.go | 238 +++-------------- .../responder_benchmark/main.go | 2 +- tests/integration/db/db.go | 27 +- tests/integration/mapserver/main.go | 247 +++--------------- tests/integration/old_mapserver/main.go | 234 +++++++++++++++++ 12 files changed, 589 insertions(+), 450 deletions(-) create mode 100644 pkg/mapserver/responder/old_responder.go rename pkg/mapserver/responder/{responder_test.go => old_responder_test.go} (98%) create mode 100644 tests/integration/old_mapserver/main.go diff --git a/pkg/db/db.go b/pkg/db/db.go index 0f4413ba..e69074d7 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -24,13 +24,13 @@ type Conn interface { Close() error // TruncateAllTables resets the DB to an initial state. - TruncateAllTables() error + TruncateAllTables(ctx context.Context) error - // DisableIndexing stops the indexing in the table. - DisableIndexing(table string) error + LoadRoot(ctx context.Context) (*common.SHA256Output, error) - // DisableIndexing starts the indexing in the table. - EnableIndexing(table string) error + ////////////////////////////////////////////////////////////////// + // check if the functions below are needed after the new design // + ////////////////////////////////////////////////////////////////// // CheckCertsExist returns a slice of true/false values. Each value indicates if // the corresponding certificate identified by its ID is already present in the DB. 
diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index bec26b70..bdeaa503 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -129,7 +129,7 @@ func (c *mysqlDB) Close() error { return c.db.Close() } -func (c *mysqlDB) TruncateAllTables() error { +func (c *mysqlDB) TruncateAllTables(ctx context.Context) error { tables := []string{ "tree", "root", @@ -139,21 +139,19 @@ func (c *mysqlDB) TruncateAllTables() error { "dirty", } for _, t := range tables { - if _, err := c.db.Exec(fmt.Sprintf("TRUNCATE %s", t)); err != nil { + if _, err := c.db.ExecContext(ctx, fmt.Sprintf("TRUNCATE %s", t)); err != nil { return err } } return nil } -func (c *mysqlDB) DisableIndexing(table string) error { - _, err := c.db.Exec(fmt.Sprintf("ALTER TABLE `%s` DISABLE KEYS", table)) - return err -} - -func (c *mysqlDB) EnableIndexing(table string) error { - _, err := c.db.Exec(fmt.Sprintf("ALTER TABLE `%s` ENABLE KEYS", table)) - return err +func (c *mysqlDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { + var key []byte + if err := c.db.QueryRowContext(ctx, "SELECT key32 FROM root").Scan(&key); err != nil { + return nil, fmt.Errorf("error obtaining the root entry: %w", err) + } + return (*common.SHA256Output)(key), nil } // CheckCertsExist returns a slice of true/false values. 
Each value indicates if diff --git a/pkg/grpc/grpcserver/grpcserver.go b/pkg/grpc/grpcserver/grpcserver.go index 31cbeec4..be9031f3 100644 --- a/pkg/grpc/grpcserver/grpcserver.go +++ b/pkg/grpc/grpcserver/grpcserver.go @@ -21,7 +21,7 @@ var ( // ResponderServer: server to distribute map response type ResponderServer struct { pb.UnimplementedMapResponderServer - responder *responder.MapResponder + responder *responder.OldMapResponder } type GRPCProofs struct { @@ -49,7 +49,7 @@ func (server ResponderServer) QueryMapEntries(ctx context.Context, in *pb.MapCli } func NewGRPCServer(ctx context.Context, root []byte, cacheHeight int, mapserverConfigPath string) (*ResponderServer, error) { - responder, err := responder.NewMapResponder(ctx, root, cacheHeight, mapserverConfigPath) + responder, err := responder.NewOldMapResponder(ctx, root, cacheHeight, mapserverConfigPath) if err != nil { return nil, err } diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index 0bc3e9aa..cf47c760 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -36,11 +36,9 @@ func (d *MockDB) DB() *sql.DB { // Close closes the connection. 
func (d *MockDB) Close() error { return nil } -func (d *MockDB) TruncateAllTables() error { return nil } +func (d *MockDB) TruncateAllTables(ctx context.Context) error { return nil } -func (d *MockDB) DisableIndexing(table string) error { return nil } - -func (d *MockDB) EnableIndexing(table string) error { return nil } +func (*MockDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { return nil, nil } func (d *MockDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { return make([]bool, len(ids)), nil diff --git a/pkg/mapserver/responder/deleteme.go b/pkg/mapserver/responder/deleteme.go index ae3b0d9d..dd5c7f06 100644 --- a/pkg/mapserver/responder/deleteme.go +++ b/pkg/mapserver/responder/deleteme.go @@ -10,7 +10,7 @@ import ( ) // GetDomainProofsTest deleteme! only used to print extra info in benchmarks. -func (mapResponder *MapResponder) GetDomainProofsTest(ctx context.Context, domainNames []string) (map[string][]*mapCommon.MapServerResponse, int, error) { +func (mapResponder *OldMapResponder) GetDomainProofsTest(ctx context.Context, domainNames []string) (map[string][]*mapCommon.MapServerResponse, int, error) { start := time.Now() domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) if err != nil { diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go new file mode 100644 index 00000000..0a120ace --- /dev/null +++ b/pkg/mapserver/responder/old_responder.go @@ -0,0 +1,247 @@ +package responder + +import ( + "context" + "crypto/rsa" + "fmt" + "strings" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/domain" + mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/mapserver/trie" +) + +// OldMapResponder: A map responder, which is responsible for receiving client's request. 
Only read from db. +type OldMapResponder struct { + conn db.Conn + getProofLimiter chan struct{} + smt *trie.Trie + signedTreeHead []byte + rsaKeyPair *rsa.PrivateKey +} + +// NewOldMapResponder: return a new responder +func NewOldMapResponder(ctx context.Context, root []byte, cacheHeight int, mapServerConfigPath string) (*OldMapResponder, error) { + // new db connection for SMT + conn, err := db.Connect(nil) + if err != nil { + return nil, fmt.Errorf("NewMapResponder | Connect | %w", err) + } + + smt, err := trie.NewTrie(root, common.SHA256Hash, conn) + if err != nil { + return nil, fmt.Errorf("NewMapResponder | NewTrie | %w", err) + } + smt.CacheHeightLimit = cacheHeight + + // load cache + err = smt.LoadCache(ctx, root) + if err != nil { + return nil, fmt.Errorf("NewMapResponder | LoadCache | %w", err) + } + + mapServer := newMapResponder(conn, smt) + + err = mapServer.loadPrivKeyAndSignTreeHead(mapServerConfigPath) + if err != nil { + return nil, fmt.Errorf("NewMapResponder | loadPrivKey | %w", err) + } + + return mapServer, nil +} + +func (r *OldMapResponder) loadPrivKeyAndSignTreeHead(mapServerConfigPath string) error { + config := &MapserverConfig{} + err := ReadConfigFromFile(config, mapServerConfigPath) + if err != nil { + return fmt.Errorf("ReadConfigFromFile | %w", err) + } + + keyPair, err := common.LoadRSAKeyPairFromFile(config.KeyPath) + if err != nil { + return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) + } + + r.rsaKeyPair = keyPair + + signature, err := common.SignStructRSASHA256(r.smt.Root, keyPair) + if err != nil { + return fmt.Errorf("SignStructRSASHA256 | %w", err) + } + + r.signedTreeHead = signature + + return nil +} + +func (r *OldMapResponder) GetSignTreeHead() []byte { + return r.signedTreeHead +} + +func newMapResponder(conn db.Conn, smt *trie.Trie) *OldMapResponder { + return &OldMapResponder{ + conn: conn, + getProofLimiter: make(chan struct{}, 64), // limit getProof to 64 concurrent routines + smt: smt, + } +} + +// GetProof: get 
proofs for one domain +func (r *OldMapResponder) GetProof(ctx context.Context, domainName string) ([]mapCommon.MapServerResponse, error) { + r.getProofLimiter <- struct{}{} + defer func() { <-r.getProofLimiter }() + return r.getProof(ctx, domainName) +} + +// GetRoot: get current root of the smt +func (mapResponder *OldMapResponder) GetRoot() []byte { + return mapResponder.smt.Root +} + +// Close: close db +func (mapResponder *OldMapResponder) Close() error { + err := mapResponder.conn.Close() + if err != nil { + return err + } + return mapResponder.smt.Close() +} + +func (r *OldMapResponder) getProof(ctx context.Context, domainName string) ( + []mapCommon.MapServerResponse, error) { + + // check domain name first + domainList, err := domain.ParseDomainName(domainName) + if err != nil { + if err == domain.ErrInvalidDomainName { + return nil, err + } + return nil, fmt.Errorf("GetDomainProof | parseDomainName | %w", err) + } + proofsResult := make([]mapCommon.MapServerResponse, 0, len(domainList)) + + for _, domain := range domainList { + domainHash := common.SHA256Hash32Bytes([]byte(domain)) + + proof, isPoP, proofKey, ProofValue, err := r.smt.MerkleProof(ctx, domainHash[:]) + if err != nil { + return nil, fmt.Errorf("getDomainProof | MerkleProof | %w", err) + } + + var proofType mapCommon.ProofType + domainBytes := []byte{} + // If it is PoP, query the domain entry. 
If it is PoA, directly return the PoA + if isPoP { + proofType = mapCommon.PoP + domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) + if err != nil { + return nil, fmt.Errorf("GetDomainProof | %w", err) + } + } else { + proofType = mapCommon.PoA + } + + proofsResult = append(proofsResult, mapCommon.MapServerResponse{ + Domain: domain, + PoI: mapCommon.PoI{ + Proof: proof, + Root: r.smt.Root, + ProofType: proofType, + ProofKey: proofKey, + ProofValue: ProofValue}, + DomainEntryBytes: domainBytes, + TreeHeadSig: r.signedTreeHead, + }) + } + return proofsResult, nil +} + +func (mapResponder *OldMapResponder) GetDomainProofs(ctx context.Context, domainNames []string) (map[string][]*mapCommon.MapServerResponse, error) { + domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) + if err != nil { + return nil, fmt.Errorf("GetDomainProofs | getMapping | %w", err) + } + + domainToFetch, err := mapResponder.getProofFromSMT(ctx, domainProofMap) + if err != nil { + return nil, fmt.Errorf("GetDomainProofs | getProofFromSMT | %w", err) + } + + result, err := mapResponder.conn.RetrieveDomainEntries(ctx, domainToFetch) + if err != nil { + return nil, fmt.Errorf("GetDomainProofs | RetrieveKeyValuePairMultiThread | %w", err) + } + for _, keyValuePair := range result { + domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value + } + + return domainResultMap, nil +} + +func getMapping(domainNames []string, signedTreeHead []byte) (map[string][]*mapCommon.MapServerResponse, map[common.SHA256Output]*mapCommon.MapServerResponse, error) { + domainResultMap := make(map[string][]*mapCommon.MapServerResponse) + domainProofMap := make(map[common.SHA256Output]*mapCommon.MapServerResponse) + + for _, domainName := range domainNames { + _, ok := domainResultMap[domainName] + if !ok { + // list of proofs for this domain + resultsList := []*mapCommon.MapServerResponse{} + subDomainNames, err := domain.ParseDomainName(domainName) + + 
if err != nil { + return nil, nil, fmt.Errorf("getMapping | parseDomainName | %w", err) + } + for _, subDomainName := range subDomainNames { + var domainHash32Bytes common.SHA256Output + copy(domainHash32Bytes[:], common.SHA256Hash([]byte(subDomainName))) + subDomainResult, ok := domainProofMap[domainHash32Bytes] + if ok { + resultsList = append(resultsList, subDomainResult) + } else { + domainProofMap[domainHash32Bytes] = &mapCommon.MapServerResponse{Domain: subDomainName, TreeHeadSig: signedTreeHead} + resultsList = append(resultsList, domainProofMap[domainHash32Bytes]) + } + } + domainResultMap[domainName] = resultsList + } + } + return domainResultMap, domainProofMap, nil +} + +func (mapResponder *OldMapResponder) getProofFromSMT(ctx context.Context, + domainMap map[common.SHA256Output]*mapCommon.MapServerResponse, +) ([]*common.SHA256Output, error) { + + domainNameToFetchFromDB := []*common.SHA256Output{} + for key, value := range domainMap { + proof, isPoP, proofKey, ProofValue, err := mapResponder.smt.MerkleProof(ctx, key[:]) + if err != nil { + return nil, fmt.Errorf("getProofFromSMT | MerkleProof | %w", err) + } + + value.PoI = mapCommon.PoI{Proof: proof, ProofKey: proofKey, ProofValue: ProofValue, Root: mapResponder.smt.Root} + + switch { + case isPoP: + value.PoI.ProofType = mapCommon.PoP + domainNameToFetchFromDB = append(domainNameToFetchFromDB, &key) + + case !isPoP: + value.PoI.ProofType = mapCommon.PoA + } + } + return domainNameToFetchFromDB, nil +} + +// repeatStmt returns ( (?,..inner..,?), ...outer... ) +func repeatStmt(outer int, inner int) string { + components := make([]string, inner) + for i := 0; i < len(components); i++ { + components[i] = "?" 
+ } + toRepeat := "(" + strings.Join(components, ",") + ")" + return strings.Repeat(toRepeat+",", outer-1) + toRepeat +} diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/old_responder_test.go similarity index 98% rename from pkg/mapserver/responder/responder_test.go rename to pkg/mapserver/responder/old_responder_test.go index ad58bc7e..c2029b8b 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -90,7 +90,7 @@ func TestResponderWithPoP(t *testing.T) { require.Len(t, certs, count) // create responder and request proof for those names - responder, err := NewMapResponder(ctx, root, 233, "./testdata/mapserver_config.json") + responder, err := NewOldMapResponder(ctx, root, 233, "./testdata/mapserver_config.json") require.NoError(t, err) for _, cert := range certs { responses, err := responder.GetProof(ctx, cert.Subject.CommonName) @@ -136,7 +136,7 @@ func TestGetDomainProof(t *testing.T) { } // getMockResponder builds a mock responder. -func getMockResponder(t require.TestingT, certs []*x509.Certificate) *MapResponder { +func getMockResponder(t require.TestingT, certs []*x509.Certificate) *OldMapResponder { // update the certs, and get the mock db of SMT and db conn, root, err := getUpdatedUpdater(t, certs) require.NoError(t, err) diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index a6517a19..a69dd9be 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -2,9 +2,7 @@ package responder import ( "context" - "crypto/rsa" "fmt" - "strings" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" @@ -13,232 +11,70 @@ import ( "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) -// MapResponder: A map responder, which is responsible for receiving client's request. Only read from db. 
type MapResponder struct { - conn db.Conn - getProofLimiter chan struct{} - smt *trie.Trie - signedTreeHead []byte - rsaKeyPair *rsa.PrivateKey + conn db.Conn + smt *trie.Trie } -// NewMapResponder: return a new responder -func NewMapResponder(ctx context.Context, root []byte, cacheHeight int, mapServerConfigPath string) (*MapResponder, error) { - // new db connection for SMT - conn, err := db.Connect(nil) +func NewMapResponder(ctx context.Context, conn db.Conn) (*MapResponder, error) { + // Load root. + root, err := conn.LoadRoot(ctx) if err != nil { - return nil, fmt.Errorf("NewMapResponder | Connect | %w", err) + return nil, err } - smt, err := trie.NewTrie(root, common.SHA256Hash, conn) + // Build the Sparse Merkle Tree (SMT). + smt, err := trie.NewTrie(root[:], common.SHA256Hash, conn) if err != nil { - return nil, fmt.Errorf("NewMapResponder | NewTrie | %w", err) + return nil, fmt.Errorf("error loading SMT: %w", err) } - smt.CacheHeightLimit = cacheHeight - // load cache - err = smt.LoadCache(ctx, root) - if err != nil { - return nil, fmt.Errorf("NewMapResponder | LoadCache | %w", err) - } - - mapServer := newMapResponder(conn, smt) - - err = mapServer.loadPrivKeyAndSignTreeHead(mapServerConfigPath) - if err != nil { - return nil, fmt.Errorf("NewMapResponder | loadPrivKey | %w", err) + r := &MapResponder{ + conn: conn, + smt: smt, } - - return mapServer, nil + return r, nil } -func (r *MapResponder) loadPrivKeyAndSignTreeHead(mapServerConfigPath string) error { - config := &MapserverConfig{} - err := ReadConfigFromFile(config, mapServerConfigPath) - if err != nil { - return fmt.Errorf("ReadConfigFromFile | %w", err) - } +func (r *MapResponder) GetProof(ctx context.Context, domainName string, +) ([]*mapCommon.MapServerResponse, error) { - keyPair, err := common.LoadRSAKeyPairFromFile(config.KeyPath) + // Parse the domain name. 
+ domainParts, err := domain.ParseDomainName(domainName) if err != nil { - return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) - } - - r.rsaKeyPair = keyPair - - signature, err := common.SignStructRSASHA256(r.smt.Root, keyPair) - if err != nil { - return fmt.Errorf("SignStructRSASHA256 | %w", err) - } - - r.signedTreeHead = signature - - return nil -} - -func (r *MapResponder) GetSignTreeHead() []byte { - return r.signedTreeHead -} - -func newMapResponder(conn db.Conn, smt *trie.Trie) *MapResponder { - return &MapResponder{ - conn: conn, - getProofLimiter: make(chan struct{}, 64), // limit getProof to 64 concurrent routines - smt: smt, - } -} - -// GetProof: get proofs for one domain -func (r *MapResponder) GetProof(ctx context.Context, domainName string) ([]mapCommon.MapServerResponse, error) { - r.getProofLimiter <- struct{}{} - defer func() { <-r.getProofLimiter }() - return r.getProof(ctx, domainName) -} - -// GetRoot: get current root of the smt -func (mapResponder *MapResponder) GetRoot() []byte { - return mapResponder.smt.Root -} - -// Close: close db -func (mapResponder *MapResponder) Close() error { - err := mapResponder.conn.Close() - if err != nil { - return err - } - return mapResponder.smt.Close() -} - -func (r *MapResponder) getProof(ctx context.Context, domainName string) ( - []mapCommon.MapServerResponse, error) { - - // check domain name first - domainList, err := domain.ParseDomainName(domainName) - if err != nil { - if err == domain.ErrInvalidDomainName { - return nil, err - } - return nil, fmt.Errorf("GetDomainProof | parseDomainName | %w", err) + return nil, err } - proofsResult := make([]mapCommon.MapServerResponse, 0, len(domainList)) - for _, domain := range domainList { - domainHash := common.SHA256Hash32Bytes([]byte(domain)) - - proof, isPoP, proofKey, ProofValue, err := r.smt.MerkleProof(ctx, domainHash[:]) + // Prepare proof with the help of the SMT. 
+ proofList := make([]*mapCommon.MapServerResponse, len(domainParts)) + for i, domainPart := range domainParts { + hash := common.SHA256Hash32Bytes([]byte(domainPart)) + proof, isPoP, proofKey, proofValue, err := r.smt.MerkleProof(ctx, hash[:]) if err != nil { - return nil, fmt.Errorf("getDomainProof | MerkleProof | %w", err) + return nil, fmt.Errorf("error obtaining Merkle proof for %s: %w", + domainPart, err) } - var proofType mapCommon.ProofType - domainBytes := []byte{} - // If it is PoP, query the domain entry. If it is PoA, directly return the PoA + // If it is a proof of presence, obtain the payload. + var payload []byte + proofType := mapCommon.PoA if isPoP { proofType = mapCommon.PoP - domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) - if err != nil { - return nil, fmt.Errorf("GetDomainProof | %w", err) - } - } else { - proofType = mapCommon.PoA + _ = payload } - proofsResult = append(proofsResult, mapCommon.MapServerResponse{ - Domain: domain, + proofList[i] = &mapCommon.MapServerResponse{ + Domain: domainPart, PoI: mapCommon.PoI{ + ProofType: proofType, Proof: proof, Root: r.smt.Root, - ProofType: proofType, ProofKey: proofKey, - ProofValue: ProofValue}, - DomainEntryBytes: domainBytes, - TreeHeadSig: r.signedTreeHead, - }) - } - return proofsResult, nil -} - -func (mapResponder *MapResponder) GetDomainProofs(ctx context.Context, domainNames []string) (map[string][]*mapCommon.MapServerResponse, error) { - domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) - if err != nil { - return nil, fmt.Errorf("GetDomainProofs | getMapping | %w", err) - } - - domainToFetch, err := mapResponder.getProofFromSMT(ctx, domainProofMap) - if err != nil { - return nil, fmt.Errorf("GetDomainProofs | getProofFromSMT | %w", err) - } - - result, err := mapResponder.conn.RetrieveDomainEntries(ctx, domainToFetch) - if err != nil { - return nil, fmt.Errorf("GetDomainProofs | RetrieveKeyValuePairMultiThread | %w", err) - } - 
for _, keyValuePair := range result { - domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value - } - - return domainResultMap, nil -} - -func getMapping(domainNames []string, signedTreeHead []byte) (map[string][]*mapCommon.MapServerResponse, map[common.SHA256Output]*mapCommon.MapServerResponse, error) { - domainResultMap := make(map[string][]*mapCommon.MapServerResponse) - domainProofMap := make(map[common.SHA256Output]*mapCommon.MapServerResponse) - - for _, domainName := range domainNames { - _, ok := domainResultMap[domainName] - if !ok { - // list of proofs for this domain - resultsList := []*mapCommon.MapServerResponse{} - subDomainNames, err := domain.ParseDomainName(domainName) - - if err != nil { - return nil, nil, fmt.Errorf("getMapping | parseDomainName | %w", err) - } - for _, subDomainName := range subDomainNames { - var domainHash32Bytes common.SHA256Output - copy(domainHash32Bytes[:], common.SHA256Hash([]byte(subDomainName))) - subDomainResult, ok := domainProofMap[domainHash32Bytes] - if ok { - resultsList = append(resultsList, subDomainResult) - } else { - domainProofMap[domainHash32Bytes] = &mapCommon.MapServerResponse{Domain: subDomainName, TreeHeadSig: signedTreeHead} - resultsList = append(resultsList, domainProofMap[domainHash32Bytes]) - } - } - domainResultMap[domainName] = resultsList - } - } - return domainResultMap, domainProofMap, nil -} - -func (mapResponder *MapResponder) getProofFromSMT(ctx context.Context, domainMap map[common.SHA256Output]*mapCommon.MapServerResponse) ([]common.SHA256Output, error) { - domainNameToFetchFromDB := []common.SHA256Output{} - for key, value := range domainMap { - proof, isPoP, proofKey, ProofValue, err := mapResponder.smt.MerkleProof(ctx, key[:]) - if err != nil { - return nil, fmt.Errorf("getProofFromSMT | MerkleProof | %w", err) - } - - value.PoI = mapCommon.PoI{Proof: proof, ProofKey: proofKey, ProofValue: ProofValue, Root: mapResponder.smt.Root} - - switch { - case isPoP: - 
value.PoI.ProofType = mapCommon.PoP - domainNameToFetchFromDB = append(domainNameToFetchFromDB, key) - - case !isPoP: - value.PoI.ProofType = mapCommon.PoA + ProofValue: proofValue, + }, + DomainEntryBytes: payload, + // TreeHeadSig: , } } - return domainNameToFetchFromDB, nil -} - -// repeatStmt returns ( (?,..inner..,?), ...outer... ) -func repeatStmt(outer int, inner int) string { - components := make([]string, inner) - for i := 0; i < len(components); i++ { - components[i] = "?" - } - toRepeat := "(" + strings.Join(components, ",") + ")" - return strings.Repeat(toRepeat+",", outer-1) + toRepeat + return proofList, nil } diff --git a/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go b/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go index 8ede22dc..5dd0f3dd 100644 --- a/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go +++ b/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go @@ -35,7 +35,7 @@ func main() { if err != nil { panic(err) } - responder, err := responder.NewMapResponder(ctx, root, 233, "./config/mapserver_config.json") + responder, err := responder.NewOldMapResponder(ctx, root, 233, "./config/mapserver_config.json") if err != nil { panic(err) } diff --git a/tests/integration/db/db.go b/tests/integration/db/db.go index dc5dc165..b862e2a3 100644 --- a/tests/integration/db/db.go +++ b/tests/integration/db/db.go @@ -275,12 +275,12 @@ func testDomainEntriesTable() { // check if value is correctly inserted // RetrieveDomainEntry() // ***************************************************************** - keys := getKeys(1511, 4555) + keys := getKeyPtrs(1511, 4555) prevKeySize := len(keys) result := make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, err := conn.RetrieveDomainEntry(ctx, key) + value, err := conn.RetrieveDomainEntry(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } @@ -289,7 +289,7 @@ func testDomainEntriesTable() { if !bytes.Equal(value, []byte("hi this 
is a test")) { panic("Domain entries Table Read test 1: Stored value is not correct") } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) + result = append(result, &db.KeyValuePair{Key: *key, Value: value}) } } @@ -298,11 +298,11 @@ func testDomainEntriesTable() { } // query a larger range - keys = getKeys(1011, 5555) + keys = getKeyPtrs(1011, 5555) result = make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, err := conn.RetrieveDomainEntry(ctx, key) + value, err := conn.RetrieveDomainEntry(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } @@ -311,7 +311,7 @@ func testDomainEntriesTable() { if !bytes.Equal(value, []byte("hi this is a test")) { panic("Domain entries Table Read test 2: Stored value is not correct") } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) + result = append(result, &db.KeyValuePair{Key: *key, Value: value}) } } @@ -341,16 +341,16 @@ func testDomainEntriesTable() { // ***************************************************************** // read empty keys // ***************************************************************** - keys = getKeys(11511, 14555) + keys = getKeyPtrs(11511, 14555) result = make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, err := conn.RetrieveDomainEntry(ctx, key) + value, err := conn.RetrieveDomainEntry(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } if value != nil { - result = append(result, &db.KeyValuePair{Key: key, Value: value}) + result = append(result, &db.KeyValuePair{Key: *key, Value: value}) } } @@ -471,6 +471,15 @@ func getKeys(startIdx, endIdx int) []common.SHA256Output { return result } +func getKeyPtrs(startIdx, endIdx int) []*common.SHA256Output { + result := []*common.SHA256Output{} + for i := startIdx; i <= endIdx; i++ { + keyHash := common.SHA256Hash32Bytes([]byte(strconv.Itoa(i))) + result = append(result, (*common.SHA256Output)(&keyHash)) + } + return result +} + func clearTable() { db, err 
:= sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/fpki?maxAllowedPacket=1073741824") if err != nil { diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index eada28ae..6fa0e93d 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -1,234 +1,51 @@ package main import ( - "bytes" "context" - "database/sql" - "encoding/base64" - "encoding/json" + "encoding/hex" "fmt" - "net/http" - "strings" - "sync" + "os" + "time" - ct "github.com/google/certificate-transparency-go" - ctTls "github.com/google/certificate-transparency-go/tls" - ctX509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/domain" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/prover" + "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - - "time" ) -var wg sync.WaitGroup - -type result struct { - Proofs [][]mapCommon.MapServerResponse - Err error -} - -// "https://ct.googleapis.com/logs/argon2021" - -// TestUpdaterAndResponder: store a list of domain entries -> fetch inclusion -> verify inclusion func main() { - // truncate tables - db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/fpki?maxAllowedPacket=1073741824") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE domainEntries;") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE updates;") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE tree;") - if err != nil { - panic(err) - } - - // new map updator - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } + os.Exit(mainFunc()) +} +func mainFunc() int { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - //start := time.Now() - // download the 
certs and update the domain entries - mapUpdater.Fetcher.BatchSize = 10000 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", 1120000, 1120999) - _, err = mapUpdater.UpdateNextBatch(ctx) - if err != nil { - panic(err) - } - - //end := time.Now() - //fmt.Println("time to get 10000 certs: ", end.Sub(start)) - - //start = time.Now() - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - //end = time.Now() - //fmt.Println("time to commit changes: ", end.Sub(start)) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - // get a new responder, and load an existing tree - mapResponder, err := responder.NewMapResponder(ctx, root, 233, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - // re-collect the added certs - collectedCertMap := []ctX509.Certificate{} - for i := 0; i < 50; i++ { - certList, err := getCerts("https://ct.googleapis.com/logs/argon2021", int64(1120000+i*20), int64(1120000+i*20+19)) - //fmt.Println("downloading : ", int64(1120000+i*20), " - ", int64(1120000+i*20+19)) - if err != nil { - panic(err) - } - collectedCertMap = append(collectedCertMap, certList...) - } - - numberOfWorker := 15 - wg.Add(numberOfWorker) - step := len(collectedCertMap) / numberOfWorker - - for i := 0; i < numberOfWorker; i++ { - worker(ctx, collectedCertMap[i*step:i*step+step-1], mapResponder) - } - - wg.Wait() - - fmt.Println("map server succeed!") -} - -func worker(ctx context.Context, certs []ctX509.Certificate, mapResponder *responder.MapResponder) { - for _, cert := range certs { - if cert.Subject.CommonName != "" { - proofs, err := mapResponder.GetProof(ctx, cert.Subject.CommonName) - if err != nil { - if err == domain.ErrInvalidDomainName { - continue - } - panic(err) - } - if !checkProof(cert, proofs) { - panic("certs not found") - } - } - } - wg.Done() + // Create an empty test DB + // Connect to the test DB + // Connect to DB via local socket, should be faster. 
+ config := db.ConfigFromEnvironment() + // config.Dsn = "root@unix(/var/run/mysqld/mysqld.sock)/fpki" + conn, err := db.Connect(config) + panicIfError(err) + + root, err := conn.LoadRoot(ctx) + panicIfError(err) + fmt.Printf("root is %s\n", hex.EncodeToString((*root)[:])) + + // Ingest mock data + + // Retrieve some domains + res, err := responder.NewMapResponder(ctx, conn) + panicIfError(err) + p, err := res.GetProof(ctx, "aname.com") + panicIfError(err) + _ = p + + // Compare results + return 0 } -func checkProof(cert ctX509.Certificate, proofs []mapCommon.MapServerResponse) bool { - caName := cert.Issuer.String() - for _, proof := range proofs { - if !strings.Contains(cert.Subject.CommonName, proof.Domain) { - panic("wrong domain proofs") - } - proofType, isCorrect, err := prover.VerifyProofByDomain(proof) - if err != nil { - panic(err) - } - - if !isCorrect { - panic("wrong proof") - } - - if proofType == mapCommon.PoA { - if len(proof.DomainEntryBytes) != 0 { - panic("domain entry bytes not empty for PoA") - } - } - if proofType == mapCommon.PoP { - domainEntry, err := mapCommon.DeserializeDomainEntry(proof.DomainEntryBytes) - if err != nil { - panic(err) - } - // get the correct CA entry - for _, caEntry := range domainEntry.CAEntry { - if caEntry.CAName == caName { - // check if the cert is in the CA entry - for _, certRaw := range caEntry.DomainCerts { - if bytes.Equal(certRaw, cert.Raw) { - return true - } - } - } - } - } - } - return false -} - -// CertData: merkle tree leaf -type CertData struct { - LeafInput string `json:"leaf_input"` - ExtraData string `json:"extra_data"` -} - -// CertLog: Data from CT log -type CertLog struct { - Entries []CertData -} - -// copy of function from logpicker_worker.go -func getCerts(ctURL string, start int64, end int64) ([]ctX509.Certificate, error) { - url := fmt.Sprintf(ctURL+"/ct/v1/get-entries?start=%d&end=%d"", start, end) - resp, err := http.Get(url) +func panicIfError(err error) { if err != nil { - return nil, 
fmt.Errorf("http.Get %w", err) - } - - buf := new(bytes.Buffer) - buf.ReadFrom(resp.Body) - - var resultsCerLog CertLog - json.Unmarshal(buf.Bytes(), &resultsCerLog) - - certList := []ctX509.Certificate{} - - // parse merkle leaves and append it to the result -parse_cert_loop: - for _, entry := range resultsCerLog.Entries { - leafBytes, _ := base64.RawStdEncoding.DecodeString(entry.LeafInput) - var merkleLeaf ct.MerkleTreeLeaf - ctTls.Unmarshal(leafBytes, &merkleLeaf) - - var certificate *ctX509.Certificate - switch entryType := merkleLeaf.TimestampedEntry.EntryType; entryType { - case ct.X509LogEntryType: - certificate, err = ctX509.ParseCertificate(merkleLeaf.TimestampedEntry.X509Entry.Data) - if err != nil { - fmt.Println("ERROR: ParseCertificate ", err) - continue parse_cert_loop - } - case ct.PrecertLogEntryType: - certificate, err = ctX509.ParseTBSCertificate(merkleLeaf.TimestampedEntry.PrecertEntry.TBSCertificate) - if err != nil { - fmt.Println("ERROR: ParseTBSCertificate ", err) - continue parse_cert_loop - } - } - certList = append(certList, *certificate) + panic(err) } - return certList, nil } diff --git a/tests/integration/old_mapserver/main.go b/tests/integration/old_mapserver/main.go new file mode 100644 index 00000000..08ef70a1 --- /dev/null +++ b/tests/integration/old_mapserver/main.go @@ -0,0 +1,234 @@ +package main + +import ( + "bytes" + "context" + "database/sql" + "encoding/base64" + "encoding/json" + "fmt" + "net/http" + "strings" + "sync" + + ct "github.com/google/certificate-transparency-go" + ctTls "github.com/google/certificate-transparency-go/tls" + ctX509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/domain" + mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/mapserver/prover" + "github.com/netsec-ethz/fpki/pkg/mapserver/responder" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + + 
"time" +) + +var wg sync.WaitGroup + +type result struct { + Proofs [][]mapCommon.MapServerResponse + Err error +} + +// "https://ct.googleapis.com/logs/argon2021" + +// TestUpdaterAndResponder: store a list of domain entries -> fetch inclusion -> verify inclusion +func main() { + // truncate tables + db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/fpki?maxAllowedPacket=1073741824") + if err != nil { + panic(err) + } + + _, err = db.Exec("TRUNCATE domainEntries;") + if err != nil { + panic(err) + } + + _, err = db.Exec("TRUNCATE updates;") + if err != nil { + panic(err) + } + + _, err = db.Exec("TRUNCATE tree;") + if err != nil { + panic(err) + } + + // new map updator + mapUpdater, err := updater.NewMapUpdater(nil, 233) + if err != nil { + panic(err) + } + + ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) + defer cancelF() + + //start := time.Now() + // download the certs and update the domain entries + mapUpdater.Fetcher.BatchSize = 10000 + mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", 1120000, 1120999) + _, err = mapUpdater.UpdateNextBatch(ctx) + if err != nil { + panic(err) + } + + //end := time.Now() + //fmt.Println("time to get 10000 certs: ", end.Sub(start)) + + //start = time.Now() + err = mapUpdater.CommitSMTChanges(ctx) + if err != nil { + panic(err) + } + //end = time.Now() + //fmt.Println("time to commit changes: ", end.Sub(start)) + + root := mapUpdater.GetRoot() + err = mapUpdater.Close() + if err != nil { + panic(err) + } + + // get a new responder, and load an existing tree + mapResponder, err := responder.NewOldMapResponder(ctx, root, 233, "./config/mapserver_config.json") + if err != nil { + panic(err) + } + + // re-collect the added certs + collectedCertList := []ctX509.Certificate{} + for i := 0; i < 50; i++ { + certList, err := getCerts("https://ct.googleapis.com/logs/argon2021", int64(1120000+i*20), int64(1120000+i*20+19)) + //fmt.Println("downloading : ", int64(1120000+i*20), " - ", 
int64(1120000+i*20+19)) + if err != nil { + panic(err) + } + collectedCertList = append(collectedCertList, certList...) + } + + numberOfWorker := 15 + wg.Add(numberOfWorker) + step := len(collectedCertList) / numberOfWorker + + for i := 0; i < numberOfWorker; i++ { + worker(ctx, collectedCertList[i*step:i*step+step-1], mapResponder) + } + + wg.Wait() + + fmt.Println("map server succeed!") +} + +func worker(ctx context.Context, certs []ctX509.Certificate, mapResponder *responder.OldMapResponder) { + for _, cert := range certs { + if cert.Subject.CommonName != "" { + proofs, err := mapResponder.GetProof(ctx, cert.Subject.CommonName) + if err != nil { + if err == domain.ErrInvalidDomainName { + continue + } + panic(err) + } + if !checkProof(cert, proofs) { + panic("certs not found") + } + } + } + wg.Done() +} + +func checkProof(cert ctX509.Certificate, proofs []mapCommon.MapServerResponse) bool { + caName := cert.Issuer.String() + for _, proof := range proofs { + if !strings.Contains(cert.Subject.CommonName, proof.Domain) { + panic("wrong domain proofs") + } + proofType, isCorrect, err := prover.VerifyProofByDomain(proof) + if err != nil { + panic(err) + } + + if !isCorrect { + panic("wrong proof") + } + + if proofType == mapCommon.PoA { + if len(proof.DomainEntryBytes) != 0 { + panic("domain entry bytes not empty for PoA") + } + } + if proofType == mapCommon.PoP { + domainEntry, err := mapCommon.DeserializeDomainEntry(proof.DomainEntryBytes) + if err != nil { + panic(err) + } + // get the correct CA entry + for _, caEntry := range domainEntry.CAEntry { + if caEntry.CAName == caName { + // check if the cert is in the CA entry + for _, certRaw := range caEntry.DomainCerts { + if bytes.Equal(certRaw, cert.Raw) { + return true + } + } + } + } + } + } + return false +} + +// CertData: merkle tree leaf +type CertData struct { + LeafInput string `json:"leaf_input"` + ExtraData string `json:"extra_data"` +} + +// CertLog: Data from CT log +type CertLog struct { + Entries 
[]CertData +} + +// copy of function from logpicker_worker.go +func getCerts(ctURL string, start int64, end int64) ([]ctX509.Certificate, error) { + url := fmt.Sprintf(ctURL+"/ct/v1/get-entries?start=%d&end=%d"", start, end) + resp, err := http.Get(url) + if err != nil { + return nil, fmt.Errorf("http.Get %w", err) + } + + buf := new(bytes.Buffer) + buf.ReadFrom(resp.Body) + + var resultsCerLog CertLog + json.Unmarshal(buf.Bytes(), &resultsCerLog) + + certList := []ctX509.Certificate{} + + // parse merkle leaves and append it to the result +parse_cert_loop: + for _, entry := range resultsCerLog.Entries { + leafBytes, _ := base64.RawStdEncoding.DecodeString(entry.LeafInput) + var merkleLeaf ct.MerkleTreeLeaf + ctTls.Unmarshal(leafBytes, &merkleLeaf) + + var certificate *ctX509.Certificate + switch entryType := merkleLeaf.TimestampedEntry.EntryType; entryType { + case ct.X509LogEntryType: + certificate, err = ctX509.ParseCertificate(merkleLeaf.TimestampedEntry.X509Entry.Data) + if err != nil { + fmt.Println("ERROR: ParseCertificate ", err) + continue parse_cert_loop + } + case ct.PrecertLogEntryType: + certificate, err = ctX509.ParseTBSCertificate(merkleLeaf.TimestampedEntry.PrecertEntry.TBSCertificate) + if err != nil { + fmt.Println("ERROR: ParseTBSCertificate ", err) + continue parse_cert_loop + } + } + certList = append(certList, *certificate) + } + return certList, nil +} From dc8936cdd560755768cde61869d114a3bed608b6 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 20 Mar 2023 10:56:59 +0100 Subject: [PATCH 050/187] Load root in the ingester. 
--- cmd/ingest/main.go | 11 +++++++++-- cmd/ingest/smtUpdater.go | 8 ++++++-- pkg/db/db.go | 2 +- pkg/db/mysql.go | 3 +++ pkg/db/read.go | 4 ++++ 5 files changed, 23 insertions(+), 5 deletions(-) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index d33ec301..321ce9a2 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -106,6 +106,14 @@ func mainFunction() int { conn, err := db.Connect(config) exitIfError(err) + // Load root if any: + root, err := conn.LoadRoot(ctx) + exitIfError(err) + if root == nil { + fmt.Print("Empty root!!. DB should be empty, but not checking.\n\n") + // TODO(juagargi) check that DB is empty if root is empty. + } + if !coalesceOnly { // All GZ and CSV files found under the directory of the argument. gzFiles, csvFiles := listOurFiles(flag.Arg(0)) @@ -121,8 +129,7 @@ func mainFunction() int { CoalescePayloadsForDirtyDomains(ctx, conn) // Now start processing the changed domains into the SMT: - // conn.LoadRoot deleteme TODO load root - smtProcessor := NewSMTUpdater(conn, nil, 32) + smtProcessor := NewSMTUpdater(conn, root, 32) smtProcessor.Start(ctx) err = smtProcessor.Wait() exitIfError(err) diff --git a/cmd/ingest/smtUpdater.go b/cmd/ingest/smtUpdater.go index 02727201..737b933a 100644 --- a/cmd/ingest/smtUpdater.go +++ b/cmd/ingest/smtUpdater.go @@ -18,8 +18,12 @@ type SMTUpdater struct { doneCh chan error // Will have just one entry when all the processing is done } -func NewSMTUpdater(conn db.Conn, root []byte, cacheHeight int) *SMTUpdater { - smtTrie, err := trie.NewTrie(root, common.SHA256Hash, conn) +func NewSMTUpdater(conn db.Conn, root *common.SHA256Output, cacheHeight int) *SMTUpdater { + var rootSlice []byte + if root != nil { + rootSlice = (*root)[:] + } + smtTrie, err := trie.NewTrie(rootSlice, common.SHA256Hash, conn) if err != nil { panic(err) } diff --git a/pkg/db/db.go b/pkg/db/db.go index e69074d7..dbad6589 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -27,6 +27,7 @@ type Conn interface { TruncateAllTables(ctx 
context.Context) error LoadRoot(ctx context.Context) (*common.SHA256Output, error) + SaveRoot(ctx context.Context, root *common.SHA256Output) error ////////////////////////////////////////////////////////////////// // check if the functions below are needed after the new design // @@ -93,5 +94,4 @@ type Conn interface { // present in the `updates` table. UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, error) CleanupDirty(ctx context.Context) error - SaveRoot(ctx context.Context, root *common.SHA256Output) error } diff --git a/pkg/db/mysql.go b/pkg/db/mysql.go index bdeaa503..62771abc 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql.go @@ -149,6 +149,9 @@ func (c *mysqlDB) TruncateAllTables(ctx context.Context) error { func (c *mysqlDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { var key []byte if err := c.db.QueryRowContext(ctx, "SELECT key32 FROM root").Scan(&key); err != nil { + if err == sql.ErrNoRows { + return nil, nil + } return nil, fmt.Errorf("error obtaining the root entry: %w", err) } return (*common.SHA256Output)(key), nil diff --git a/pkg/db/read.go b/pkg/db/read.go index 1186092b..43a13699 100644 --- a/pkg/db/read.go +++ b/pkg/db/read.go @@ -68,6 +68,9 @@ func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, keys []*common.SHA2 func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common.SHA256Output, ) ([]*KeyValuePair, error) { + if len(domainIDs) == 0 { + return nil, nil + } str := "SELECT id,payload FROM domain_payloads WHERE id IN " + repeatStmt(1, len(domainIDs)) params := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { @@ -75,6 +78,7 @@ func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common } rows, err := c.db.QueryContext(ctx, str, params...) 
if err != nil { + fmt.Printf("Query is: '%s'\n", str) return nil, fmt.Errorf("error obtaining payloads for domains: %w", err) } pairs := make([]*KeyValuePair, 0, len(domainIDs)) From 72b39344a2a15e781913f3c870d2a47178154490 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 20 Mar 2023 11:06:30 +0100 Subject: [PATCH 051/187] Compute the Signed Tree Head in the responder. --- pkg/mapserver/responder/responder.go | 34 +++++++++++++++++++++++++--- tests/integration/mapserver/main.go | 2 +- 2 files changed, 32 insertions(+), 4 deletions(-) diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index a69dd9be..3bb36d27 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -12,11 +12,12 @@ import ( ) type MapResponder struct { - conn db.Conn - smt *trie.Trie + conn db.Conn + smt *trie.Trie + signedTreeHead []byte } -func NewMapResponder(ctx context.Context, conn db.Conn) (*MapResponder, error) { +func NewMapResponder(ctx context.Context, configFile string, conn db.Conn) (*MapResponder, error) { // Load root. root, err := conn.LoadRoot(ctx) if err != nil { @@ -33,6 +34,7 @@ func NewMapResponder(ctx context.Context, conn db.Conn) (*MapResponder, error) { conn: conn, smt: smt, } + r.signTreeHead(configFile) return r, nil } @@ -78,3 +80,29 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, } return proofList, nil } + +func (r *MapResponder) signTreeHead(configFile string) error { + // Load configuration. + config := &MapserverConfig{} + err := ReadConfigFromFile(config, configFile) + if err != nil { + return fmt.Errorf("ReadConfigFromFile | %w", err) + } + + // Load private key from configuration. + keyPair, err := common.LoadRSAKeyPairFromFile(config.KeyPath) + if err != nil { + return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) + } + + // Sign the tree head. 
+ signature, err := common.SignStructRSASHA256(r.smt.Root, keyPair) + if err != nil { + return fmt.Errorf("SignStructRSASHA256 | %w", err) + } + + // Keep it for the proofs. + r.signedTreeHead = signature + + return nil +} diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 6fa0e93d..87e0b047 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -34,7 +34,7 @@ func mainFunc() int { // Ingest mock data // Retrieve some domains - res, err := responder.NewMapResponder(ctx, conn) + res, err := responder.NewMapResponder(ctx, "./config/mapserver_config.json", conn) panicIfError(err) p, err := res.GetProof(ctx, "aname.com") panicIfError(err) From 46a6014fbae6368199b46a9b62a6b9b020e3dfa2 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 20 Mar 2023 12:41:18 +0100 Subject: [PATCH 052/187] Make the create_schema.sh script more modular. --- tools/create_schema.sh | 113 ++++++++++++++++++++++------------------- 1 file changed, 62 insertions(+), 51 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index a80aefa1..0fff56ee 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -1,31 +1,25 @@ #!/bin/bash -echo "This will destroy everything in the fpki database" +create_new_db() { + set -e -read -p "Are you sure? (y/n) default=n " answer -case ${answer:0:1} in - y|Y ) - ;; - * ) - exit 1 - ;; -esac + DBNAME=$1 + MYSQLCMD="mysql -u root" -set -e # after call to read MYSQLCMD="mysql -u root" CMD=$(cat < Date: Mon, 20 Mar 2023 14:25:03 +0100 Subject: [PATCH 053/187] Allow the creation of DBs in tests. 
--- tests/integration/mapserver/main.go | 5 +++++ tests/pkg/db/testDB.go | 31 +++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100644 tests/pkg/db/testDB.go diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 87e0b047..4c8b1fe6 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -9,6 +9,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" + testdb "github.com/netsec-ethz/fpki/tests/pkg/db" ) func main() { @@ -20,6 +21,10 @@ func mainFunc() int { defer cancelF() // Create an empty test DB + dbName := "testo" + err := testdb.CreateTestDB(ctx, dbName) + panicIfError(err) + // Connect to the test DB // Connect to DB via local socket, should be faster. config := db.ConfigFromEnvironment() diff --git a/tests/pkg/db/testDB.go b/tests/pkg/db/testDB.go new file mode 100644 index 00000000..bb9d6d14 --- /dev/null +++ b/tests/pkg/db/testDB.go @@ -0,0 +1,31 @@ +package db + +import ( + "context" + "fmt" + "os" + "os/exec" +) + +// CreateTestDB creates a new and ready test DB with the same structure as the F-PKI one. +func CreateTestDB(ctx context.Context, dbName string) error { + // Import the tools/create_script.sh in a bash session and run its function. + args := []string{ + // "-c", + // "source", + // "./tools/create_schema.sh", + // "&&", + // "create_new_db", + // dbName, + "-c", + fmt.Sprintf("source ./tools/create_schema.sh && create_new_db %s", dbName), + } + cmd := exec.Command("bash", args...) + out, err := cmd.CombinedOutput() + if err != nil { + fmt.Fprint(os.Stderr, string(out)) + return err + } + + return nil +} From a1a6ce3796d1f54c1ccedad19ac84293dca6a80a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 20 Mar 2023 16:40:30 +0100 Subject: [PATCH 054/187] Refactor db package. 
--- pkg/db/db.go | 2 - pkg/db/init.go | 62 ----------------- pkg/db/mysql/init.go | 68 +++++++++++++++++++ pkg/db/{ => mysql}/mysql.go | 4 +- pkg/db/{ => mysql}/read.go | 17 ++--- pkg/db/{ => mysql}/read_worker.go | 5 +- pkg/db/{ => mysql}/write.go | 13 ++-- pkg/mapserver/responder/old_responder.go | 3 +- pkg/mapserver/updater/updater.go | 3 +- tests/benchmark/db_benchmark/db.go | 6 +- .../updater_benchmark/main.go | 5 +- .../wholesys_benchmark_PoA/main.go | 6 +- .../wholesys_benchmark_PoP/main.go | 6 +- .../wholesys_benchmark_PoP_diffSize/main.go | 6 +- tests/benchmark/smt_benchmark/main.go | 2 +- tests/integration/db/db.go | 7 +- tests/integration/grpc_test/main.go | 4 +- tests/integration/mapserver/main.go | 3 +- tests/integration/smt/smt.go | 10 +-- tests/pkg/db/testDB.go | 13 ++-- {pkg => tests/pkg}/db/test_utils.go | 42 +++++------- 21 files changed, 149 insertions(+), 138 deletions(-) create mode 100644 pkg/db/mysql/init.go rename pkg/db/{ => mysql}/mysql.go (99%) rename pkg/db/{ => mysql}/read.go (94%) rename pkg/db/{ => mysql}/read_worker.go (93%) rename pkg/db/{ => mysql}/write.go (97%) rename {pkg => tests/pkg}/db/test_utils.go (66%) diff --git a/pkg/db/db.go b/pkg/db/db.go index dbad6589..3a52b54e 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -8,8 +8,6 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" ) -const batchSize = 1000 - // KeyValuePair: key-value pair type KeyValuePair struct { Key common.SHA256Output diff --git a/pkg/db/init.go b/pkg/db/init.go index 7512a2c1..fd7a081a 100644 --- a/pkg/db/init.go +++ b/pkg/db/init.go @@ -1,9 +1,6 @@ package db import ( - "database/sql" - "fmt" - "net/url" "os" _ "github.com/go-sql-driver/mysql" @@ -52,62 +49,3 @@ func ConfigFromEnvironment() *Configuration { }, } } - -// Connect: connect to db, using the config file -func Connect(config *Configuration) (Conn, error) { - if config == nil { - config = ConfigFromEnvironment() - } - - db, err := connect(config) - if err != nil { - return nil, 
fmt.Errorf("cannot open DB: %w", err) - } - - // Set a very small number of concurrent connections per sql.DB . - // This avoids routines creating connections to the DB and holding vast amounts of - // data (which impact the heap), and forcing to slow down the pipelines until the existing - // DB connections complete their work. - maxConnections := 8 - db.SetMaxOpenConns(maxConnections) - - // check schema - if config.CheckSchema { - if err := checkSchema(db); err != nil { - return nil, fmt.Errorf("checking schema on connection: %w", err) - } - } - return NewMysqlDB(db) -} - -func connect(config *Configuration) (*sql.DB, error) { - dsn, err := url.Parse(config.Dsn) - if err != nil { - return nil, fmt.Errorf("bad connection string: %w", err) - } - uri := dsn.Query() - for k, v := range config.Values { - uri.Add(k, v) - } - dsn.RawQuery = uri.Encode() - return sql.Open("mysql", dsn.String()) -} - -func checkSchema(c *sql.DB) error { - _, err := c.Query("SELECT COUNT(*) FROM nodes") - if err != nil { - return fmt.Errorf("table nodes: %w", err) - } - row := c.QueryRow("SHOW STATUS LIKE 'max_used_connections'") - var varName string - var varValue string - if err = row.Scan(&varName, &varValue); err != nil { - return err - } - fmt.Printf("***************** Init %s : %s\n", varName, varValue) - if _, err = c.Exec("SET GLOBAL max_connections = 1024"); err != nil { - return err - } - fmt.Printf("***************** Init %s : %s\n", varName, varValue) - return nil -} diff --git a/pkg/db/mysql/init.go b/pkg/db/mysql/init.go new file mode 100644 index 00000000..d393699a --- /dev/null +++ b/pkg/db/mysql/init.go @@ -0,0 +1,68 @@ +package mysql + +import ( + "database/sql" + "fmt" + "net/url" + + "github.com/netsec-ethz/fpki/pkg/db" +) + +// Connect: connect to db, using the config file +func Connect(config *db.Configuration) (db.Conn, error) { + if config == nil { + config = db.ConfigFromEnvironment() + } + + db, err := connect(config) + if err != nil { + return nil, 
fmt.Errorf("cannot open DB: %w", err) + } + + // Set a very small number of concurrent connections per sql.DB . + // This avoids routines creating connections to the DB and holding vast amounts of + // data (which impact the heap), and forcing to slow down the pipelines until the existing + // DB connections complete their work. + maxConnections := 8 + db.SetMaxOpenConns(maxConnections) + + // check schema + if config.CheckSchema { + if err := checkSchema(db); err != nil { + return nil, fmt.Errorf("checking schema on connection: %w", err) + } + } + return NewMysqlDB(db) +} + +func connect(config *db.Configuration) (*sql.DB, error) { + dsn, err := url.Parse(config.Dsn) + if err != nil { + return nil, fmt.Errorf("bad connection string: %w", err) + } + uri := dsn.Query() + for k, v := range config.Values { + uri.Add(k, v) + } + dsn.RawQuery = uri.Encode() + return sql.Open("mysql", dsn.String()) +} + +func checkSchema(c *sql.DB) error { + _, err := c.Query("SELECT COUNT(*) FROM nodes") + if err != nil { + return fmt.Errorf("table nodes: %w", err) + } + row := c.QueryRow("SHOW STATUS LIKE 'max_used_connections'") + var varName string + var varValue string + if err = row.Scan(&varName, &varValue); err != nil { + return err + } + fmt.Printf("***************** Init %s : %s\n", varName, varValue) + if _, err = c.Exec("SET GLOBAL max_connections = 1024"); err != nil { + return err + } + fmt.Printf("***************** Init %s : %s\n", varName, varValue) + return nil +} diff --git a/pkg/db/mysql.go b/pkg/db/mysql/mysql.go similarity index 99% rename from pkg/db/mysql.go rename to pkg/db/mysql/mysql.go index 62771abc..a78d2c45 100644 --- a/pkg/db/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -1,4 +1,4 @@ -package db +package mysql import ( "context" @@ -11,6 +11,8 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" ) +const batchSize = 1000 + // NOTE // The project contains three tables: // * Domain entries tables: the table to store domain materials. 
diff --git a/pkg/db/read.go b/pkg/db/mysql/read.go similarity index 94% rename from pkg/db/read.go rename to pkg/db/mysql/read.go index 43a13699..7d712181 100644 --- a/pkg/db/read.go +++ b/pkg/db/mysql/read.go @@ -1,4 +1,4 @@ -package db +package mysql import ( "context" @@ -6,6 +6,7 @@ import ( "fmt" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" ) // used during main thread and worker thread @@ -60,13 +61,13 @@ func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Outp // RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table // No sql.ErrNoRows will be thrown, if some records does not exist. Check the length of result func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, keys []*common.SHA256Output) ( - []*KeyValuePair, error) { + []*db.KeyValuePair, error) { return c.retrieveDomainEntries(ctx, keys) } func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common.SHA256Output, -) ([]*KeyValuePair, error) { +) ([]*db.KeyValuePair, error) { if len(domainIDs) == 0 { return nil, nil @@ -81,14 +82,14 @@ func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common fmt.Printf("Query is: '%s'\n", str) return nil, fmt.Errorf("error obtaining payloads for domains: %w", err) } - pairs := make([]*KeyValuePair, 0, len(domainIDs)) + pairs := make([]*db.KeyValuePair, 0, len(domainIDs)) for rows.Next() { var id, payload []byte err := rows.Scan(&id, &payload) if err != nil { return nil, fmt.Errorf("error scanning domain ID and its payload") } - pairs = append(pairs, &KeyValuePair{ + pairs = append(pairs, &db.KeyValuePair{ Key: *(*common.SHA256Output)(id), Value: payload, }) @@ -98,7 +99,7 @@ func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common // used for retrieving key value pair func (c *mysqlDB) retrieveDomainEntriesOld(ctx context.Context, keys []*common.SHA256Output) ( - []*KeyValuePair, error) { + 
[]*db.KeyValuePair, error) { str := "SELECT `key`, `value` FROM domainEntries WHERE `key` IN " + repeatStmt(1, len(keys)) args := make([]interface{}, len(keys)) for i, k := range keys { @@ -111,12 +112,12 @@ func (c *mysqlDB) retrieveDomainEntriesOld(ctx context.Context, keys []*common.S } defer rows.Close() var k, v []byte - domainEntries := make([]*KeyValuePair, 0, len(keys)) + domainEntries := make([]*db.KeyValuePair, 0, len(keys)) for rows.Next() { if err = rows.Scan(&k, &v); err != nil { return nil, err } - domainEntries = append(domainEntries, &KeyValuePair{ + domainEntries = append(domainEntries, &db.KeyValuePair{ Key: *(*common.SHA256Output)(k), Value: v, }) diff --git a/pkg/db/read_worker.go b/pkg/db/mysql/read_worker.go similarity index 93% rename from pkg/db/read_worker.go rename to pkg/db/mysql/read_worker.go index b485e45c..ec30fe78 100644 --- a/pkg/db/read_worker.go +++ b/pkg/db/mysql/read_worker.go @@ -1,4 +1,4 @@ -package db +package mysql import ( "context" @@ -7,11 +7,12 @@ import ( "strconv" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" ) // keyValueResult: used in worker thread; in multi-thread read type keyValueResult struct { - Pairs []*KeyValuePair + Pairs []*db.KeyValuePair Err error } diff --git a/pkg/db/write.go b/pkg/db/mysql/write.go similarity index 97% rename from pkg/db/write.go rename to pkg/db/mysql/write.go index 780b1038..c9601816 100644 --- a/pkg/db/write.go +++ b/pkg/db/mysql/write.go @@ -1,4 +1,4 @@ -package db +package mysql import ( "context" @@ -10,14 +10,15 @@ import ( "github.com/go-sql-driver/mysql" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" ) -func (c *mysqlDB) UpdateDomainEntries(ctx context.Context, pairs []*KeyValuePair) (int, error) { +func (c *mysqlDB) UpdateDomainEntries(ctx context.Context, pairs []*db.KeyValuePair) (int, error) { panic("not available") } // UpdateDomainEntries: Update a list of key-value 
store -func (c *mysqlDB) UpdateDomainEntriesOLD(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { +func (c *mysqlDB) UpdateDomainEntriesOLD(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { numOfUpdatedRecords, err := c.doUpdatePairs(ctx, keyValuePairs, c.getDomainEntriesUpdateStmts) if err != nil { return 0, fmt.Errorf("UpdateDomainEntries | %w", err) @@ -52,7 +53,7 @@ func (c *mysqlDB) DeleteTreeNodesOLD(ctx context.Context, keys []common.SHA256Ou return n, nil } -func (c *mysqlDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { +func (c *mysqlDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { if len(keyValuePairs) == 0 { return 0, nil } @@ -74,7 +75,7 @@ func (c *mysqlDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*KeyValue } // UpdateTreeNodes: Update a list of key-value store -func (c *mysqlDB) UpdateTreeNodesOLD(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) { +func (c *mysqlDB) UpdateTreeNodesOLD(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { numOfUpdatedPairs, err := c.doUpdatePairs(ctx, keyValuePairs, c.getTreeStructureUpdateStmts) if err != nil { return 0, fmt.Errorf("UpdateTreeNodes | %w", err) @@ -119,7 +120,7 @@ func (c *mysqlDB) SaveRoot(ctx context.Context, root *common.SHA256Output) error // // ******************************************************************** // worker to update key-value pairs -func (c *mysqlDB) doUpdatePairs(ctx context.Context, keyValuePairs []*KeyValuePair, +func (c *mysqlDB) doUpdatePairs(ctx context.Context, keyValuePairs []*db.KeyValuePair, stmtGetter prepStmtGetter) (int, error) { func (HugeLeafError) Error() string { diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go index 0a120ace..61f790c8 100644 --- a/pkg/mapserver/responder/old_responder.go +++ b/pkg/mapserver/responder/old_responder.go @@ -8,6 +8,7 @@ import ( 
"github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/domain" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" @@ -25,7 +26,7 @@ type OldMapResponder struct { // NewOldMapResponder: return a new responder func NewOldMapResponder(ctx context.Context, root []byte, cacheHeight int, mapServerConfigPath string) (*OldMapResponder, error) { // new db connection for SMT - conn, err := db.Connect(nil) + conn, err := mysql.Connect(nil) if err != nil { return nil, fmt.Errorf("NewMapResponder | Connect | %w", err) } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index d13aefd9..13816f5c 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -11,6 +11,7 @@ import ( ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) @@ -25,7 +26,7 @@ type MapUpdater struct { // NewMapUpdater: return a new map updater. 
func NewMapUpdater(root []byte, cacheHeight int) (*MapUpdater, error) { // db conn for map updater - dbConn, err := db.Connect(nil) + dbConn, err := mysql.Connect(nil) if err != nil { return nil, fmt.Errorf("NewMapUpdater | db.Connect | %w", err) } diff --git a/tests/benchmark/db_benchmark/db.go b/tests/benchmark/db_benchmark/db.go index b84999fa..47456944 100644 --- a/tests/benchmark/db_benchmark/db.go +++ b/tests/benchmark/db_benchmark/db.go @@ -10,15 +10,17 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" + dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" ) func main() { - db.TruncateAllTablesWithoutTestObject() + dbtest.TruncateAllTablesWithoutTestObject() // ***************************************************************** // open a db connection // ***************************************************************** - conn, err := db.Connect(nil) + conn, err := mysql.Connect(nil) if err != nil { panic(err) } diff --git a/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go b/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go index 542374c1..d721b4d7 100644 --- a/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go +++ b/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go @@ -13,6 +13,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" ) var domainCount int @@ -21,7 +22,7 @@ var domainCount int func main() { domainCount = 0 - db.TruncateAllTablesWithoutTestObject() + dbtest.TruncateAllTablesWithoutTestObject() csvFile, err := os.Create("result.csv") @@ -72,7 +73,7 @@ func main() { fmt.Println("time to commit the changes: ", time.Since(start)) timeToUpdateSMT := time.Since(start) - domainCount = db.GetDomainNamesForTest() + 
domainCount = dbtest.GetDomainCountWithoutTestObject() fmt.Println("total domains: ", domainCount) err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go index 54cad2e9..5471b7ee 100644 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go +++ b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go @@ -12,11 +12,11 @@ import ( "time" _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" ) var domainCount int @@ -33,7 +33,7 @@ func main() { } domainCount = 0 - db.TruncateAllTablesWithoutTestObject() + dbtest.TruncateAllTablesWithoutTestObject() csvFile, err := os.Create("result.csv") respondeCSVFile, err := os.Create("result_responder.csv") @@ -86,7 +86,7 @@ func main() { fmt.Println("time to commit the changes: ", time.Since(start)) timeToUpdateSMT := time.Since(start) - domainCount = db.GetDomainNamesForTest() + domainCount = dbtest.GetDomainCountWithoutTestObject() fmt.Println("total domains: ", domainCount) err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), strconv.Itoa(domainCount))) diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go index d3d5f3ba..92df1e84 100644 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go +++ b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go @@ -14,11 +14,11 @@ import ( "time" _ 
"github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" ) var domainCount int @@ -61,7 +61,7 @@ func main() { }*/ domainCount = 0 - db.TruncateAllTablesWithoutTestObject() + dbtest.TruncateAllTablesWithoutTestObject() csvFile, err := os.Create("result.csv") respondeCSVFile, err := os.Create("result_responder.csv") @@ -116,7 +116,7 @@ func main() { fmt.Println("time to commit the changes: ", time.Since(start)) timeToUpdateSMT := time.Since(start) - domainCount = db.GetDomainNamesForTest() + domainCount = dbtest.GetDomainCountWithoutTestObject() fmt.Println("total domains: ", domainCount) err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), strconv.Itoa(domainCount))) diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go index 8899e3e0..2a7329e9 100644 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go +++ b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go @@ -13,11 +13,11 @@ import ( "time" _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" ) var domainCount int @@ -34,7 +34,7 @@ func main() { testSet1000 := loadTestData("testData1000.txt") domainCount = 0 - db.TruncateAllTablesWithoutTestObject() + 
dbtest.TruncateAllTablesWithoutTestObject() csvFile, err := os.Create("result.csv") csvPathFile, err := os.Create("pathResult.csv") @@ -88,7 +88,7 @@ func main() { fmt.Println("time to commit the changes: ", time.Since(start)) timeToUpdateSMT := time.Since(start) - domainCount = db.GetDomainNamesForTest() + domainCount = dbtest.GetDomainCountWithoutTestObject() fmt.Println("total domains: ", domainCount) err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), strconv.Itoa(domainCount))) diff --git a/tests/benchmark/smt_benchmark/main.go b/tests/benchmark/smt_benchmark/main.go index 7d2f3091..5e8e84a0 100644 --- a/tests/benchmark/smt_benchmark/main.go +++ b/tests/benchmark/smt_benchmark/main.go @@ -12,8 +12,8 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" + "github.com/netsec-ethz/fpki/tests/pkg/db" ) var wg sync.WaitGroup diff --git a/tests/integration/db/db.go b/tests/integration/db/db.go index b862e2a3..6efc9f39 100644 --- a/tests/integration/db/db.go +++ b/tests/integration/db/db.go @@ -11,6 +11,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" ) func main() { @@ -34,7 +35,7 @@ func testTreeTable() { // ***************************************************************** // open a db connection // ***************************************************************** - conn, err := db.Connect(nil) + conn, err := mysql.Connect(nil) if err != nil { panic(err) } @@ -233,7 +234,7 @@ func testDomainEntriesTable() { // ***************************************************************** // open a db connection // ***************************************************************** - conn, err := db.Connect(nil) + conn, err := mysql.Connect(nil) if err != nil { panic(err) } @@ -372,7 +373,7 @@ 
func testUpdateTable() { // ***************************************************************** // open a db connection // ***************************************************************** - conn, err := db.Connect(nil) + conn, err := mysql.Connect(nil) if err != nil { panic(err) } diff --git a/tests/integration/grpc_test/main.go b/tests/integration/grpc_test/main.go index fe22b795..684f7562 100644 --- a/tests/integration/grpc_test/main.go +++ b/tests/integration/grpc_test/main.go @@ -11,12 +11,12 @@ import ( "sync" "time" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/grpc/grpcclient" "github.com/netsec-ethz/fpki/pkg/grpc/grpcserver" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" ct "github.com/google/certificate-transparency-go" ctTls "github.com/google/certificate-transparency-go/tls" @@ -27,7 +27,7 @@ import ( var wg sync.WaitGroup func main() { - db.TruncateAllTablesWithoutTestObject() + dbtest.TruncateAllTablesWithoutTestObject() // new map updator mapUpdater, err := updater.NewMapUpdater(nil, 233) diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 4c8b1fe6..0d4c5913 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -8,6 +8,7 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" testdb "github.com/netsec-ethz/fpki/tests/pkg/db" ) @@ -29,7 +30,7 @@ func mainFunc() int { // Connect to DB via local socket, should be faster. 
config := db.ConfigFromEnvironment() // config.Dsn = "root@unix(/var/run/mysqld/mysqld.sock)/fpki" - conn, err := db.Connect(config) + conn, err := mysql.Connect(config) panicIfError(err) root, err := conn.LoadRoot(ctx) diff --git a/tests/integration/smt/smt.go b/tests/integration/smt/smt.go index 554dc94a..6f3d6b87 100644 --- a/tests/integration/smt/smt.go +++ b/tests/integration/smt/smt.go @@ -11,7 +11,7 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) @@ -30,7 +30,7 @@ func testUpdateWithSameKeys() { //*************************************************************** // connect to db //*************************************************************** - db, err := db.Connect(nil) + db, err := mysql.Connect(nil) if err != nil { panic(err) } @@ -100,7 +100,7 @@ func testTrieMerkleProofAndReloadTree() { //*************************************************************** // connect to a new db //*************************************************************** - dbConn, err := db.Connect(nil) + dbConn, err := mysql.Connect(nil) if err != nil { panic(err) } @@ -154,7 +154,7 @@ func testTrieMerkleProofAndReloadTree() { //*************************************************************** // start a new db //*************************************************************** - dbConn1, err := db.Connect(nil) + dbConn1, err := mysql.Connect(nil) if err != nil { panic(err) } @@ -202,7 +202,7 @@ func testTrieMerkleProofAndReloadTree() { } func testTrieLoadCache() { - dbConn, err := db.Connect(nil) + dbConn, err := mysql.Connect(nil) if err != nil { panic(err) } diff --git a/tests/pkg/db/testDB.go b/tests/pkg/db/testDB.go index bb9d6d14..fd238f41 100644 --- a/tests/pkg/db/testDB.go +++ b/tests/pkg/db/testDB.go @@ -5,18 +5,15 @@ import ( "fmt" "os" "os/exec" + + 
"github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" ) // CreateTestDB creates a new and ready test DB with the same structure as the F-PKI one. func CreateTestDB(ctx context.Context, dbName string) error { // Import the tools/create_script.sh in a bash session and run its function. args := []string{ - // "-c", - // "source", - // "./tools/create_schema.sh", - // "&&", - // "create_new_db", - // dbName, "-c", fmt.Sprintf("source ./tools/create_schema.sh && create_new_db %s", dbName), } @@ -29,3 +26,7 @@ func CreateTestDB(ctx context.Context, dbName string) error { return nil } + +func Connect(config *db.Configuration) (db.Conn, error) { + return mysql.Connect(config) +} diff --git a/pkg/db/test_utils.go b/tests/pkg/db/test_utils.go similarity index 66% rename from pkg/db/test_utils.go rename to tests/pkg/db/test_utils.go index c77b5643..385f57ac 100644 --- a/pkg/db/test_utils.go +++ b/tests/pkg/db/test_utils.go @@ -1,21 +1,13 @@ package db import ( + "context" "fmt" + "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/stretchr/testify/require" ) -type testingT struct{} - -func (t *testingT) Errorf(format string, args ...interface{}) { - str := fmt.Sprintf(format, args...) - panic(str) -} -func (t *testingT) FailNow() { - panic("") -} - // TruncateAllTablesWithoutTestObject will truncate all tables in DB. This function should // be used only while testing. func TruncateAllTablesWithoutTestObject() { @@ -26,37 +18,29 @@ func TruncateAllTablesWithoutTestObject() { // TruncateAllTablesForTest will truncate all tables in DB. This function should be used // only in tests. 
func TruncateAllTablesForTest(t require.TestingT) { - db, err := Connect(nil) + db, err := mysql.Connect(nil) require.NoError(t, err) - c := db.(*mysqlDB) - require.NotNil(t, c) - _, err = c.db.Exec("TRUNCATE fpki.domainEntries;") - require.NoError(t, err) - _, err = c.db.Exec("TRUNCATE fpki.tree;") - require.NoError(t, err) - _, err = c.db.Exec("TRUNCATE fpki.updates;") + err = db.TruncateAllTables(context.Background()) require.NoError(t, err) err = db.Close() require.NoError(t, err) } -// GetDomainNamesForTest will get rows count of domain entries table +// GetDomainCountWithoutTestObject will get rows count of domain entries table // be used only while testing. -func GetDomainNamesForTest() int { +func GetDomainCountWithoutTestObject() int { t := &testingT{} return getDomainNames(t) } func getDomainNames(t require.TestingT) int { - db, err := Connect(nil) + db, err := mysql.Connect(nil) require.NoError(t, err) - c := db.(*mysqlDB) - require.NotNil(t, c) var count int - err = c.db.QueryRow("SELECT COUNT(*) FROM domainEntries;").Scan(&count) + err = db.DB().QueryRow("SELECT COUNT(*) FROM domainEntries;").Scan(&count) require.NoError(t, err) err = db.Close() @@ -64,3 +48,13 @@ func getDomainNames(t require.TestingT) int { return count } + +type testingT struct{} + +func (t *testingT) Errorf(format string, args ...interface{}) { + str := fmt.Sprintf(format, args...) + panic(str) +} +func (t *testingT) FailNow() { + panic("") +} From 59cf8d5e47e164397d16fad6d8928aa3e7dcbfc4 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 20 Mar 2023 16:44:47 +0100 Subject: [PATCH 055/187] Move testdata to tests/. 
--- .../mapserver_benchmark => }/testdata/certs.pem.gz | Bin .../testdata/dump100K.sql.gz | Bin .../mapserver_benchmark => }/testdata/root100K.bin | 0 .../testdata/uniqueNames.txt | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename tests/{benchmark/mapserver_benchmark => }/testdata/certs.pem.gz (100%) rename tests/{benchmark/mapserver_benchmark => }/testdata/dump100K.sql.gz (100%) rename tests/{benchmark/mapserver_benchmark => }/testdata/root100K.bin (100%) rename tests/{benchmark/mapserver_benchmark => }/testdata/uniqueNames.txt (100%) diff --git a/tests/benchmark/mapserver_benchmark/testdata/certs.pem.gz b/tests/testdata/certs.pem.gz similarity index 100% rename from tests/benchmark/mapserver_benchmark/testdata/certs.pem.gz rename to tests/testdata/certs.pem.gz diff --git a/tests/benchmark/mapserver_benchmark/testdata/dump100K.sql.gz b/tests/testdata/dump100K.sql.gz similarity index 100% rename from tests/benchmark/mapserver_benchmark/testdata/dump100K.sql.gz rename to tests/testdata/dump100K.sql.gz diff --git a/tests/benchmark/mapserver_benchmark/testdata/root100K.bin b/tests/testdata/root100K.bin similarity index 100% rename from tests/benchmark/mapserver_benchmark/testdata/root100K.bin rename to tests/testdata/root100K.bin diff --git a/tests/benchmark/mapserver_benchmark/testdata/uniqueNames.txt b/tests/testdata/uniqueNames.txt similarity index 100% rename from tests/benchmark/mapserver_benchmark/testdata/uniqueNames.txt rename to tests/testdata/uniqueNames.txt From d68a6898d3f002b3dcd8b016701884168e88c9e5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 20 Mar 2023 18:27:39 +0100 Subject: [PATCH 056/187] Add configurability to the DB package. 
--- cmd/ingest/main.go | 10 ++-- pkg/db/conf.go | 31 ++++++++++++ pkg/db/init.go | 51 -------------------- pkg/db/mysql/conf.go | 74 +++++++++++++++++++++++++++++ pkg/db/mysql/init.go | 38 ++++++++++++++- pkg/db/mysql/mysql.go | 2 +- tests/integration/mapserver/main.go | 12 +++-- 7 files changed, 157 insertions(+), 61 deletions(-) create mode 100644 pkg/db/conf.go delete mode 100644 pkg/db/init.go create mode 100644 pkg/db/mysql/conf.go diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 321ce9a2..0bdba9ce 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -12,6 +12,7 @@ import ( "syscall" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" ) const ( @@ -101,9 +102,12 @@ func mainFunction() int { }() // Connect to DB via local socket, should be faster. - config := db.ConfigFromEnvironment() - config.Dsn = "root@unix(/var/run/mysqld/mysqld.sock)/fpki" - conn, err := db.Connect(config) + config := db.NewConfig( + mysql.WithDefaults(), + mysql.WithEnvironment(), + mysql.WithLocalSocket("/var/run/mysqld/mysqld.sock"), + ) + conn, err := mysql.Connect(config) exitIfError(err) // Load root if any: diff --git a/pkg/db/conf.go b/pkg/db/conf.go new file mode 100644 index 00000000..65f076a6 --- /dev/null +++ b/pkg/db/conf.go @@ -0,0 +1,31 @@ +package db + +const KeyDBName = "DBNAME" + +// Configuration for the db connection +type Configuration struct { + Dsn string + Values map[string]string + CheckSchema bool // indicates if opening the connection checks the health of the schema +} + +type ConfigurationModFunction func(*Configuration) *Configuration + +func NewConfig(modifiers ...ConfigurationModFunction) *Configuration { + c := &Configuration{ + Values: map[string]string{ + KeyDBName: "fpki", + }, + } + for _, fcn := range modifiers { + c = fcn(c) + } + return c +} + +func WithDB(dbName string) ConfigurationModFunction { + return func(c *Configuration) *Configuration { + c.Values[KeyDBName] = dbName + return c + 
} +} diff --git a/pkg/db/init.go b/pkg/db/init.go deleted file mode 100644 index fd7a081a..00000000 --- a/pkg/db/init.go +++ /dev/null @@ -1,51 +0,0 @@ -package db - -import ( - "os" - - _ "github.com/go-sql-driver/mysql" -) - -// Configuration for the db connection -type Configuration struct { - Dsn string - Values map[string]string - CheckSchema bool // indicates if opening the connection checks the health of the schema -} - -// ConfigFromEnvironment returns a valid DB connection configuration set up from environment -// variables. MYSQL_USER, MYSQL_PASSWORD, MYSQL_HOST AND MYSQL_PORT are values that a user can -// set to influence the connection. The defaults are set to yield "root@tcp(localhost)/fpki" as -// the DSN. -func ConfigFromEnvironment() *Configuration { - env := map[string]string{ - "MYSQL_USER": "root", - "MYSQL_PASSWORD": "", - "MYSQL_HOST": "127.0.0.1", - "MYSQL_PORT": "", - } - for k := range env { - v, exists := os.LookupEnv(k) - if exists { - env[k] = v - } - } - dsnString := env["MYSQL_USER"] - if env["MYSQL_PASSWORD"] != "" { - dsnString += ":" + env["MYSQL_PASSWORD"] - } - dsnString += "@tcp(" + env["MYSQL_HOST"] - if env["MYSQL_PORT"] != "" { - dsnString += ":" + env["MYSQL_PORT"] - } - dsnString += ")/fpki" - // fmt.Printf("FPKI | DB INIT | using dsn: %s\n", dsnString) - return &Configuration{ - Dsn: dsnString, - Values: map[string]string{ - "interpolateParams": "true", // 1 round trip per query - "collation": "binary", - "maxAllowedPacket": "1073741824", // 1G (cannot use "1G" as the driver uses Atoi) - }, - } -} diff --git a/pkg/db/mysql/conf.go b/pkg/db/mysql/conf.go new file mode 100644 index 00000000..f3bb9499 --- /dev/null +++ b/pkg/db/mysql/conf.go @@ -0,0 +1,74 @@ +package mysql + +import ( + "os" + + "github.com/netsec-ethz/fpki/pkg/db" +) + +const ( + keyUser = "MYSQL_USER" + keyPassword = "MYSQL_PASSWORD" + keyHost = "MYSQL_HOST" + keyPort = "MYSQL_PORT" + keyLocalSocket = "MYSQL_LOCALSOCKET" +) + +// 
WithEnvironment modifies the configuration with values from the environment variables. +// MYSQL_USER, MYSQL_PASSWORD, MYSQL_HOST AND MYSQL_PORT are values that a user can +// set to influence the connection. The defaults are set to yield "root@tcp(localhost)/DBNAME" as +// the DSN. +func WithEnvironment() db.ConfigurationModFunction { + return func(c *db.Configuration) *db.Configuration { + env := map[string]string{ + keyUser: "root", + keyPassword: "", + keyHost: "127.0.0.1", + keyPort: "", + } + for k, v := range env { + envValue, exists := os.LookupEnv(k) + if exists { + v = envValue + } + c.Values[k] = v + } + return c + } +} + +// WithUser modifies the configuration to set a specific user. +func WithUser(user string) db.ConfigurationModFunction { + return func(c *db.Configuration) *db.Configuration { + c.Values[keyUser] = user + return c + } +} + +// WithLocalSocket modifies the configuration so that the DSN looks like e.g. +// "root@unix(/var/run/mysqld/mysqld.sock)/fpki". +// These values can be altered by setting the socket path, the user and the DB. 
+func WithLocalSocket(path string) db.ConfigurationModFunction { + return func(c *db.Configuration) *db.Configuration { + c.Values[keyLocalSocket] = path + return c + } +} + +func WithDefaults() db.ConfigurationModFunction { + return func(c *db.Configuration) *db.Configuration { + defaults := map[string]string{ + keyUser: "root", + keyPassword: "", + keyHost: "127.0.0.1", + keyPort: "", + "interpolateParams": "true", // 1 round trip per query + "collation": "binary", + "maxAllowedPacket": "1073741824", // 1G (cannot use "1G" as the driver uses Atoi) + } + for k, v := range defaults { + c.Values[k] = v + } + return c + } +} diff --git a/pkg/db/mysql/init.go b/pkg/db/mysql/init.go index d393699a..4d69955e 100644 --- a/pkg/db/mysql/init.go +++ b/pkg/db/mysql/init.go @@ -5,18 +5,21 @@ import ( "fmt" "net/url" + _ "github.com/go-sql-driver/mysql" + "github.com/netsec-ethz/fpki/pkg/db" ) // Connect: connect to db, using the config file func Connect(config *db.Configuration) (db.Conn, error) { if config == nil { - config = db.ConfigFromEnvironment() + return nil, fmt.Errorf("nil config not allowed") } + config.Dsn = parseDSN(config) db, err := connect(config) if err != nil { - return nil, fmt.Errorf("cannot open DB: %w", err) + return nil, fmt.Errorf("with DSN: %s, cannot open DB: %w", config.Dsn, err) } // Set a very small number of concurrent connections per sql.DB . @@ -35,6 +38,37 @@ func Connect(config *db.Configuration) (db.Conn, error) { return NewMysqlDB(db) } +func parseDSN(config *db.Configuration) string { + val := config.Values + dsnString := val[keyUser] + // If a local socket is requested, the DSN is composed of different keys. 
+ if path, ok := val[keyLocalSocket]; ok { + // Form a string like "root@unix(/var/run/mysqld/mysqld.sock)/fpki" + dsnString += fmt.Sprintf("@unix(%s)/%s", + path, val[db.KeyDBName]) + } else { + // Form a string like "root:password@tcp(1.1.1.1:8080)/fpki" + if val[keyPassword] != "" { + dsnString += ":" + val[keyPassword] + } + dsnString += "@tcp(" + val[keyHost] + if val[keyPort] != "" { + dsnString += ":" + val[keyPort] + } + dsnString += fmt.Sprintf(")/%s", val[db.KeyDBName]) + } + + // Remove all values that are used to establish the DSN from the remaining pairs. + delete(val, keyUser) + delete(val, keyPassword) + delete(val, keyHost) + delete(val, keyPort) + delete(val, keyLocalSocket) + delete(val, db.KeyDBName) + + return dsnString +} + func connect(config *db.Configuration) (*sql.DB, error) { dsn, err := url.Parse(config.Dsn) if err != nil { diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index a78d2c45..ea86cb52 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -29,7 +29,7 @@ type mysqlDB struct { prepGetValueDomainEntries *sql.Stmt // returns the domain entries prepGetValueTree *sql.Stmt // get key-value pair from tree table - prepGetUpdatedDomains *sql.Stmt // get updated domains + // prepGetUpdatedDomains *sql.Stmt // get updated domains getDomainEntriesUpdateStmts prepStmtGetter // used to update key-values in domain entries getTreeStructureUpdateStmts prepStmtGetter // used to update key-values in the tree table diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 0d4c5913..d18623f5 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -22,17 +22,21 @@ func mainFunc() int { defer cancelF() // Create an empty test DB - dbName := "testo" + dbName := "mapServerIT" err := testdb.CreateTestDB(ctx, dbName) panicIfError(err) + defer func() { + // TODO(juagargi) destroy the DB with + }() // Connect to the test DB - // Connect to DB via local socket, 
should be faster. - config := db.ConfigFromEnvironment() - // config.Dsn = "root@unix(/var/run/mysqld/mysqld.sock)/fpki" + // config := db.NewConfig(mysql.WithDefaults(), mysql.WithEnvironment(), db.WithDB("mapserverIT")) + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) conn, err := mysql.Connect(config) panicIfError(err) + // Ingest the testdata. + root, err := conn.LoadRoot(ctx) panicIfError(err) fmt.Printf("root is %s\n", hex.EncodeToString((*root)[:])) From cb32333d456db9d84570cdfa50cda1db29d844d7 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Mar 2023 13:45:10 +0100 Subject: [PATCH 057/187] WIP Integration tests with CSV data. --- pkg/mapserver/updater/certs_updater.go | 13 +- pkg/util/io.go | 117 ++++++++++++++++++ .../mapserver_benchmark/download_test.go | 20 +-- .../mapserver_benchmark/updater_test.go | 45 +++---- tests/integration/mapserver/main.go | 9 ++ 5 files changed, 148 insertions(+), 56 deletions(-) create mode 100644 pkg/util/io.go diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 5e99f98c..f6d5b4c4 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -184,22 +184,18 @@ func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( // Additionally, if the payload of any of the ancestors of the certificate is nil, this function // interprets it as the ancestor is already present in the DB, and thus will omit returning it // and any posterior ancestors. 
-func UnfoldCert(cert *ctx509.Certificate, certID *common.SHA256Output, +func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, chain []*ctx509.Certificate, chainIDs []*common.SHA256Output, ) (certPayloads []*ctx509.Certificate, certIDs []*common.SHA256Output, parentPayloads []*ctx509.Certificate, parentIDs []*common.SHA256Output) { - // return UnfoldCerts([]*ctx509.Certificate{cert}, [][]*ctx509.Certificate{chain}) - - // todo: do not add parents that have their payload to nil, because they must be in DB already - certPayloads = make([]*ctx509.Certificate, 0, len(parentPayloads)+1) certIDs = make([]*common.SHA256Output, 0, len(parentPayloads)+1) parentPayloads = make([]*ctx509.Certificate, 0, len(parentPayloads)+1) parentIDs = make([]*common.SHA256Output, 0, len(parentPayloads)+1) // Always add the leaf certificate. - certPayloads = append(certPayloads, cert) + certPayloads = append(certPayloads, leafCert) certIDs = append(certIDs, certID) parentPayloads = append(parentPayloads, chain[0]) parentIDs = append(parentIDs, chainIDs[0]) @@ -207,8 +203,9 @@ func UnfoldCert(cert *ctx509.Certificate, certID *common.SHA256Output, i := 0 for ; i < len(chain)-1; i++ { if chain[i] == nil { - // This parent has been inserted already in DB. - // Its parent must have been inserted as well. There are no more parents to insert. + // This parent has been inserted already in DB. This implies that its own parent, + // the grandparent of the leaf, must have been inserted as well; and so on. + // There are no more parents to insert. 
return } certPayloads = append(certPayloads, chain[i]) diff --git a/pkg/util/io.go b/pkg/util/io.go new file mode 100644 index 00000000..04a24cac --- /dev/null +++ b/pkg/util/io.go @@ -0,0 +1,117 @@ +package util + +import ( + "bytes" + "compress/gzip" + "encoding/base64" + "encoding/csv" + "encoding/pem" + "io" + "os" + "strings" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" +) + +const ( + CertificateColumn = 3 + CertChainColumn = 4 +) + +func Gunzip(filename string) ([]byte, error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + z, err := gzip.NewReader(f) + if err != nil { + return nil, err + } + + raw, err := io.ReadAll(z) + if err != nil { + return nil, err + } + + err = z.Close() + if err != nil { + return nil, err + } + + err = f.Close() + return raw, err +} + +func LoadCertsFromPEM(raw []byte) ([]*ctx509.Certificate, error) { + certs := make([]*ctx509.Certificate, 0) + for len(raw) > 0 { + var block *pem.Block + block, raw = pem.Decode(raw) + if block.Type != "CERTIFICATE" { + continue + } + c, err := ctx509.ParseTBSCertificate(block.Bytes) + if err != nil { + return nil, err + } + + certs = append(certs, c) + } + return certs, nil +} + +// LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of IDs and payloads for +// each certificate and its ancestry. +// +// a slice containing N elements, which represent the certificate +// chain from the leaf to the root certificate. +func LoadCertsAndChainsFromCSV(raw []byte) ([]*ctx509.Certificate, error) { + r := bytes.NewReader(raw) + reader := csv.NewReader(r) + reader.FieldsPerRecord = -1 // don't check number of fields + + records, err := reader.ReadAll() + if err != nil { + return nil, err + } + for _, fields := range records { + if len(fields) == 0 { + continue + } + + // Parse the certificate. 
+ rawBytes, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) + if err != nil { + return nil, err + } + certID := common.SHA256Hash32Bytes(rawBytes) + cert, err := ctx509.ParseCertificate(rawBytes) + if err != nil { + return nil, err + } + + // Parse the chain. + // The certificate chain is a list of base64 strings separated by semicolon (;). + strs := strings.Split(fields[CertChainColumn], ";") + chain := make([]*ctx509.Certificate, len(strs)) + chainIDs := make([]*common.SHA256Output, len(strs)) + for i, s := range strs { + rawBytes, err = base64.StdEncoding.DecodeString(s) + if err != nil { + return nil, err + } + chain[i], err = ctx509.ParseCertificate(rawBytes) + if err != nil { + return nil, err + } + } + + _ = certID + _ = cert + _ = chainIDs + + } + + return nil, nil +} diff --git a/tests/benchmark/mapserver_benchmark/download_test.go b/tests/benchmark/mapserver_benchmark/download_test.go index ec3bc074..7d0da7a0 100644 --- a/tests/benchmark/mapserver_benchmark/download_test.go +++ b/tests/benchmark/mapserver_benchmark/download_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/stretchr/testify/require" @@ -65,7 +64,7 @@ func TestCreateCerts(t *testing.T) { require.NoError(t, err) require.Len(t, certs, count, "we have %d certificates", len(certs)) - f, err := os.Create("testdata/certs.pem.gz") + f, err := os.Create("../../testdata/certs.pem.gz") require.NoError(t, err) z, err := gzip.NewWriterLevel(f, gzip.BestCompression) require.NoError(t, err) @@ -93,7 +92,7 @@ func TestCreateCerts(t *testing.T) { } } sort.Strings(names) - f, err = os.Create("testdata/uniqueNames.txt") + f, err = os.Create("../../testdata/uniqueNames.txt") require.NoError(t, err) for _, n := range names { _, err = f.WriteString(n + "\n") @@ -102,18 +101,3 @@ func 
TestCreateCerts(t *testing.T) { err = f.Close() require.NoError(t, err) } - -func loadCertsFromPEM(t require.TestingT, raw []byte) []*ctx509.Certificate { - certs := make([]*ctx509.Certificate, 0) - for len(raw) > 0 { - var block *pem.Block - block, raw = pem.Decode(raw) - if block.Type != "CERTIFICATE" { - continue - } - c, err := ctx509.ParseTBSCertificate(block.Bytes) - require.NoError(t, err) - certs = append(certs, c) - } - return certs -} diff --git a/tests/benchmark/mapserver_benchmark/updater_test.go b/tests/benchmark/mapserver_benchmark/updater_test.go index 6108f7d9..9548528f 100644 --- a/tests/benchmark/mapserver_benchmark/updater_test.go +++ b/tests/benchmark/mapserver_benchmark/updater_test.go @@ -1,10 +1,8 @@ package benchmark import ( - "compress/gzip" "context" "fmt" - "io" "io/ioutil" "os" "os/exec" @@ -15,6 +13,7 @@ import ( ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" ) @@ -86,7 +85,7 @@ func benchmarkFullUpdate(b *testing.B, count int) { expensiveBenchmark(b, count) swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, "../../testdata/certs.pem.gz") require.NoError(b, err) certs := loadCertsFromPEM(b, raw) @@ -127,9 +126,10 @@ func TestDoUpdatesFromTestDataCerts(t *testing.T) { swapBack := swapDBs(t) defer swapBack() fmt.Println("Loading certs ...") - raw, err := gunzip(t, "testdata/certs.pem.gz") + raw, err := util.Gunzip("../../testdata/certs.pem.gz") + require.NoError(t, err) + certs, err := util.LoadCertsFromPEM(raw) require.NoError(t, err) - certs := loadCertsFromPEM(t, raw) emptyChains := make([][]*ctx509.Certificate, len(certs)) db.TruncateAllTablesForTest(t) @@ -149,12 +149,12 @@ func TestDoUpdatesFromTestDataCerts(t *testing.T) { root := up.GetRoot() err = 
up.Close() require.NoError(t, err) - err = ioutil.WriteFile("testdata/root100K.bin", root, 0664) + err = ioutil.WriteFile("../../testdata/root100K.bin", root, 0664) require.NoError(t, err) // dump contents using mysqldump err = exec.Command("bash", "-c", "mysqldump -u root fpki |gzip - "+ - ">testdata/dump100K.sql.gz").Run() + ">../../testdata/dump100K.sql.gz").Run() require.NoError(t, err) } @@ -166,9 +166,10 @@ func BenchmarkUpdateDomainEntriesUsingCerts10K(b *testing.B) { func benchmarkUpdateDomainEntriesUsingCerts(b *testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, "../../testdata/certs.pem.gz") + require.NoError(b, err) + certs, err := util.LoadCertsFromPEM(raw) require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) require.GreaterOrEqual(b, len(certs), count) certs = certs[:count] emptyChains := make([][]*ctx509.Certificate, len(certs)) @@ -199,7 +200,7 @@ func BenchmarkFetchUpdatedDomainHash10K(b *testing.B) { func benchmarkFetchUpdatedDomainHash(b *testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, "../../testdata/certs.pem.gz") require.NoError(b, err) certs := loadCertsFromPEM(b, raw) require.GreaterOrEqual(b, len(certs), count) @@ -234,7 +235,7 @@ func BenchmarkRetrieveDomainEntries10K(b *testing.B) { func benchmarkRetrieveDomainEntries(b *testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, "../../testdata/certs.pem.gz") require.NoError(b, err) certs := loadCertsFromPEM(b, raw) require.GreaterOrEqual(b, len(certs), count) @@ -271,7 +272,7 @@ func BenchmarkKeyValuePairToSMTInput10K(b *testing.B) { func benchmarkKeyValuePairToSMTInput(b *testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, 
"../../testdata/certs.pem.gz") require.NoError(b, err) certs := loadCertsFromPEM(b, raw) require.GreaterOrEqual(b, len(certs), count) @@ -311,7 +312,7 @@ func BenchmarkSmtUpdate10K(b *testing.B) { func benchmarkSmtUpdate(b *testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, "../../testdata/certs.pem.gz") require.NoError(b, err) certs := loadCertsFromPEM(b, raw) require.GreaterOrEqual(b, len(certs), count) @@ -354,7 +355,7 @@ func BenchmarkCommitChanges10K(b *testing.B) { func benchmarkCommitChanges(b *testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "testdata/certs.pem.gz") + raw, err := gunzip(b, "../../testdata/certs.pem.gz") require.NoError(b, err) certs := loadCertsFromPEM(b, raw) require.GreaterOrEqual(b, len(certs), count) @@ -406,22 +407,6 @@ func swapDBs(t require.TestingT) func() { return swapBack } -func gunzip(t require.TestingT, filename string) ([]byte, error) { - f, err := os.Open(filename) - require.NoError(t, err) - z, err := gzip.NewReader(f) - require.NoError(t, err) - - raw, theErr := io.ReadAll(z) - - err = z.Close() - require.NoError(t, err) - err = f.Close() - require.NoError(t, err) - - return raw, theErr -} - func expensiveBenchmark(b *testing.B, count int) { if count > 30000 && os.Getenv("FPKI_BENCH") == "" { b.Skip("benchmark is expensive. 
Skipping") diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index d18623f5..5f609e7e 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -10,6 +10,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" + "github.com/netsec-ethz/fpki/pkg/util" testdb "github.com/netsec-ethz/fpki/tests/pkg/db" ) @@ -36,6 +37,14 @@ func mainFunc() int { panicIfError(err) // Ingest the testdata. + raw, err := util.ReadAllGzippedFile("./tests/testdata/2-xenon2023.csv.gz") + panicIfError(err) + payloads, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) + panicIfError(err) + + certs, err := util.LoadCertsFromPEM(raw) + panicIfError(err) + _ = certs root, err := conn.LoadRoot(ctx) panicIfError(err) From bd67ff13502c4f34b3587c6693c8a347b93ab750 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 21 Mar 2023 17:10:49 +0100 Subject: [PATCH 058/187] Inserting certificates in DB doesn't require parent payload. To insert a certificate in the DB three fields are needed: the payload of the certificate, the ID of the certificate, and the ID of the parent. 
--- cmd/ingest/certProcessor.go | 8 +--- cmd/ingest/processor.go | 3 +- pkg/mapserver/internal/mockdb_for_testing.go | 13 ++++--- pkg/mapserver/updater/certs_updater.go | 39 +++++++++---------- pkg/mapserver/updater/certs_updater_test.go | 40 +++++++++++++------ pkg/mapserver/updater/updater.go | 41 +++++++------------- pkg/mapserver/updater/updater_test.go | 2 +- 7 files changed, 72 insertions(+), 74 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 9a6ccdba..9c3b5627 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -17,7 +17,6 @@ type CertificateNode struct { CertID *common.SHA256Output Cert *ctx509.Certificate ParentID *common.SHA256Output - Parent *ctx509.Certificate IsLeaf bool } @@ -28,7 +27,6 @@ type CertBatch struct { Expirations []*time.Time Certs []*ctx509.Certificate CertIDs []*common.SHA256Output - Parents []*ctx509.Certificate ParentIDs []*common.SHA256Output AreLeaves []bool } @@ -39,7 +37,6 @@ func NewCertificateBatch() *CertBatch { Expirations: make([]*time.Time, 0, BatchSize), Certs: make([]*ctx509.Certificate, 0, BatchSize), CertIDs: make([]*common.SHA256Output, 0, BatchSize), - Parents: make([]*ctx509.Certificate, 0, BatchSize), ParentIDs: make([]*common.SHA256Output, 0, BatchSize), AreLeaves: make([]bool, 0, BatchSize), } @@ -50,7 +47,6 @@ func (b *CertBatch) AddCertificate(c *CertificateNode) { b.Expirations = append(b.Expirations, &c.Cert.NotAfter) b.Certs = append(b.Certs, c.Cert) b.CertIDs = append(b.CertIDs, c.CertID) - b.Parents = append(b.Parents, c.Parent) b.ParentIDs = append(b.ParentIDs, c.ParentID) b.AreLeaves = append(b.AreLeaves, c.IsLeaf) } @@ -85,7 +81,7 @@ const ( ) type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, - []*ctx509.Certificate, []*common.SHA256Output, []*ctx509.Certificate, []bool) error + []*ctx509.Certificate, []*common.SHA256Output, []*common.SHA256Output, []bool) error func NewCertProcessor(conn db.Conn, 
incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { @@ -245,7 +241,7 @@ func (p *CertificateProcessor) createBatches() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, batch.CertIDs, batch.Parents, batch.AreLeaves) + batch.Certs, batch.CertIDs, batch.ParentIDs, batch.AreLeaves) if err != nil { panic(err) } diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 431ee8b4..512fe870 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -92,14 +92,13 @@ func (p *Processor) start() { // Process the parsed content into the DB, and from DB into SMT: go func() { for data := range p.certWithChainChan { - certs, certIDs, parents, parentIDs := updater.UnfoldCert(data.Cert, data.CertID, + certs, certIDs, parentIDs := updater.UnfoldCert(data.Cert, data.CertID, data.ChainPayloads, data.ChainIDs) for i := range certs { p.nodeChan <- &CertificateNode{ CertID: certIDs[i], Cert: certs[i], ParentID: parentIDs[i], - Parent: parents[i], IsLeaf: i == 0, // Only the first certificate is a leaf. 
} } diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index cf47c760..e2a790c4 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -38,7 +38,8 @@ func (d *MockDB) Close() error { return nil } func (d *MockDB) TruncateAllTables(ctx context.Context) error { return nil } -func (*MockDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { return nil, nil } +func (*MockDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { return nil, nil } +func (*MockDB) SaveRoot(ctx context.Context, root *common.SHA256Output) error { return nil } func (d *MockDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { return make([]bool, len(ids)), nil @@ -77,16 +78,16 @@ func (d *MockDB) RetrieveKeyValuePairTreeStruct(ctx context.Context, id []common return result, nil } -func (d *MockDB) RetrieveDomainEntries(ctx context.Context, ids []common.SHA256Output) ( +func (d *MockDB) RetrieveDomainEntries(ctx context.Context, ids []*common.SHA256Output) ( []*db.KeyValuePair, error) { result := make([]*db.KeyValuePair, 0, len(ids)) for _, key := range ids { - value, ok := d.DomainEntriesTable[key] + value, ok := d.DomainEntriesTable[*key] if !ok { continue } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) + result = append(result, &db.KeyValuePair{Key: *key, Value: value}) } return result, nil } @@ -138,6 +139,8 @@ func (d *MockDB) RemoveAllUpdatedDomains(ctx context.Context) error { return nil } -func (d *MockDB) UpdatedDomains() (chan []common.SHA256Output, chan error) { return nil, nil } +func (d *MockDB) UpdatedDomains(context.Context) ([]*common.SHA256Output, error) { + return nil, nil +} func (*MockDB) CleanupDirty(ctx context.Context) error { return nil } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index f6d5b4c4..c6417f44 100644 --- 
a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -153,27 +153,31 @@ func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ct // The parents returned slice has the same elements as the certificates returned slice. // When a certificate is root, it's corresponding parents entry is nil. // The leaf certificates are always returned at the head of the slice. -func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( - certificates, parents []*ctx509.Certificate) { +func UnfoldCerts(leafCerts []*ctx509.Certificate, chains [][]*ctx509.Certificate, +) (certificates []*ctx509.Certificate, certIDs, parentIDs []*common.SHA256Output) { - for len(certs) > 0 { + for len(leafCerts) > 0 { var pendingCerts []*ctx509.Certificate var pendingChains [][]*ctx509.Certificate - for i, c := range certs { + for i, c := range leafCerts { certificates = append(certificates, c) - var parent *ctx509.Certificate + ID := common.SHA256Hash32Bytes(c.Raw) + certIDs = append(certIDs, &ID) + var parentID *common.SHA256Output if len(chains[i]) > 0 { // The certificate has a trust chain (it is not root): add the first certificate // from the chain as the parent. - parent = chains[i][0] + parent := chains[i][0] + ID := common.SHA256Hash32Bytes(parent.Raw) + parentID = &ID // Add this parent to the back of the certs, plus the corresponding chain entry, // so that it's processed as a certificate. pendingCerts = append(pendingCerts, parent) pendingChains = append(pendingChains, chains[i][1:]) } - parents = append(parents, parent) + parentIDs = append(parentIDs, parentID) } - certs = pendingCerts + leafCerts = pendingCerts chains = pendingChains } return @@ -186,18 +190,15 @@ func UnfoldCerts(certs []*ctx509.Certificate, chains [][]*ctx509.Certificate) ( // and any posterior ancestors. 
func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, chain []*ctx509.Certificate, chainIDs []*common.SHA256Output, -) (certPayloads []*ctx509.Certificate, certIDs []*common.SHA256Output, - parentPayloads []*ctx509.Certificate, parentIDs []*common.SHA256Output) { +) (certs []*ctx509.Certificate, certIDs, parentIDs []*common.SHA256Output) { - certPayloads = make([]*ctx509.Certificate, 0, len(parentPayloads)+1) - certIDs = make([]*common.SHA256Output, 0, len(parentPayloads)+1) - parentPayloads = make([]*ctx509.Certificate, 0, len(parentPayloads)+1) - parentIDs = make([]*common.SHA256Output, 0, len(parentPayloads)+1) + certs = make([]*ctx509.Certificate, 0, len(chainIDs)+1) + certIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) + parentIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) // Always add the leaf certificate. - certPayloads = append(certPayloads, leafCert) + certs = append(certs, leafCert) certIDs = append(certIDs, certID) - parentPayloads = append(parentPayloads, chain[0]) parentIDs = append(parentIDs, chainIDs[0]) // Add the intermediate certs iff their payload is not nil. i := 0 @@ -208,16 +209,14 @@ func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, // There are no more parents to insert. return } - certPayloads = append(certPayloads, chain[i]) + certs = append(certs, chain[i]) certIDs = append(certIDs, chainIDs[i]) - parentPayloads = append(parentPayloads, chain[i+1]) parentIDs = append(parentIDs, chainIDs[i+1]) } // Add the root certificate (no parent) iff we haven't inserted it yet. 
if chain[i] != nil { - certPayloads = append(certPayloads, chain[i]) + certs = append(certs, chain[i]) certIDs = append(certIDs, chainIDs[i]) - parentPayloads = append(parentPayloads, nil) parentIDs = append(parentIDs, nil) } return diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index e7c694eb..46188e0c 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -162,10 +162,10 @@ func TestUpdateSameCertTwice(t *testing.T) { func TestUnfoldCerts(t *testing.T) { // `a` and `b` are leaves. `a` is root, `b` has `c`->`d` as its trust chain. - a := &ctx509.Certificate{} - b := &ctx509.Certificate{} - c := &ctx509.Certificate{} - d := &ctx509.Certificate{} + a := &ctx509.Certificate{Raw: []byte{0}} + b := &ctx509.Certificate{Raw: []byte{1}} + c := &ctx509.Certificate{Raw: []byte{2}} + d := &ctx509.Certificate{Raw: []byte{3}} certs := []*ctx509.Certificate{ a, b, @@ -174,23 +174,39 @@ func TestUnfoldCerts(t *testing.T) { nil, {c, d}, } - allCerts, parents := UnfoldCerts(certs, chains) + allCerts, IDs, parentIDs := UnfoldCerts(certs, chains) fmt.Printf("[%p %p %p %p]\n", a, b, c, d) fmt.Printf("%v\n", allCerts) - fmt.Printf("%v\n", parents) + fmt.Printf("%v\n", IDs) + fmt.Printf("%v\n", parentIDs) assert.Len(t, allCerts, 4) - assert.Len(t, parents, 4) + assert.Len(t, IDs, 4) + assert.Len(t, parentIDs, 4) + // Check payloads. assert.Equal(t, a, allCerts[0]) assert.Equal(t, b, allCerts[1]) assert.Equal(t, c, allCerts[2]) assert.Equal(t, d, allCerts[3]) - nilParent := (*ctx509.Certificate)(nil) - assert.Equal(t, nilParent, parents[0], "bad parent at 0") - assert.Equal(t, c, parents[1], "bad parent at 1") - assert.Equal(t, d, parents[2], "bad parent at 2") - assert.Equal(t, nilParent, parents[3], "bad parent at 3") + // Check IDs. 
+ aID := common.SHA256Hash32Bytes(a.Raw) + bID := common.SHA256Hash32Bytes(b.Raw) + cID := common.SHA256Hash32Bytes(c.Raw) + dID := common.SHA256Hash32Bytes(d.Raw) + + assert.Equal(t, aID, *IDs[0]) + assert.Equal(t, bID, *IDs[1]) + assert.Equal(t, cID, *IDs[2]) + assert.Equal(t, dID, *IDs[3]) + + // Check parent IDs. + + nilID := (*common.SHA256Output)(nil) + assert.Equal(t, nilID, parentIDs[0], "bad parent at 0") + assert.Equal(t, cID, *parentIDs[1], "bad parent at 1") + assert.Equal(t, dID, *parentIDs[2], "bad parent at 2") + assert.Equal(t, nilID, parentIDs[3], "bad parent at 3") } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 13816f5c..24170234 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -92,7 +92,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ } certChains = append(certChains, chain) } - certs, parents := UnfoldCerts(certs, certChains) + certs, IDs, parentIDs := UnfoldCerts(certs, certChains) areLeaves := make([]bool, 0, len(certs)) // The leaves are always at the head of the returned slice: just flag all leaves for the // length of the original certificate list. 
@@ -100,7 +100,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ areLeaves[i] = true } return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, - ComputeCertIDs(certs), parents, areLeaves) + IDs, parentIDs, areLeaves) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -246,31 +246,31 @@ func (mapUpdater *MapUpdater) Close() error { func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, - parents []*ctx509.Certificate, areLeaves []bool) error { + parentIDs []*common.SHA256Output, areLeaves []bool) error { payloads := make([][]byte, len(certs)) - parentIds := make([]*common.SHA256Output, len(certs)) for i, c := range certs { payloads[i] = c.Raw - if parents[i] != nil { - id := common.SHA256Hash32Bytes(parents[i].Raw) - parentIds[i] = &id - } + } - return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads, areLeaves) + return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads, areLeaves) } func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, - parents []*ctx509.Certificate, areLeaves []bool) error { + parentIDs []*common.SHA256Output, areLeaves []bool) error { // First check which certificates are already present in the DB. mask, err := conn.CheckCertsExist(ctx, ids) if err != nil { return err } + + // For all those certificates not already present in the DB, prepare three slices: IDs, + // payloads, and parentIDs. payloads := make([][]byte, 0, len(certs)) parentIds := make([]*common.SHA256Output, 0, len(certs)) + // Prepare new parents, IDs and payloads skipping those certificates already in the DB. 
runWhenFalse(mask, func(to, from int) { if to != from { // probably unnecessary check, as swapping with itself would be okay @@ -278,12 +278,7 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st names[to] = names[from] } payloads = append(payloads, certs[from].Raw) - var parent *common.SHA256Output - if parents[from] != nil { - id := common.SHA256Hash32Bytes(parents[from].Raw) - parent = &id - } - parentIds = append(parentIds, parent) + parentIds = append(parentIds, parentIDs[from]) }) // Trim the end of the original ID slice, as it contains values from the unmasked certificates. @@ -292,24 +287,14 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st // Only update those certificates that are not in the mask. return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads, areLeaves) - -} - -func ComputeCertIDs(certs []*ctx509.Certificate) []*common.SHA256Output { - ids := make([]*common.SHA256Output, len(certs)) - for i, c := range certs { - id := common.SHA256Hash32Bytes(c.Raw) - ids[i] = &id - } - return ids } func insertCerts(ctx context.Context, conn db.Conn, names [][]string, - ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte, + ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte, areLeaves []bool) error { // Send hash, parent hash, expiration and payload to the certs table. 
- if err := conn.InsertCerts(ctx, ids, parents, expirations, payloads); err != nil { + if err := conn.InsertCerts(ctx, ids, parentIDs, expirations, payloads); err != nil { return fmt.Errorf("inserting certificates: %w", err) } diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 2f44354c..f81158b4 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -28,10 +28,10 @@ func TestUpdateCerts(t *testing.T) { require.NoError(t, err) certs := []*x509.Certificate{} - // load test certs files, err := ioutil.ReadDir("./testdata/certs/") require.NoError(t, err, "ioutil.ReadDir") + for _, file := range files { cert, err := projectCommon.CTX509CertFromFile("./testdata/certs/" + file.Name()) require.NoError(t, err, "projectCommon.CTX509CertFromFile") From e5ad34fb651fadd00e112a27e31f936b95f4d7d3 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 22 Mar 2023 14:52:59 +0100 Subject: [PATCH 059/187] Remove the leaf marker isLeaf areLeaves. It's unnecessary if we use the domain names being nil when the certificate is non-leaf. --- cmd/ingest/certProcessor.go | 11 ++--- cmd/ingest/processor.go | 4 +- pkg/mapserver/updater/certs_updater.go | 42 +++++++++++++++-- pkg/mapserver/updater/certs_updater_test.go | 44 +++++++++++++++--- pkg/mapserver/updater/updater.go | 51 ++++++++------------- 5 files changed, 102 insertions(+), 50 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 9c3b5627..111d1812 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -17,7 +17,7 @@ type CertificateNode struct { CertID *common.SHA256Output Cert *ctx509.Certificate ParentID *common.SHA256Output - IsLeaf bool + Names []string } // CertBatch is an unwrapped collection of Certificate. 
@@ -28,7 +28,6 @@ type CertBatch struct { Certs []*ctx509.Certificate CertIDs []*common.SHA256Output ParentIDs []*common.SHA256Output - AreLeaves []bool } func NewCertificateBatch() *CertBatch { @@ -38,17 +37,15 @@ func NewCertificateBatch() *CertBatch { Certs: make([]*ctx509.Certificate, 0, BatchSize), CertIDs: make([]*common.SHA256Output, 0, BatchSize), ParentIDs: make([]*common.SHA256Output, 0, BatchSize), - AreLeaves: make([]bool, 0, BatchSize), } } func (b *CertBatch) AddCertificate(c *CertificateNode) { - b.Names = append(b.Names, updater.ExtractCertDomains(c.Cert)) + b.Names = append(b.Names, c.Names) b.Expirations = append(b.Expirations, &c.Cert.NotAfter) b.Certs = append(b.Certs, c.Cert) b.CertIDs = append(b.CertIDs, c.CertID) b.ParentIDs = append(b.ParentIDs, c.ParentID) - b.AreLeaves = append(b.AreLeaves, c.IsLeaf) } func (b *CertBatch) IsFull() bool { @@ -81,7 +78,7 @@ const ( ) type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, - []*ctx509.Certificate, []*common.SHA256Output, []*common.SHA256Output, []bool) error + []*ctx509.Certificate, []*common.SHA256Output, []*common.SHA256Output) error func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { @@ -241,7 +238,7 @@ func (p *CertificateProcessor) createBatches() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, batch.CertIDs, batch.ParentIDs, batch.AreLeaves) + batch.Certs, batch.CertIDs, batch.ParentIDs) if err != nil { panic(err) } diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index 512fe870..e545b212 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -92,14 +92,14 @@ func (p *Processor) start() { // Process the parsed content into the DB, and from DB into SMT: go func() { for data := range p.certWithChainChan { - 
certs, certIDs, parentIDs := updater.UnfoldCert(data.Cert, data.CertID, + certs, certIDs, parentIDs, names := updater.UnfoldCert(data.Cert, data.CertID, data.ChainPayloads, data.ChainIDs) for i := range certs { p.nodeChan <- &CertificateNode{ CertID: certIDs[i], Cert: certs[i], ParentID: parentIDs[i], - IsLeaf: i == 0, // Only the first certificate is a leaf. + Names: names[i], } } } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index c6417f44..0fdafbe4 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -148,13 +148,36 @@ func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ct } // UnfoldCerts takes a slice of certificates and chains with the same length, -// and returns all certificates once, without duplicates, and a pointer to the parent in the +// and returns all certificates once, without duplicates, and the ID of the parent in the // trust chain, or nil if the certificate is root. // The parents returned slice has the same elements as the certificates returned slice. // When a certificate is root, it's corresponding parents entry is nil. -// The leaf certificates are always returned at the head of the slice. +// Additionally, all the names of the leaf certificates are returned in its corresponding position +// in the names slice iff the certificate is a leaf one. If it is not, nil is returned in that +// position instead. +// +// The leaf certificates are always returned at the head of the slice, which means, among others, +// that once a nil value is found in the names slice, the rest of the slice will be nil as well. 
func UnfoldCerts(leafCerts []*ctx509.Certificate, chains [][]*ctx509.Certificate, -) (certificates []*ctx509.Certificate, certIDs, parentIDs []*common.SHA256Output) { +) ( + certificates []*ctx509.Certificate, + certIDs []*common.SHA256Output, + parentIDs []*common.SHA256Output, + names [][]string, +) { + + // extractNames is the function that extracts the names from a certificate. It starts being + // a regular names extraction, but after processing all leaves it is assigned to a function + // that always returns nil. + extractNames := func(c *ctx509.Certificate) []string { + return ExtractCertDomains(c) + } + // ChangeFcn changes extractNames to always return nil. + changeFcn := func() { + extractNames = func(c *ctx509.Certificate) []string { + return nil + } + } for len(leafCerts) > 0 { var pendingCerts []*ctx509.Certificate @@ -176,7 +199,9 @@ func UnfoldCerts(leafCerts []*ctx509.Certificate, chains [][]*ctx509.Certificate pendingChains = append(pendingChains, chains[i][1:]) } parentIDs = append(parentIDs, parentID) + names = append(names, extractNames(c)) } + changeFcn() // This will change the function `extractNames` to always return nil. leafCerts = pendingCerts chains = pendingChains } @@ -190,16 +215,23 @@ func UnfoldCerts(leafCerts []*ctx509.Certificate, chains [][]*ctx509.Certificate // and any posterior ancestors. func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, chain []*ctx509.Certificate, chainIDs []*common.SHA256Output, -) (certs []*ctx509.Certificate, certIDs, parentIDs []*common.SHA256Output) { +) ( + certs []*ctx509.Certificate, + certIDs []*common.SHA256Output, + parentIDs []*common.SHA256Output, + names [][]string, +) { certs = make([]*ctx509.Certificate, 0, len(chainIDs)+1) certIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) parentIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) + names = make([][]string, 0, len(chainIDs)+1) // Always add the leaf certificate. 
certs = append(certs, leafCert) certIDs = append(certIDs, certID) parentIDs = append(parentIDs, chainIDs[0]) + names = append(names, ExtractCertDomains(leafCert)) // Add the intermediate certs iff their payload is not nil. i := 0 for ; i < len(chain)-1; i++ { @@ -212,12 +244,14 @@ func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, certs = append(certs, chain[i]) certIDs = append(certIDs, chainIDs[i]) parentIDs = append(parentIDs, chainIDs[i+1]) + names = append(names, nil) } // Add the root certificate (no parent) iff we haven't inserted it yet. if chain[i] != nil { certs = append(certs, chain[i]) certIDs = append(certIDs, chainIDs[i]) parentIDs = append(parentIDs, nil) + names = append(names, nil) } return } diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index 46188e0c..162483df 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -7,6 +7,7 @@ import ( "testing" ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -162,10 +163,35 @@ func TestUpdateSameCertTwice(t *testing.T) { func TestUnfoldCerts(t *testing.T) { // `a` and `b` are leaves. `a` is root, `b` has `c`->`d` as its trust chain. 
- a := &ctx509.Certificate{Raw: []byte{0}} - b := &ctx509.Certificate{Raw: []byte{1}} - c := &ctx509.Certificate{Raw: []byte{2}} - d := &ctx509.Certificate{Raw: []byte{3}} + a := &ctx509.Certificate{ + Raw: []byte{0}, + Subject: pkix.Name{ + CommonName: "a", + }, + DNSNames: []string{"a", "a", "a.com"}, + } + b := &ctx509.Certificate{ + Raw: []byte{1}, + Subject: pkix.Name{ + CommonName: "b", + }, + DNSNames: []string{"b", "b", "b.com"}, + } + c := &ctx509.Certificate{ + Raw: []byte{1}, + Subject: pkix.Name{ + CommonName: "c", + }, + DNSNames: []string{"c", "c", "c.com"}, + } + d := &ctx509.Certificate{ + Raw: []byte{3}, + Subject: pkix.Name{ + CommonName: "d", + }, + DNSNames: []string{"d", "d", "d.com"}, + } + certs := []*ctx509.Certificate{ a, b, @@ -174,7 +200,7 @@ func TestUnfoldCerts(t *testing.T) { nil, {c, d}, } - allCerts, IDs, parentIDs := UnfoldCerts(certs, chains) + allCerts, IDs, parentIDs, names := UnfoldCerts(certs, chains) fmt.Printf("[%p %p %p %p]\n", a, b, c, d) fmt.Printf("%v\n", allCerts) @@ -203,10 +229,16 @@ func TestUnfoldCerts(t *testing.T) { assert.Equal(t, dID, *IDs[3]) // Check parent IDs. - nilID := (*common.SHA256Output)(nil) assert.Equal(t, nilID, parentIDs[0], "bad parent at 0") assert.Equal(t, cID, *parentIDs[1], "bad parent at 1") assert.Equal(t, dID, *parentIDs[2], "bad parent at 2") assert.Equal(t, nilID, parentIDs[3], "bad parent at 3") + + // Check domain names. 
+ nilNames := ([]string)(nil) + assert.ElementsMatch(t, []string{"a", "a.com"}, names[0]) // root but also a leaf + assert.ElementsMatch(t, []string{"b", "b.com"}, names[1]) // just a leaf + assert.Equal(t, nilNames, names[2]) // not a leaf + assert.Equal(t, nilNames, names[3]) // not a leaf } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 24170234..d5d92d0f 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -70,7 +70,6 @@ func (u *MapUpdater) UpdateNextBatch(ctx context.Context) (int, error) { // UpdateCertsLocally: add certs (in the form of asn.1 encoded byte arrays) directly without querying log func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [][]byte, certChainList [][][]byte) error { - names := make([][]string, 0, len(certList)) // Set of names per certificate expirations := make([]*time.Time, 0, len(certList)) certs := make([]*ctx509.Certificate, 0, len(certList)) certChains := make([][]*ctx509.Certificate, 0, len(certList)) @@ -80,7 +79,6 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ return err } certs = append(certs, cert) - names = append(names, ExtractCertDomains(cert)) expirations = append(expirations, &cert.NotAfter) chain := make([]*ctx509.Certificate, len(certChainList[i])) @@ -92,15 +90,9 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ } certChains = append(certChains, chain) } - certs, IDs, parentIDs := UnfoldCerts(certs, certChains) - areLeaves := make([]bool, 0, len(certs)) - // The leaves are always at the head of the returned slice: just flag all leaves for the - // length of the original certificate list. 
- for i := range certList { - areLeaves[i] = true - } + certs, IDs, parentIDs, names := UnfoldCerts(certs, certChains) return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, - IDs, parentIDs, areLeaves) + IDs, parentIDs) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -246,19 +238,19 @@ func (mapUpdater *MapUpdater) Close() error { func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, - parentIDs []*common.SHA256Output, areLeaves []bool) error { + parentIDs []*common.SHA256Output) error { payloads := make([][]byte, len(certs)) for i, c := range certs { payloads[i] = c.Raw } - return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads, areLeaves) + return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) } func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, - parentIDs []*common.SHA256Output, areLeaves []bool) error { + parentIDs []*common.SHA256Output) error { // First check which certificates are already present in the DB. mask, err := conn.CheckCertsExist(ctx, ids) @@ -267,31 +259,28 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st } // For all those certificates not already present in the DB, prepare three slices: IDs, - // payloads, and parentIDs. + // names, payloads, and parentIDs. payloads := make([][]byte, 0, len(certs)) - parentIds := make([]*common.SHA256Output, 0, len(certs)) // Prepare new parents, IDs and payloads skipping those certificates already in the DB. 
runWhenFalse(mask, func(to, from int) { - if to != from { // probably unnecessary check, as swapping with itself would be okay - ids[to] = ids[from] - names[to] = names[from] - } + ids[to] = ids[from] + names[to] = names[from] + parentIDs[to] = parentIDs[from] payloads = append(payloads, certs[from].Raw) - parentIds = append(parentIds, parentIDs[from]) }) // Trim the end of the original ID slice, as it contains values from the unmasked certificates. ids = ids[:len(payloads)] names = names[:len(payloads)] + parentIDs = parentIDs[:len(payloads)] // Only update those certificates that are not in the mask. - return insertCerts(ctx, conn, names, ids, parentIds, expirations, payloads, areLeaves) + return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) } func insertCerts(ctx context.Context, conn db.Conn, names [][]string, - ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte, - areLeaves []bool) error { + ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { // Send hash, parent hash, expiration and payload to the certs table. if err := conn.InsertCerts(ctx, ids, parentIDs, expirations, payloads); err != nil { @@ -304,16 +293,16 @@ func insertCerts(ctx context.Context, conn db.Conn, names [][]string, newIDs := make([]*common.SHA256Output, 0, estimatedSize) domainIDs := make([]*common.SHA256Output, 0, estimatedSize) for i, names := range names { - if areLeaves[i] { - // If the certificate is a leaf certificate, insert one entry per name. - for _, name := range names { - newNames = append(newNames, name) - newIDs = append(newIDs, ids[i]) - domainID := common.SHA256Hash32Bytes([]byte(name)) - domainIDs = append(domainIDs, &domainID) - } + // Iff the certificate is a leaf certificate it will have a non-nil names slice: insert + // one entry per name. 
+ for _, name := range names { + newNames = append(newNames, name) + newIDs = append(newIDs, ids[i]) + domainID := common.SHA256Hash32Bytes([]byte(name)) + domainIDs = append(domainIDs, &domainID) } } + // Push the changes of the domains to the DB. if err := conn.UpdateDomainsWithCerts(ctx, newIDs, domainIDs, newNames); err != nil { return fmt.Errorf("updating domains: %w", err) } From 7d76ccde710b3417e06562a1019eb361c49efafb Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 22 Mar 2023 20:22:04 +0100 Subject: [PATCH 060/187] CoalescePayloadsForDirtyDomains in updater, config has a DBName. There is a helper function in DB to coalesce several IDs. Use it also from the mapserver IT. --- cmd/ingest/coalescePayloads.go | 59 ++------------ pkg/db/conf.go | 1 + pkg/db/db.go | 6 ++ pkg/db/mysql/init.go | 7 +- pkg/db/mysql/mysql.go | 17 ++++ pkg/mapserver/internal/mockdb_for_testing.go | 4 + pkg/mapserver/updater/updater.go | 68 ++++++++++++++++ pkg/util/certificate.go | 27 +++++++ pkg/util/io.go | 80 +++++++++++-------- .../mapserver_benchmark/updater_test.go | 2 +- tests/integration/mapserver/main.go | 67 ++++++++++++---- tests/pkg/db/testDB.go | 13 +++ 12 files changed, 251 insertions(+), 100 deletions(-) create mode 100644 pkg/util/certificate.go diff --git a/cmd/ingest/coalescePayloads.go b/cmd/ingest/coalescePayloads.go index c8e33d06..7cf89353 100644 --- a/cmd/ingest/coalescePayloads.go +++ b/cmd/ingest/coalescePayloads.go @@ -3,63 +3,20 @@ package main import ( "context" "fmt" - "sync" - "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) -func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) { - // Get all dirty domain IDs. - domainIDs, err := conn.UpdatedDomains(ctx) - if err != nil { - panic(err) - } - - // Start NumDBWriters workers. 
+func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { fmt.Printf("Starting %d workers coalescing payloads for modified domains\n", NumDBWriters) - ch := make(chan []*common.SHA256Output) - wg := sync.WaitGroup{} - wg.Add(NumDBWriters) - for i := 0; i < NumDBWriters; i++ { - go func() { - defer wg.Done() - for ids := range ch { - // We receive ids as a slice of IDs. We ought to build a long slice of bytes - // with all the bytes concatenated. - param := make([]byte, len(ids)*common.SHA256Size) - for i, id := range ids { - copy(param[i*common.SHA256Size:], id[:]) - } - // Now call the stored procedure with this parameter. - str := "CALL calc_several_domain_payloads(?)" - _, err := conn.DB().Exec(str, param) - if err != nil { - panic(fmt.Errorf("error coalescing payload for domains: %w", err)) - } - } - }() - } - - // Split the dirty domain ID list in NumDBWriters - batchSize := len(domainIDs) / NumDBWriters - // First workers handle one more ID than the rest, to take into account also the remainder. - for i := 0; i < len(domainIDs)%NumDBWriters; i++ { - b := domainIDs[i*(batchSize+1) : (i+1)*(batchSize+1)] - ch <- b - } - // The rest of the workers will do a batchSize-sized item. - restOfWorkersCount := NumDBWriters - (len(domainIDs) % NumDBWriters) - domainIDs = domainIDs[(len(domainIDs)%NumDBWriters)*(batchSize+1):] - for i := 0; i < restOfWorkersCount; i++ { - b := domainIDs[i*batchSize : (i+1)*batchSize] - ch <- b + // Use NumDBWriters. + err := updater.CoalescePayloadsForDirtyDomains(ctx, conn, NumDBWriters) + if err != nil { + return err } - // Close the batches channel. - close(ch) - // And wait for all workers to finish. - wg.Wait() - + // Print message if no errors. 
fmt.Println("Done coalescing.") + return nil } diff --git a/pkg/db/conf.go b/pkg/db/conf.go index 65f076a6..4dbd04eb 100644 --- a/pkg/db/conf.go +++ b/pkg/db/conf.go @@ -5,6 +5,7 @@ const KeyDBName = "DBNAME" // Configuration for the db connection type Configuration struct { Dsn string + DBName string Values map[string]string CheckSchema bool // indicates if opening the connection checks the health of the schema } diff --git a/pkg/db/db.go b/pkg/db/db.go index 3a52b54e..46b04404 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -27,6 +27,12 @@ type Conn interface { LoadRoot(ctx context.Context) (*common.SHA256Output, error) SaveRoot(ctx context.Context, root *common.SHA256Output) error + // CoalesceDomainsPayloads takes some IDs (which should come from the dirty table) and + // retrieves the payloads of all certificates for each domain, represented by each ID. + // With those payloads it writes an entry in domain_payloads and computes the SHA256 of it. + // This is done via a stored procedure, to avoid moving data from DB to server. 
+ CoalesceDomainsPayloads(ctx context.Context, ids []*common.SHA256Output) error + ////////////////////////////////////////////////////////////////// // check if the functions below are needed after the new design // ////////////////////////////////////////////////////////////////// diff --git a/pkg/db/mysql/init.go b/pkg/db/mysql/init.go index 4d69955e..c032c22b 100644 --- a/pkg/db/mysql/init.go +++ b/pkg/db/mysql/init.go @@ -15,7 +15,11 @@ func Connect(config *db.Configuration) (db.Conn, error) { if config == nil { return nil, fmt.Errorf("nil config not allowed") } - config.Dsn = parseDSN(config) + if config.Dsn == "" { + config.Dsn = parseDSN(config) + config.DBName = config.Values[db.KeyDBName] + delete(config.Values, db.KeyDBName) + } db, err := connect(config) if err != nil { @@ -64,7 +68,6 @@ func parseDSN(config *db.Configuration) string { delete(val, keyHost) delete(val, keyPort) delete(val, keyLocalSocket) - delete(val, db.KeyDBName) return dsnString } diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index ea86cb52..d6fa81b6 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -264,6 +264,23 @@ func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs return err } +func (c *mysqlDB) CoalesceDomainsPayloads(ctx context.Context, ids []*common.SHA256Output) error { + + // We receive ids as a slice of IDs. We ought to build a long slice of bytes + // with all the bytes concatenated. + param := make([]byte, len(ids)*common.SHA256Size) + for i, id := range ids { + copy(param[i*common.SHA256Size:], id[:]) + } + // Now call the stored procedure with this parameter. + str := "CALL calc_several_domain_payloads(?)" + _, err := c.db.Exec(str, param) + if err != nil { + return fmt.Errorf("coalescing payload for domains: %w", err) + } + return nil +} + // repeatStmt returns ( (?,..dimensions..,?), ...elemCount... ) // Use it like repeatStmt(1, len(IDs)) to obtain (?,?,...) 
func repeatStmt(elemCount int, dimensions int) string { diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/mapserver/internal/mockdb_for_testing.go index e2a790c4..c88699b1 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/mapserver/internal/mockdb_for_testing.go @@ -144,3 +144,7 @@ func (d *MockDB) UpdatedDomains(context.Context) ([]*common.SHA256Output, error) } func (*MockDB) CleanupDirty(ctx context.Context) error { return nil } + +func (*MockDB) CoalesceDomainsPayloads(context.Context, []*common.SHA256Output) error { + return nil +} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index d5d92d0f..ddaf4d4c 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -5,6 +5,7 @@ import ( "context" "fmt" "sort" + "sync" "time" _ "github.com/go-sql-driver/mysql" @@ -279,6 +280,73 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) } +func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn, numDBWriters int) error { + // Get all dirty domain IDs. + domainIDs, err := conn.UpdatedDomains(ctx) + if err != nil { + return err + } + + // Start numWriters workers. + errCh := make(chan error) + ch := make(chan []*common.SHA256Output) + wg := sync.WaitGroup{} + wg.Add(numDBWriters) + for i := 0; i < numDBWriters; i++ { + go func() { + defer wg.Done() + for ids := range ch { + err := conn.CoalesceDomainsPayloads(ctx, ids) + if err != nil { + errCh <- err + return + } + } + errCh <- nil + }() + } + + // Split the dirty domain ID list in numWriters + batchSize := len(domainIDs) / numDBWriters + // First workers handle one more ID than the rest, to take into account also the remainder. 
+ for i := 0; i < len(domainIDs)%numDBWriters; i++ { + b := domainIDs[i*(batchSize+1) : (i+1)*(batchSize+1)] + ch <- b + } + // The rest of the workers will do a batchSize-sized item. + restOfWorkersCount := numDBWriters - (len(domainIDs) % numDBWriters) + domainIDs = domainIDs[(len(domainIDs)%numDBWriters)*(batchSize+1):] + for i := 0; i < restOfWorkersCount; i++ { + b := domainIDs[i*batchSize : (i+1)*batchSize] + ch <- b + } + + // Close the batches channel. + close(ch) + + var errs []error + go func() { + // Absorb any errors encountered. + for err := range errCh { + if err != nil { + errs = append(errs, err) + } + } + }() + + // And wait for all workers to finish. + wg.Wait() + + // Any errors? + close(errCh) + if len(errs) > 0 { + // There have been errors. Just return the first one. + return fmt.Errorf("encountered %d errors, first one is: %w", len(errs), errs[0]) + } + + return nil +} + func insertCerts(ctx context.Context, conn db.Conn, names [][]string, ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { diff --git a/pkg/util/certificate.go b/pkg/util/certificate.go new file mode 100644 index 00000000..115ba8c3 --- /dev/null +++ b/pkg/util/certificate.go @@ -0,0 +1,27 @@ +package util + +import ( + "time" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" +) + +// ExtractNames returns a list of lists of names. Since each certificate contains several names, +// the function returns a collection of slices of names, extracted from each certificate's SAN. +func ExtractNames(certs []*ctx509.Certificate) [][]string { + names := make([][]string, len(certs)) + for i, c := range certs { + names[i] = updater.ExtractCertDomains(c) + } + return names +} + +// ExtractExpirations simply returns all expiration times in order. 
+func ExtractExpirations(certs []*ctx509.Certificate) []*time.Time { + expirations := make([]*time.Time, len(certs)) + for i, c := range certs { + expirations[i] = &c.NotAfter + } + return expirations +} diff --git a/pkg/util/io.go b/pkg/util/io.go index 04a24cac..ebfdf4e7 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -12,6 +12,7 @@ import ( ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) const ( @@ -19,7 +20,7 @@ const ( CertChainColumn = 4 ) -func Gunzip(filename string) ([]byte, error) { +func ReadAllGzippedFile(filename string) ([]byte, error) { f, err := os.Open(filename) if err != nil { return nil, err @@ -43,11 +44,11 @@ func Gunzip(filename string) ([]byte, error) { return raw, err } -func LoadCertsFromPEM(raw []byte) ([]*ctx509.Certificate, error) { +func LoadCertsFromPEM(buff []byte) ([]*ctx509.Certificate, error) { certs := make([]*ctx509.Certificate, 0) - for len(raw) > 0 { + for len(buff) > 0 { var block *pem.Block - block, raw = pem.Decode(raw) + block, buff = pem.Decode(buff) if block.Type != "CERTIFICATE" { continue } @@ -61,57 +62,72 @@ func LoadCertsFromPEM(raw []byte) ([]*ctx509.Certificate, error) { return certs, nil } -// LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of IDs and payloads for -// each certificate and its ancestry. -// -// a slice containing N elements, which represent the certificate -// chain from the leaf to the root certificate. -func LoadCertsAndChainsFromCSV(raw []byte) ([]*ctx509.Certificate, error) { - r := bytes.NewReader(raw) +// LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of the leaf certificate +// payload, its ID, its parent ID, and its names, for each certificate and its ancestry chain. +// The returned names contains nil unless the corresponding certificate is a leaf certificate. 
+func LoadCertsAndChainsFromCSV( + fileContents []byte, +) (payloads []*ctx509.Certificate, + IDs []*common.SHA256Output, + parentIDs []*common.SHA256Output, + names [][]string, + errRet error, +) { + + r := bytes.NewReader(fileContents) reader := csv.NewReader(r) reader.FieldsPerRecord = -1 // don't check number of fields records, err := reader.ReadAll() if err != nil { - return nil, err + errRet = err + return } + leafs := make([]*ctx509.Certificate, 0, len(payloads)) + chains := make([][]*ctx509.Certificate, 0, len(payloads)) for _, fields := range records { if len(fields) == 0 { continue } - // Parse the certificate. - rawBytes, err := base64.StdEncoding.DecodeString(fields[CertificateColumn]) - if err != nil { - return nil, err - } - certID := common.SHA256Hash32Bytes(rawBytes) - cert, err := ctx509.ParseCertificate(rawBytes) + cert, err := ParseCertFromCSVField(fields[CertificateColumn]) if err != nil { - return nil, err + errRet = err + return } + leafs = append(leafs, cert) // Parse the chain. // The certificate chain is a list of base64 strings separated by semicolon (;). strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) - chainIDs := make([]*common.SHA256Output, len(strs)) for i, s := range strs { - rawBytes, err = base64.StdEncoding.DecodeString(s) - if err != nil { - return nil, err - } - chain[i], err = ctx509.ParseCertificate(rawBytes) + chain[i], err = ParseCertFromCSVField(s) if err != nil { - return nil, err + errRet = err + return } } + chains = append(chains, chain) + } - _ = certID - _ = cert - _ = chainIDs + // Unfold the received certificates. + payloads, IDs, parentIDs, names = updater.UnfoldCerts(leafs, chains) + return +} +// ParseCertFromCSVField takes a row from a CSV encoding certs and chains in base64 and returns +// the CT x509 Certificate or error. +func ParseCertFromCSVField(field string) (*ctx509.Certificate, error) { + // Base64 to raw bytes. 
+ rawBytes, err := base64.StdEncoding.DecodeString(field) + if err != nil { + return nil, err } - - return nil, nil + // Parse the certificate. + cert, err := ctx509.ParseCertificate(rawBytes) + if err != nil { + return nil, err + } + return cert, nil } diff --git a/tests/benchmark/mapserver_benchmark/updater_test.go b/tests/benchmark/mapserver_benchmark/updater_test.go index 9548528f..b5b257f1 100644 --- a/tests/benchmark/mapserver_benchmark/updater_test.go +++ b/tests/benchmark/mapserver_benchmark/updater_test.go @@ -126,7 +126,7 @@ func TestDoUpdatesFromTestDataCerts(t *testing.T) { swapBack := swapDBs(t) defer swapBack() fmt.Println("Loading certs ...") - raw, err := util.Gunzip("../../testdata/certs.pem.gz") + raw, err := util.ReadAllGzippedFile("../../testdata/certs.pem.gz") require.NoError(t, err) certs, err := util.LoadCertsFromPEM(raw) require.NoError(t, err) diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 5f609e7e..19626a91 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -2,7 +2,6 @@ package main import ( "context" - "encoding/hex" "fmt" "os" "time" @@ -10,10 +9,16 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" "github.com/netsec-ethz/fpki/pkg/util" testdb "github.com/netsec-ethz/fpki/tests/pkg/db" ) +const ( + BatchSize = 1000 + DBName = "mapServerIT" +) + func main() { os.Exit(mainFunc()) } @@ -23,18 +28,41 @@ func mainFunc() int { defer cancelF() // Create an empty test DB - dbName := "mapServerIT" - err := testdb.CreateTestDB(ctx, dbName) + err := testdb.CreateTestDB(ctx, DBName) + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(DBName)) panicIfError(err) defer func() { - // TODO(juagargi) destroy the DB with + err := testdb.RemoveTestDB(ctx, *config) + 
panicIfError(err) }() + // Test connect several times. + conn, err := mysql.Connect(config) + panicIfError(err) + panicIfError(conn.Close()) + conn, err = mysql.Connect(config) + panicIfError(err) + panicIfError(conn.Close()) + + // Ingest data. + ingestData(ctx, config) + + // Get some proofs. + retrieveSomeProofs(ctx, config) + + // Compare expected results + return 0 +} + +func ingestData(ctx context.Context, config *db.Configuration) { // Connect to the test DB - // config := db.NewConfig(mysql.WithDefaults(), mysql.WithEnvironment(), db.WithDB("mapserverIT")) - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) conn, err := mysql.Connect(config) panicIfError(err) + defer func() { + fmt.Println("deleteme closing") + err := conn.Close() + panicIfError(err) + }() // Ingest the testdata. raw, err := util.ReadAllGzippedFile("./tests/testdata/2-xenon2023.csv.gz") @@ -42,15 +70,29 @@ func mainFunc() int { payloads, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) panicIfError(err) - certs, err := util.LoadCertsFromPEM(raw) + // Insert the certificates into the test DB in batches. 
+ expirations := util.ExtractExpirations(payloads) + for i := 0; i < (len(names) / BatchSize); i++ { + b := i * BatchSize // begin + e := (i + 1) * BatchSize // end + err = updater.UpdateCertsWithKeepExisting(ctx, conn, names[b:e], expirations[b:e], + payloads[b:e], IDs[b:e], parentIDs[b:e]) + panicIfError(err) + } + // Remainder of the certificates + b := (len(names) / BatchSize) * BatchSize + err = updater.UpdateCertsWithKeepExisting(ctx, conn, names[b:], expirations[b:], + payloads[b:], IDs[b:], parentIDs[b:]) panicIfError(err) - _ = certs - root, err := conn.LoadRoot(ctx) + err = updater.CoalescePayloadsForDirtyDomains(ctx, conn, 2) panicIfError(err) - fmt.Printf("root is %s\n", hex.EncodeToString((*root)[:])) +} - // Ingest mock data +func retrieveSomeProofs(ctx context.Context, config *db.Configuration) { + // Connect to the test DB + conn, err := mysql.Connect(config) + panicIfError(err) // Retrieve some domains res, err := responder.NewMapResponder(ctx, "./config/mapserver_config.json", conn) @@ -58,9 +100,6 @@ func mainFunc() int { p, err := res.GetProof(ctx, "aname.com") panicIfError(err) _ = p - - // Compare results - return 0 } func panicIfError(err error) { diff --git a/tests/pkg/db/testDB.go b/tests/pkg/db/testDB.go index fd238f41..c2139d70 100644 --- a/tests/pkg/db/testDB.go +++ b/tests/pkg/db/testDB.go @@ -27,6 +27,19 @@ func CreateTestDB(ctx context.Context, dbName string) error { return nil } +func RemoveTestDB(ctx context.Context, config db.Configuration) error { + conn, err := Connect(&config) + if err != nil { + return fmt.Errorf("connecting to test DB: %w", err) + } + str := fmt.Sprintf("DROP DATABASE IF EXISTS %s", config.DBName) + _, err = conn.DB().ExecContext(ctx, str) + if err != nil { + return fmt.Errorf("removing the database: %w", err) + } + return nil +} + func Connect(config *db.Configuration) (db.Conn, error) { return mysql.Connect(config) } From 9a0fe65b7700a3880f47024a3e8d5a204aa0f9e9 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Wed, 22 Mar 2023 20:32:35 +0100 Subject: [PATCH 061/187] Minor changes to mapserver IT. --- cmd/ingest/smtUpdater.go | 2 +- pkg/mapserver/responder/responder.go | 8 +++++--- tests/integration/mapserver/main.go | 7 ++++--- 3 files changed, 10 insertions(+), 7 deletions(-) diff --git a/cmd/ingest/smtUpdater.go b/cmd/ingest/smtUpdater.go index 737b933a..5f0d0c79 100644 --- a/cmd/ingest/smtUpdater.go +++ b/cmd/ingest/smtUpdater.go @@ -21,7 +21,7 @@ type SMTUpdater struct { func NewSMTUpdater(conn db.Conn, root *common.SHA256Output, cacheHeight int) *SMTUpdater { var rootSlice []byte if root != nil { - rootSlice = (*root)[:] + rootSlice = root[:] } smtTrie, err := trie.NewTrie(rootSlice, common.SHA256Hash, conn) if err != nil { diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index 3bb36d27..16172005 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -19,13 +19,15 @@ type MapResponder struct { func NewMapResponder(ctx context.Context, configFile string, conn db.Conn) (*MapResponder, error) { // Load root. - root, err := conn.LoadRoot(ctx) - if err != nil { + var root []byte + if rootID, err := conn.LoadRoot(ctx); err != nil { return nil, err + } else if root != nil { + root = rootID[:] } // Build the Sparse Merkle Tree (SMT). 
- smt, err := trie.NewTrie(root[:], common.SHA256Hash, conn) + smt, err := trie.NewTrie(root, common.SHA256Hash, conn) if err != nil { return nil, fmt.Errorf("error loading SMT: %w", err) } diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 19626a91..35f627a6 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -2,7 +2,6 @@ package main import ( "context" - "fmt" "os" "time" @@ -59,7 +58,6 @@ func ingestData(ctx context.Context, config *db.Configuration) { conn, err := mysql.Connect(config) panicIfError(err) defer func() { - fmt.Println("deleteme closing") err := conn.Close() panicIfError(err) }() @@ -95,7 +93,10 @@ func retrieveSomeProofs(ctx context.Context, config *db.Configuration) { panicIfError(err) // Retrieve some domains - res, err := responder.NewMapResponder(ctx, "./config/mapserver_config.json", conn) + res, err := responder.NewMapResponder( + ctx, + "./tests/integration/mapserver/config/mapserver_config.json", + conn) panicIfError(err) p, err := res.GetProof(ctx, "aname.com") panicIfError(err) From 17eff743232daa605fee7d83fd04d2f434c9be82 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 22 Mar 2023 20:44:38 +0100 Subject: [PATCH 062/187] Simplify SMT updater. --- cmd/ingest/main.go | 3 +- cmd/ingest/smtUpdater.go | 78 +++++++++++----------------------------- 2 files changed, 22 insertions(+), 59 deletions(-) diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index 0bdba9ce..fb51f416 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -134,8 +134,7 @@ func mainFunction() int { // Now start processing the changed domains into the SMT: smtProcessor := NewSMTUpdater(conn, root, 32) - smtProcessor.Start(ctx) - err = smtProcessor.Wait() + err = smtProcessor.Update(ctx) exitIfError(err) // Cleanup dirty entries. 
diff --git a/cmd/ingest/smtUpdater.go b/cmd/ingest/smtUpdater.go index 5f0d0c79..2e62c041 100644 --- a/cmd/ingest/smtUpdater.go +++ b/cmd/ingest/smtUpdater.go @@ -13,9 +13,6 @@ import ( type SMTUpdater struct { conn db.Conn smtTrie *trie.Trie - - errorCh chan error - doneCh chan error // Will have just one entry when all the processing is done } func NewSMTUpdater(conn db.Conn, root *common.SHA256Output, cacheHeight int) *SMTUpdater { @@ -31,83 +28,50 @@ func NewSMTUpdater(conn db.Conn, root *common.SHA256Output, cacheHeight int) *SM return &SMTUpdater{ conn: conn, smtTrie: smtTrie, - errorCh: make(chan error), - doneCh: make(chan error), } } -func (u *SMTUpdater) Start(ctx context.Context) { +func (u *SMTUpdater) Update(ctx context.Context) error { fmt.Println("Starting SMT updater") - // Start processing the error channel. - go u.processErrorChannel() - - // Read batches of updated nodes from `updates`: - go func() { - // This is the last and only processing function. After it finishes, there is nothing - // else to process, close error channel on exiting. 
- defer close(u.errorCh) - - domains, err := u.conn.UpdatedDomains(ctx) - if err != nil { - u.errorCh <- err - return - } - u.processBatch(ctx, domains) - - // Save root value: - err = u.conn.SaveRoot(ctx, (*common.SHA256Output)(u.smtTrie.Root)) - if err != nil { - u.errorCh <- err - return - } - fmt.Println("Done SMT updater.") - }() -} - -func (u *SMTUpdater) Wait() error { - return <-u.doneCh -} - -func (u *SMTUpdater) processErrorChannel() { - var withErrors bool - for err := range u.errorCh { - if err != nil { - withErrors = true - fmt.Printf("SMT update, error: %s\n", err) - } + domains, err := u.conn.UpdatedDomains(ctx) + if err != nil { + return err + } + err = u.processBatch(ctx, domains) + if err != nil { + return err } - if withErrors { - u.doneCh <- fmt.Errorf("errors found") - } else { - u.doneCh <- nil + + // Save root value: + err = u.conn.SaveRoot(ctx, (*common.SHA256Output)(u.smtTrie.Root)) + if err != nil { + return err } - close(u.doneCh) + fmt.Println("Done SMT updater.") + return nil } -func (u *SMTUpdater) processBatch(ctx context.Context, batch []*common.SHA256Output) { +func (u *SMTUpdater) processBatch(ctx context.Context, batch []*common.SHA256Output) error { // Read those certificates: entries, err := u.conn.RetrieveDomainEntries(ctx, batch) if err != nil { - u.errorCh <- err - return + return err } keys, values, err := updater.KeyValuePairToSMTInput(entries) if err != nil { - u.errorCh <- err - return + return err } // Update the tree. _, err = u.smtTrie.Update(context.Background(), keys, values) if err != nil { - u.errorCh <- err - return + return err } // And update the tree in the DB. err = u.smtTrie.Commit(context.Background()) if err != nil { - u.errorCh <- err - return + return err } + return nil } From 7af1d202f1cfe552901f989ed46a901dba2c8996 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 22 Mar 2023 20:57:29 +0100 Subject: [PATCH 063/187] SMT updater partially in updater pkg. 
--- cmd/ingest/smtUpdater.go | 26 +------------------------- pkg/mapserver/updater/updater.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 31 insertions(+), 25 deletions(-) diff --git a/cmd/ingest/smtUpdater.go b/cmd/ingest/smtUpdater.go index 2e62c041..162e3165 100644 --- a/cmd/ingest/smtUpdater.go +++ b/cmd/ingest/smtUpdater.go @@ -38,7 +38,7 @@ func (u *SMTUpdater) Update(ctx context.Context) error { if err != nil { return err } - err = u.processBatch(ctx, domains) + err = updater.UpdateSMTfromDomains(ctx, u.conn, u.smtTrie, domains) if err != nil { return err } @@ -51,27 +51,3 @@ func (u *SMTUpdater) Update(ctx context.Context) error { fmt.Println("Done SMT updater.") return nil } - -func (u *SMTUpdater) processBatch(ctx context.Context, batch []*common.SHA256Output) error { - // Read those certificates: - entries, err := u.conn.RetrieveDomainEntries(ctx, batch) - if err != nil { - return err - } - keys, values, err := updater.KeyValuePairToSMTInput(entries) - if err != nil { - return err - } - - // Update the tree. - _, err = u.smtTrie.Update(context.Background(), keys, values) - if err != nil { - return err - } - // And update the tree in the DB. - err = u.smtTrie.Commit(context.Background()) - if err != nil { - return err - } - return nil -} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index ddaf4d4c..a238436d 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -347,6 +347,36 @@ func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn, numDBWri return nil } +func UpdateSMTfromDomains( + ctx context.Context, + conn db.Conn, + smtTrie *trie.Trie, + domainIDs []*common.SHA256Output, +) error { + + // Read those certificates: + entries, err := conn.RetrieveDomainEntries(ctx, domainIDs) + if err != nil { + return err + } + keys, values, err := KeyValuePairToSMTInput(entries) + if err != nil { + return err + } + + // Update the tree. 
+ _, err = smtTrie.Update(context.Background(), keys, values) + if err != nil { + return err + } + // And update the tree in the DB. + err = smtTrie.Commit(context.Background()) + if err != nil { + return err + } + return nil +} + func insertCerts(ctx context.Context, conn db.Conn, names [][]string, ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { From 0304dbd6c2ea8b2ca466af3ea87bfd1100203626 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 22 Mar 2023 21:14:43 +0100 Subject: [PATCH 064/187] Moved SMT update to updater pkg. --- cmd/ingest/main.go | 8 +++-- cmd/ingest/smtUpdater.go | 53 -------------------------------- pkg/mapserver/updater/updater.go | 38 +++++++++++++++++++++++ 3 files changed, 43 insertions(+), 56 deletions(-) delete mode 100644 cmd/ingest/smtUpdater.go diff --git a/cmd/ingest/main.go b/cmd/ingest/main.go index fb51f416..d8edc9d6 100644 --- a/cmd/ingest/main.go +++ b/cmd/ingest/main.go @@ -13,6 +13,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) const ( @@ -132,10 +133,11 @@ func mainFunction() int { // Coalesce the payloads of all modified domains. CoalescePayloadsForDirtyDomains(ctx, conn) - // Now start processing the changed domains into the SMT: - smtProcessor := NewSMTUpdater(conn, root, 32) - err = smtProcessor.Update(ctx) + // Now update the SMT Trie with the changed domains: + fmt.Println("Starting SMT update ...") + err = updater.UpdateSMT(ctx, conn, 32) exitIfError(err) + fmt.Println("Done SMT update.") // Cleanup dirty entries. 
err = conn.CleanupDirty(ctx) diff --git a/cmd/ingest/smtUpdater.go b/cmd/ingest/smtUpdater.go deleted file mode 100644 index 162e3165..00000000 --- a/cmd/ingest/smtUpdater.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "context" - "fmt" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" -) - -type SMTUpdater struct { - conn db.Conn - smtTrie *trie.Trie -} - -func NewSMTUpdater(conn db.Conn, root *common.SHA256Output, cacheHeight int) *SMTUpdater { - var rootSlice []byte - if root != nil { - rootSlice = root[:] - } - smtTrie, err := trie.NewTrie(rootSlice, common.SHA256Hash, conn) - if err != nil { - panic(err) - } - smtTrie.CacheHeightLimit = cacheHeight - return &SMTUpdater{ - conn: conn, - smtTrie: smtTrie, - } -} - -func (u *SMTUpdater) Update(ctx context.Context) error { - fmt.Println("Starting SMT updater") - - domains, err := u.conn.UpdatedDomains(ctx) - if err != nil { - return err - } - err = updater.UpdateSMTfromDomains(ctx, u.conn, u.smtTrie, domains) - if err != nil { - return err - } - - // Save root value: - err = u.conn.SaveRoot(ctx, (*common.SHA256Output)(u.smtTrie.Root)) - if err != nil { - return err - } - fmt.Println("Done SMT updater.") - return nil -} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index a238436d..4d0acff4 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -377,6 +377,44 @@ func UpdateSMTfromDomains( return nil } +// UpdateSMT reads all the dirty domains (pending to update their contents in the SMT), creates +// a SMT Trie, loads it, and updates its entries with the new values. +// It finally commits the Trie and saves its root in the DB. +func UpdateSMT(ctx context.Context, conn db.Conn, cacheHeight int) error { + // Load root. 
+ var root []byte + if rootID, err := conn.LoadRoot(ctx); err != nil { + return err + } else if rootID != nil { + root = rootID[:] + } + + // Load SMT. + smtTrie, err := trie.NewTrie(root, common.SHA256Hash, conn) + if err != nil { + panic(err) + } + smtTrie.CacheHeightLimit = cacheHeight + + // Get the dirty domains. + domains, err := conn.UpdatedDomains(ctx) + if err != nil { + return err + } + err = UpdateSMTfromDomains(ctx, conn, smtTrie, domains) + if err != nil { + return err + } + + // Save root value: + err = conn.SaveRoot(ctx, (*common.SHA256Output)(smtTrie.Root)) + if err != nil { + return err + } + + return nil +} + func insertCerts(ctx context.Context, conn db.Conn, names [][]string, ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { From a816b6a7e6cddeb94bf67d767a9d5bb91b4e183f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 22 Mar 2023 21:18:34 +0100 Subject: [PATCH 065/187] mapserver IT updates SMT. --- tests/integration/mapserver/main.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 35f627a6..b00e9267 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -2,6 +2,7 @@ package main import ( "context" + "fmt" "os" "time" @@ -34,6 +35,7 @@ func mainFunc() int { err := testdb.RemoveTestDB(ctx, *config) panicIfError(err) }() + fmt.Printf("created DB %s.\n", DBName) // Test connect several times. conn, err := mysql.Connect(config) @@ -42,12 +44,15 @@ func mainFunc() int { conn, err = mysql.Connect(config) panicIfError(err) panicIfError(conn.Close()) + fmt.Println("done testing the DB connection.") // Ingest data. ingestData(ctx, config) + fmt.Println("done ingesting test data.") // Get some proofs. 
retrieveSomeProofs(ctx, config) + fmt.Println("done loading a responder.") // Compare expected results return 0 @@ -83,8 +88,13 @@ func ingestData(ctx context.Context, config *db.Configuration) { payloads[b:], IDs[b:], parentIDs[b:]) panicIfError(err) + // Build the domain_payloads entries from dirty. err = updater.CoalescePayloadsForDirtyDomains(ctx, conn, 2) panicIfError(err) + + // Do the SMT update. + err = updater.UpdateSMT(ctx, conn, 32) + panicIfError(err) } func retrieveSomeProofs(ctx context.Context, config *db.Configuration) { From a06815b55c35eae8547fe2eaeb7c0935b5705ef7 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 23 Mar 2023 21:01:43 +0100 Subject: [PATCH 066/187] Create pkg/tests and move the tests/pkg/db there. Fix RetrieveDomainEntry. Finish the GetProof function. Fix retrieveAffectedDomainsFromDB. Extend the mapserver IT. --- pkg/db/mysql/read.go | 15 +- pkg/mapserver/responder/old_responder_test.go | 6 +- pkg/mapserver/responder/responder.go | 7 +- pkg/mapserver/trie/trie.go | 4 +- pkg/mapserver/trie/trie_cache.go | 11 +- pkg/mapserver/trie/trie_test.go | 241 ++++++++---------- pkg/mapserver/updater/dbutil.go | 13 +- pkg/mapserver/updater/updater.go | 1 + pkg/mapserver/updater/updater_test.go | 16 +- .../internal => tests}/mockdb_for_testing.go | 2 +- {tests/pkg/db => pkg/tests}/testDB.go | 2 +- {tests/pkg/db => pkg/tests}/test_utils.go | 2 +- pkg/util/proof.go | 61 +++++ tests/integration/mapserver/main.go | 122 ++++++--- 14 files changed, 295 insertions(+), 208 deletions(-) rename pkg/{mapserver/internal => tests}/mockdb_for_testing.go (99%) rename {tests/pkg/db => pkg/tests}/testDB.go (98%) rename {tests/pkg/db => pkg/tests}/test_utils.go (98%) create mode 100644 pkg/util/proof.go diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go index 7d712181..d4f59a8f 100644 --- a/pkg/db/mysql/read.go +++ b/pkg/db/mysql/read.go @@ -46,16 +46,13 @@ func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Outp func 
(c *mysqlDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ( []byte, error) { - keyValuePair, err := retrieveValue(ctx, c.prepGetValueDomainEntries, key) - if err != nil { - if err != sql.ErrNoRows { - return nil, fmt.Errorf("RetrieveDomainEntry | %w", err) - } else { - // return sql.ErrNoRows - return nil, err - } + str := "SELECT payload FROM domain_payloads WHERE id = ?" + var payload []byte + err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&payload) + if err != nil && err != sql.ErrNoRows { + return nil, fmt.Errorf("RetrieveDomainEntry | %w", err) } - return keyValuePair, nil + return payload, nil } // RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index c2029b8b..951d7fe7 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -10,11 +10,11 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/internal" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/stretchr/testify/require" ) @@ -48,7 +48,7 @@ func TestGetProof(t *testing.T) { } func TestResponderWithPoP(t *testing.T) { - db.TruncateAllTablesForTest(t) + tests.TruncateAllTablesForTest(t) mapUpdater, err := updater.NewMapUpdater(nil, 233) require.NoError(t, err) @@ -154,7 +154,7 @@ func getUpdatedUpdater(t require.TestingT, certs []*x509.Certificate) (db.Conn, ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - conn := 
internal.NewMockDB() + conn := tests.NewMockDB() smt, err := trie.NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) smt.CacheHeightLimit = 233 diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index 16172005..b2a3f6be 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -64,7 +64,10 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, proofType := mapCommon.PoA if isPoP { proofType = mapCommon.PoP - _ = payload + payload, err = r.conn.RetrieveDomainEntry(ctx, hash) + if err != nil { + return nil, fmt.Errorf("error obtaining payload for %s: %w", domainPart, err) + } } proofList[i] = &mapCommon.MapServerResponse{ @@ -77,7 +80,7 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, ProofValue: proofValue, }, DomainEntryBytes: payload, - // TreeHeadSig: , + // TreeHeadSig: , TODO(juagargi) } } return proofList, nil diff --git a/pkg/mapserver/trie/trie.go b/pkg/mapserver/trie/trie.go index 15c1378f..23023fcf 100644 --- a/pkg/mapserver/trie/trie.go +++ b/pkg/mapserver/trie/trie.go @@ -10,8 +10,6 @@ import ( "context" "fmt" "sync" - - "github.com/netsec-ethz/fpki/pkg/db" ) // Trie is a modified sparse Merkle tree. @@ -49,7 +47,7 @@ type Trie struct { } // NewSMT creates a new SMT given a keySize and a hash function. 
-func NewTrie(root []byte, hash func(data ...[]byte) []byte, store db.Conn) (*Trie, error) { +func NewTrie(root []byte, hash func(data ...[]byte) []byte, store DBConn) (*Trie, error) { s := &Trie{ hash: hash, TrieHeight: len(hash([]byte("height"))) * 8, // hash any string to get output length diff --git a/pkg/mapserver/trie/trie_cache.go b/pkg/mapserver/trie/trie_cache.go index e1f14c6b..ff012ee2 100644 --- a/pkg/mapserver/trie/trie_cache.go +++ b/pkg/mapserver/trie/trie_cache.go @@ -29,7 +29,7 @@ type CacheDB struct { wholeCacheDBLock sync.RWMutex // dbConn is the conn to mysql db - Store db.Conn + Store DBConn readLimiter chan struct{} // nodes to be removed from db @@ -37,8 +37,15 @@ type CacheDB struct { removeMux sync.RWMutex } +type DBConn interface { + Close() error + RetrieveTreeNode(ctx context.Context, key common.SHA256Output) ([]byte, error) + UpdateTreeNodes(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) + DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) +} + // NewCacheDB: return a cached db -func NewCacheDB(store db.Conn) (*CacheDB, error) { +func NewCacheDB(store DBConn) (*CacheDB, error) { return &CacheDB{ liveCache: make(map[Hash][][]byte), updatedNodes: make(map[Hash][][]byte), diff --git a/pkg/mapserver/trie/trie_test.go b/pkg/mapserver/trie/trie_test.go index 1093121b..cb09efe2 100644 --- a/pkg/mapserver/trie/trie_test.go +++ b/pkg/mapserver/trie/trie_test.go @@ -6,43 +6,42 @@ package trie import ( - "bytes" "context" + "fmt" "math/rand" "sort" "testing" "time" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/internal" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/stretchr/testify/require" ) // TestTrieEmpty: test empty SMT func TestTrieEmpty(t *testing.T) { - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) - if len(smt.Root) != 0 { - 
t.Fatal("empty trie root hash not correct") - } + require.Empty(t, smt.Root) } // TestTrieUpdateAndGet: Update leaves and get leaves func TestTrieUpdateAndGet(t *testing.T) { + rand.Seed(1) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) smt.atomicUpdate = false // Add data to empty trie - keys := getFreshData(10, 32) - values := getFreshData(10, 32) + keys := getRandomData(t, 10) + values := getRandomData(t, 10) ch := make(chan mResult, 1) smt.update(ctx, smt.Root, keys, values, nil, 0, smt.TrieHeight, ch) res := <-ch @@ -51,77 +50,67 @@ func TestTrieUpdateAndGet(t *testing.T) { // Check all keys have been stored for i, key := range keys { value, _ := smt.get(ctx, root, key, nil, 0, smt.TrieHeight) - if !bytes.Equal(values[i], value) { - t.Fatal("value not updated") - } + require.Equal(t, values[i], value) } // Add another new leaves - newKeys := getFreshData(5, 32) - newValues := getFreshData(5, 32) + newKeys := getRandomData(t, 5) + newValues := getRandomData(t, 5) ch = make(chan mResult, 1) smt.update(ctx, root, newKeys, newValues, nil, 0, smt.TrieHeight, ch) res = <-ch newRoot := res.update - if bytes.Equal(root, newRoot) { - t.Fatal("trie not updated") - } + require.NotEqual(t, root, newRoot) for i, newKey := range newKeys { newValue, _ := smt.get(ctx, newRoot, newKey, nil, 0, smt.TrieHeight) - if !bytes.Equal(newValues[i], newValue) { - t.Fatal("failed to get value") - } + require.Equal(t, newValues[i], newValue) } } // TestTrieAtomicUpdate: test AtomicUpdate() func TestTrieAtomicUpdate(t *testing.T) { + rand.Seed(1) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) smt.CacheHeightLimit = 0 - keys := getFreshData(1, 32) - values := 
getFreshData(1, 32) + keys := getRandomData(t, 1) + values := getRandomData(t, 1) root, _ := smt.AtomicUpdate(ctx, keys, values) updatedNb := len(smt.db.updatedNodes) cacheNb := len(smt.db.liveCache) - newValues := getFreshData(1, 32) + newValues := getRandomData(t, 1) smt.AtomicUpdate(ctx, keys, newValues) - if len(smt.db.updatedNodes) != 2*updatedNb { - t.Fatal("Atomic update doesn't store all tries") - } - if len(smt.db.liveCache) != cacheNb { - t.Fatal("Cache size should remain the same") - } + require.Len(t, smt.db.updatedNodes, 2*updatedNb) + require.Len(t, smt.db.liveCache, cacheNb) // check keys of previous atomic update are accessible in // updated nodes with root. smt.atomicUpdate = false for i, key := range keys { value, _ := smt.get(ctx, root, key, nil, 0, smt.TrieHeight) - if !bytes.Equal(values[i], value) { - t.Fatal("failed to get value") - } + require.Equal(t, values[i], value) } } // TestTriePublicUpdateAndGet: test Update() and verify the memory func TestTriePublicUpdateAndGet(t *testing.T) { + rand.Seed(1) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) smt.CacheHeightLimit = 0 // Add data to empty trie - keys := getFreshData(20, 32) - values := getFreshData(20, 32) + keys := getRandomData(t, 20) + values := getRandomData(t, 20) root, _ := smt.Update(ctx, keys, values) updatedNb := len(smt.db.updatedNodes) cacheNb := len(smt.db.liveCache) @@ -129,148 +118,130 @@ func TestTriePublicUpdateAndGet(t *testing.T) { // Check all keys have been stored for i, key := range keys { value, _ := smt.Get(ctx, key) - if !bytes.Equal(values[i], value) { - t.Fatal("trie not updated") - } - } - if !bytes.Equal(root, smt.Root) { - t.Fatal("Root not stored") + require.Equal(t, values[i], value) } + require.Equal(t, root, smt.Root) - newValues := getFreshData(20, 32) + newValues := getRandomData(t, 20) 
smt.Update(ctx, keys, newValues) - if len(smt.db.updatedNodes) != updatedNb { - t.Fatal("multiple updates don't actualize updated nodes") - } - if len(smt.db.liveCache) != cacheNb { - t.Fatal("multiple updates don't actualize liveCache") - } + require.Len(t, smt.db.updatedNodes, updatedNb) + require.Len(t, smt.db.liveCache, cacheNb) // Check all keys have been modified for i, key := range keys { value, _ := smt.Get(ctx, key) - if !bytes.Equal(newValues[i], value) { - t.Fatal("trie not updated") - } + require.Equal(t, newValues[i], value) } } // TestTrieUpdateAndDelete: test updating and deleting at the same time func TestTrieUpdateAndDelete(t *testing.T) { + rand.Seed(1) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) smt.CacheHeightLimit = 0 - key0 := make([]byte, 32, 32) - values := getFreshData(1, 32) + key0 := make([]byte, 32) + values := getRandomData(t, 1) root, _ := smt.Update(ctx, [][]byte{key0}, values) cacheNb := len(smt.db.liveCache) updatedNb := len(smt.db.updatedNodes) smt.atomicUpdate = false _, _, k, v, isShortcut, _ := smt.loadChildren(ctx, root, smt.TrieHeight, 0, nil) - if !isShortcut || !bytes.Equal(k[:HashLength], key0) || !bytes.Equal(v[:HashLength], values[0]) { - t.Fatal("leaf shortcut didn't move up to root") - } + require.True(t, isShortcut) + require.Equal(t, key0, k[:HashLength]) + require.Equal(t, values[0], v[:HashLength]) - key1 := make([]byte, 32, 32) + key1 := make([]byte, 32) // set the last bit bitSet(key1, 255) keys := [][]byte{key0, key1} - values = [][]byte{DefaultLeaf, getFreshData(1, 32)[0]} + values = [][]byte{DefaultLeaf, getRandomData(t, 1)[0]} root, _ = smt.Update(ctx, keys, values) - - if len(smt.db.liveCache) != cacheNb { - t.Fatal("number of cache nodes not correct after delete") - } - if len(smt.db.updatedNodes) != updatedNb { - t.Fatal("number of cache 
nodes not correct after delete") - } + require.Len(t, smt.db.liveCache, cacheNb) + require.Len(t, smt.db.updatedNodes, updatedNb) smt.atomicUpdate = false _, _, k, v, isShortcut, _ = smt.loadChildren(ctx, root, smt.TrieHeight, 0, nil) - if !isShortcut || !bytes.Equal(k[:HashLength], key1) || !bytes.Equal(v[:HashLength], values[1]) { - t.Fatal("leaf shortcut didn't move up to root") - } + require.True(t, isShortcut) + require.Equal(t, key1, k[:HashLength]) + require.Equal(t, values[1], v[:HashLength]) } // TestTrieMerkleProof: test if merkle proof is correct func TestTrieMerkleProof(t *testing.T) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) + rand.Seed(1) + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) // Add data to empty trie - keys := getFreshData(10, 32) - values := getFreshData(10, 32) + keys := getRandomData(t, 10) + values := getRandomData(t, 10) smt.Update(ctx, keys, values) for i, key := range keys { ap, _, k, v, _ := smt.MerkleProof(ctx, key) - if !VerifyInclusion(smt.Root, ap, key, values[i]) { - t.Fatalf("failed to verify inclusion proof") - } - if !bytes.Equal(key, k) && !bytes.Equal(values[i], v) { - t.Fatalf("merkle proof didn't return the correct key-value pair") - } + require.True(t, VerifyInclusion(smt.Root, ap, key, values[i])) + // (key,value) can be 1- (nil, value), value of the included key, 2- the kv of a LeafNode + // on the path of the non-included key, 3- (nil, nil) for a non-included key + // with a DefaultLeaf on the path + require.Nil(t, k) + require.Equal(t, v, values[i]) } emptyKey := common.SHA256Hash([]byte("non-member")) ap, included, proofKey, proofValue, _ := smt.MerkleProof(ctx, emptyKey) - if included { - t.Fatalf("failed to verify non inclusion proof") - } + require.False(t, included) - if !VerifyNonInclusion(smt.Root, ap, emptyKey, 
proofValue, proofKey) { - t.Fatalf("failed to verify non inclusion proof") - } + require.True(t, VerifyNonInclusion(smt.Root, ap, emptyKey, proofValue, proofKey)) } // TestTrieMerkleProofCompressed: compressed proofs test func TestTrieMerkleProofCompressed(t *testing.T) { + rand.Seed(1) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) // Add data to empty trie - keys := getFreshData(10, 32) - values := getFreshData(10, 32) + keys := getRandomData(t, 10) + values := getRandomData(t, 10) smt.Update(ctx, keys, values) for i, key := range keys { + fmt.Printf("i=%d\n", i) bitmap, ap, length, _, k, v, _ := smt.MerkleProofCompressed(ctx, key) - if !smt.VerifyInclusionC(bitmap, key, values[i], ap, length) { - t.Fatalf("failed to verify inclusion proof") - } - if !bytes.Equal(key, k) && !bytes.Equal(values[i], v) { - t.Fatalf("merkle proof didn't return the correct key-value pair") - } + require.True(t, smt.VerifyInclusionC(bitmap, key, values[i], ap, length)) + // (key,value) can be 1- (nil, value), value of the included key, 2- the kv of a LeafNode + // on the path of the non-included key, 3- (nil, nil) for a non-included key + // with a DefaultLeaf on the path + require.Nil(t, k) + require.Equal(t, values[i], v) } emptyKey := common.SHA256Hash([]byte("non-member")) bitmap, ap, length, included, proofKey, proofValue, _ := smt.MerkleProofCompressed(ctx, emptyKey) - if included { - t.Fatalf("failed to verify non inclusion proof") - } - if !smt.VerifyNonInclusionC(ap, length, bitmap, emptyKey, proofValue, proofKey) { - t.Fatalf("failed to verify non inclusion proof") - } + require.False(t, included) + require.True(t, smt.VerifyNonInclusionC(ap, length, bitmap, emptyKey, proofValue, proofKey)) } func TestHeight0LeafShortcut(t *testing.T) { + rand.Seed(1) ctx, cancelF := context.WithTimeout(context.Background(), 
time.Minute) defer cancelF() keySize := 32 - db := internal.NewMockDB() + db := tests.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -279,65 +250,53 @@ func TestHeight0LeafShortcut(t *testing.T) { key1 := make([]byte, keySize, keySize) bitSet(key1, keySize*8-1) keys := [][]byte{key0, key1} - values := getFreshData(2, 32) + values := getRandomData(t, 2) smt.Update(ctx, keys, values) updatedNb := len(smt.db.updatedNodes) // Check all keys have been stored for i, key := range keys { value, _ := smt.Get(ctx, key) - if !bytes.Equal(values[i], value) { - t.Fatal("trie not updated") - } + require.Equal(t, values[i], value) } bitmap, ap, length, _, k, v, err := smt.MerkleProofCompressed(ctx, key1) - if err != nil { - t.Fatal(err) - } - if !bytes.Equal(key1, k) && !bytes.Equal(values[1], v) { - t.Fatalf("merkle proof didn't return the correct key-value pair") - } - if length != smt.TrieHeight { - t.Fatal("proof should have length equal to trie height for a leaf shortcut") - } - if !smt.VerifyInclusionC(bitmap, key1, values[1], ap, length) { - t.Fatal("failed to verify inclusion proof") - } + require.NoError(t, err) + // (key,value) can be 1- (nil, value), value of the included key, 2- the kv of a LeafNode + // on the path of the non-included key, 3- (nil, nil) for a non-included key + // with a DefaultLeaf on the path + require.Nil(t, k) + require.Equal(t, values[1], v) + require.Equal(t, smt.TrieHeight, length) + require.True(t, smt.VerifyInclusionC(bitmap, key1, values[1], ap, length)) // Delete one key and check that the remaining one moved up to the root of the tree newRoot, _ := smt.AtomicUpdate(ctx, keys[0:1], [][]byte{DefaultLeaf}) // Nb of updated nodes remains same because the new shortcut root was already stored at height 0. 
- if len(smt.db.updatedNodes) != updatedNb { - t.Fatal("number of cache nodes not correct after delete") - } + require.Len(t, smt.db.updatedNodes, updatedNb) smt.atomicUpdate = false _, _, k, v, isShortcut, err := smt.loadChildren(ctx, newRoot, smt.TrieHeight, 0, nil) - if err != nil { - t.Fatal(err) - } - if !isShortcut || !bytes.Equal(k[:HashLength], key1) || !bytes.Equal(v[:HashLength], values[1]) { - t.Fatal("leaf shortcut didn't move up to root") - } + require.NoError(t, err) + require.True(t, isShortcut) + require.Equal(t, key1, k[:HashLength]) + require.Equal(t, values[1], v[:HashLength]) _, _, length, _, k, v, _ = smt.MerkleProofCompressed(ctx, key1) - if length != 0 { - t.Fatal("proof should have length equal to trie height for a leaf shortcut") - } - if !bytes.Equal(key1, k) && !bytes.Equal(values[1], v) { - t.Fatalf("merkle proof didn't return the correct key-value pair") - } + require.Equal(t, 0, length) + // (key,value) can be 1- (nil, value), value of the included key, 2- the kv of a LeafNode + // on the path of the non-included key, 3- (nil, nil) for a non-included key + // with a DefaultLeaf on the path + require.Nil(t, k) + require.Equal(t, values[1], v) } -func getFreshData(size, length int) [][]byte { - var data [][]byte - for i := 0; i < size; i++ { - key := make([]byte, 32) +func getRandomData(t require.TestingT, count int) [][]byte { + data := make([][]byte, count) + for i := 0; i < count; i++ { + key := make([]byte, common.SHA256Size) _, err := rand.Read(key) - if err != nil { - panic(err) - } - data = append(data, common.SHA256Hash(key)[:length]) + require.NoError(t, err) + data[i] = key } sort.Sort(DataArray(data)) return data diff --git a/pkg/mapserver/updater/dbutil.go b/pkg/mapserver/updater/dbutil.go index af56e302..efd90bbe 100644 --- a/pkg/mapserver/updater/dbutil.go +++ b/pkg/mapserver/updater/dbutil.go @@ -23,16 +23,15 @@ func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDB(ctx context.Context, // XXX(juagargi) review why 
passing a set (we need to convert it to a slice) // list of domain hashes to fetch the domain entries from db - affectedDomainHashes := make([]common.SHA256Output, 0, len(affectedDomainsSet)) + affectedDomainHashes := make([]*common.SHA256Output, 0, len(affectedDomainsSet)) for k := range affectedDomainsSet { - affectedDomainHashes = append(affectedDomainHashes, k) + affectedDomainHashes = append(affectedDomainHashes, &k) } - // work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) { - // domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) - // resultChan <- dbResult{pairs: domainEntries, err: err} - // } - work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) {} + work := func(domainHashes []*common.SHA256Output, resultChan chan dbResult) { + domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) + resultChan <- dbResult{pairs: domainEntries, err: err} + } resultChan := make(chan dbResult) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 4d0acff4..6a00335f 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -99,6 +99,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // updateCerts: update the tables and SMT (in memory) using certificates func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { start := time.Now() + keyValuePairs, numOfUpdates, err := mapUpdater.UpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) if err != nil { return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err) diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index f81158b4..e44253bd 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -10,20 +10,20 @@ import ( projectCommon 
"github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/internal" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestUpdateCerts: test updateCerts() func TestUpdateCerts(t *testing.T) { - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, internal.NewMockDB()) + smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, tests.NewMockDB()) require.NoError(t, err) smt.CacheHeightLimit = 233 - updaterDB := internal.NewMockDB() + updaterDB := tests.NewMockDB() updater, err := getMockUpdater(smt, updaterDB) require.NoError(t, err) @@ -82,11 +82,11 @@ func TestUpdateRPCAndPC(t *testing.T) { pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 20) require.NoError(t, err) - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, internal.NewMockDB()) + smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, tests.NewMockDB()) require.NoError(t, err) smt.CacheHeightLimit = 233 - updaterDB := internal.NewMockDB() + updaterDB := tests.NewMockDB() updater, err := getMockUpdater(smt, updaterDB) require.NoError(t, err) @@ -145,11 +145,11 @@ func TestUpdateRPCAndPC(t *testing.T) { // TestFetchUpdatedDomainHash: test fetchUpdatedDomainHash() func TestFetchUpdatedDomainHash(t *testing.T) { - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, internal.NewMockDB()) + smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, tests.NewMockDB()) require.NoError(t, err) smt.CacheHeightLimit = 233 - updaterDB := internal.NewMockDB() + updaterDB := tests.NewMockDB() updater, err := getMockUpdater(smt, updaterDB) require.NoError(t, err) @@ -218,7 +218,7 @@ func getRandomHash() 
projectCommon.SHA256Output { } // get a updater using mock db -func getMockUpdater(smt *trie.Trie, updaterDB *internal.MockDB) (*MapUpdater, error) { +func getMockUpdater(smt *trie.Trie, updaterDB *tests.MockDB) (*MapUpdater, error) { return &MapUpdater{ smt: smt, dbConn: updaterDB, diff --git a/pkg/mapserver/internal/mockdb_for_testing.go b/pkg/tests/mockdb_for_testing.go similarity index 99% rename from pkg/mapserver/internal/mockdb_for_testing.go rename to pkg/tests/mockdb_for_testing.go index c88699b1..a1180c97 100644 --- a/pkg/mapserver/internal/mockdb_for_testing.go +++ b/pkg/tests/mockdb_for_testing.go @@ -1,4 +1,4 @@ -package internal +package tests import ( "context" diff --git a/tests/pkg/db/testDB.go b/pkg/tests/testDB.go similarity index 98% rename from tests/pkg/db/testDB.go rename to pkg/tests/testDB.go index c2139d70..49a62d90 100644 --- a/tests/pkg/db/testDB.go +++ b/pkg/tests/testDB.go @@ -1,4 +1,4 @@ -package db +package tests import ( "context" diff --git a/tests/pkg/db/test_utils.go b/pkg/tests/test_utils.go similarity index 98% rename from tests/pkg/db/test_utils.go rename to pkg/tests/test_utils.go index 385f57ac..d568b117 100644 --- a/tests/pkg/db/test_utils.go +++ b/pkg/tests/test_utils.go @@ -1,4 +1,4 @@ -package db +package tests import ( "context" diff --git a/pkg/util/proof.go b/pkg/util/proof.go new file mode 100644 index 00000000..638a4de1 --- /dev/null +++ b/pkg/util/proof.go @@ -0,0 +1,61 @@ +package util + +import ( + "bytes" + "fmt" + "strings" + + ctx509 "github.com/google/certificate-transparency-go/x509" + mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/mapserver/prover" +) + +// CheckProof checks the validity of the proof. The CA from the certificate is checked for +// those subdomains where entries are found in the mapserver. 
+func CheckProof( + proof []*mapCommon.MapServerResponse, + name string, + cert *ctx509.Certificate, +) error { + + caName := cert.Issuer.String() + foundPoP := false + for i, proof := range proof { + if !strings.Contains(name, proof.Domain) { + return fmt.Errorf("proof step %d of %s: subdomain %s not in name %s", + i, name, proof.Domain, name) + } + proofType, correct, err := prover.VerifyProofByDomain(*proof) + if err != nil { + return fmt.Errorf("proof step %d of %s: verifying proof: %w", + i, name, err) + } + if !correct { + return fmt.Errorf("proof step %d of %s: incorrect proof", i, name) + } + if proofType == mapCommon.PoP { + foundPoP = true + domainEntry, err := mapCommon.DeserializeDomainEntry(proof.DomainEntryBytes) + if err != nil { + return fmt.Errorf("proof step %d of %s: deserializing payload: %w", + i, name, err) + } + // Find the CA entry that corresponds to the CA in this certificate. + for _, ca := range domainEntry.CAEntry { + if ca.CAName == caName { + for _, raw := range ca.DomainCerts { + if bytes.Equal(raw, cert.Raw) { + return nil + } + } + } + } + } else { + if len(proof.DomainEntryBytes) != 0 { + return fmt.Errorf("payload for a absence step (%s)", name) + } + } + } + return fmt.Errorf("certificate/CA not found; all proof steps are PoA? 
%v", + !foundPoP) +} diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index b00e9267..23152dec 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -6,12 +6,14 @@ import ( "os" "time" + ctx509 "github.com/google/certificate-transparency-go/x509" + + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" "github.com/netsec-ethz/fpki/pkg/util" - testdb "github.com/netsec-ethz/fpki/tests/pkg/db" ) const ( @@ -27,34 +29,54 @@ func mainFunc() int { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - // Create an empty test DB - err := testdb.CreateTestDB(ctx, DBName) config := db.NewConfig(mysql.WithDefaults(), db.WithDB(DBName)) - panicIfError(err) - defer func() { - err := testdb.RemoveTestDB(ctx, *config) - panicIfError(err) - }() - fmt.Printf("created DB %s.\n", DBName) - - // Test connect several times. - conn, err := mysql.Connect(config) - panicIfError(err) - panicIfError(conn.Close()) - conn, err = mysql.Connect(config) - panicIfError(err) - panicIfError(conn.Close()) - fmt.Println("done testing the DB connection.") - // Ingest data. - ingestData(ctx, config) - fmt.Println("done ingesting test data.") - - // Get some proofs. - retrieveSomeProofs(ctx, config) + // // Create an empty test DB + // err := testdb.CreateTestDB(ctx, DBName) + // panicIfError(err) + // defer func() { + // err := testdb.RemoveTestDB(ctx, *config) + // panicIfError(err) + // }() + // fmt.Printf("created DB %s.\n", DBName) + + // // Test connect several times. 
+ // conn, err := mysql.Connect(config) + // panicIfError(err) + // panicIfError(conn.Close()) + // conn, err = mysql.Connect(config) + // panicIfError(err) + // panicIfError(conn.Close()) + // fmt.Println("done testing the DB connection.") + + // // Ingest data. + // ingestData(ctx, config) + // fmt.Println("done ingesting test data.") + + // Get a responder. + res := getResponder(ctx, config) fmt.Println("done loading a responder.") - // Compare expected results + // Compare proofs against expected results. + data := getSomeDataPointsToTest(ctx, config) + errors := make([]error, 0) + for _, d := range data { + fmt.Printf("checking %s ... ", d.Name) + proof, err := res.GetProof(ctx, d.Name) + panicIfError(err) + fmt.Printf("has %d steps\n", len(proof)) + err = util.CheckProof(proof, d.Name, d.Certs[0]) + if err != nil { + errors = append(errors, err) + } + } + for _, err := range errors { + fmt.Fprintf(os.Stderr, "%s\n", err) + } + if len(errors) > 0 { + return 1 + } + return 0 } @@ -63,8 +85,7 @@ func ingestData(ctx context.Context, config *db.Configuration) { conn, err := mysql.Connect(config) panicIfError(err) defer func() { - err := conn.Close() - panicIfError(err) + panicIfError(conn.Close()) }() // Ingest the testdata. 
@@ -97,7 +118,7 @@ func ingestData(ctx context.Context, config *db.Configuration) { panicIfError(err) } -func retrieveSomeProofs(ctx context.Context, config *db.Configuration) { +func getResponder(ctx context.Context, config *db.Configuration) *responder.MapResponder { // Connect to the test DB conn, err := mysql.Connect(config) panicIfError(err) @@ -108,9 +129,50 @@ func retrieveSomeProofs(ctx context.Context, config *db.Configuration) { "./tests/integration/mapserver/config/mapserver_config.json", conn) panicIfError(err) - p, err := res.GetProof(ctx, "aname.com") + return res +} + +type DataPoint struct { + Name string + Certs []*ctx509.Certificate +} + +func getSomeDataPointsToTest(ctx context.Context, config *db.Configuration) []DataPoint { + // Connect to the test DB + conn, err := mysql.Connect(config) panicIfError(err) - _ = p + defer func() { + panicIfError(conn.Close()) + }() + + // Some names from the test DB. + names := []string{ + // (4568 certs), + "*.us-west-2.es.amazonaws.com", + + // (2198 certs), + "flowers-to-the-world.com", + + // (1 cert), + "vg01.sjc006.ix.nflxvideo.net", + + // (0 certs), + "doesnnotexist.iamsure.of.that.ch", + } + + // Find certificates for these names. + data := make([]DataPoint, len(names)) + for i, name := range names { + data[i].Name = name + ID := common.SHA256Hash32Bytes([]byte(name)) + payload, err := conn.RetrieveDomainEntry(ctx, ID) + panicIfError(err) + // payload contains several certificates. + data[i].Certs, err = ctx509.ParseCertificates(payload) + panicIfError(err) + fmt.Printf("found %d certs for %s\n", len(data[i].Certs), name) + } + return data } func panicIfError(err error) { From 91a7c3acfa54c861b166238a4e7c5931f28781d0 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 23 Mar 2023 22:35:20 +0100 Subject: [PATCH 067/187] Allow creation of test DBs during tests. The script is now embedded in the binaries. 
--- pkg/db/mysql/write.go | 13 ++++- pkg/mapserver/responder/old_responder.go | 11 ++++- pkg/mapserver/responder/old_responder_test.go | 20 ++++++-- pkg/mapserver/trie/trie_test.go | 2 - pkg/mapserver/updater/certs_updater.go | 27 +++------- pkg/mapserver/updater/updater.go | 11 +---- pkg/mapserver/updater/updater_test_adapter.go | 4 +- pkg/tests/testDB.go | 49 +++++++++++++++---- tests/integration/mapserver/main.go | 21 ++++---- tools/script.go | 10 ++++ 10 files changed, 107 insertions(+), 61 deletions(-) create mode 100644 tools/script.go diff --git a/pkg/db/mysql/write.go b/pkg/db/mysql/write.go index c9601816..6d2aac06 100644 --- a/pkg/db/mysql/write.go +++ b/pkg/db/mysql/write.go @@ -14,7 +14,18 @@ import ( ) func (c *mysqlDB) UpdateDomainEntries(ctx context.Context, pairs []*db.KeyValuePair) (int, error) { - panic("not available") + str := "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(len(pairs), 2) + params := make([]interface{}, len(pairs)*2) + for i, p := range pairs { + params[i*2] = p.Key[:] + params[i*2+1] = p.Value + } + _, err := c.db.ExecContext(ctx, str, params...) 
+ if err != nil { + return 0, fmt.Errorf("UpdateDomainEntries | %w", err) + } + + return 666666666, nil } // UpdateDomainEntries: Update a list of key-value store diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go index 61f790c8..530e9e9c 100644 --- a/pkg/mapserver/responder/old_responder.go +++ b/pkg/mapserver/responder/old_responder.go @@ -24,9 +24,16 @@ type OldMapResponder struct { } // NewOldMapResponder: return a new responder -func NewOldMapResponder(ctx context.Context, root []byte, cacheHeight int, mapServerConfigPath string) (*OldMapResponder, error) { +func NewOldMapResponder( + ctx context.Context, + config *db.Configuration, + root []byte, + cacheHeight int, + mapServerConfigPath string, +) (*OldMapResponder, error) { + // new db connection for SMT - conn, err := mysql.Connect(nil) + conn, err := mysql.Connect(config) if err != nil { return nil, fmt.Errorf("NewMapResponder | Connect | %w", err) } diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index 951d7fe7..d79f68a1 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -9,6 +9,7 @@ import ( "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" @@ -48,12 +49,21 @@ func TestGetProof(t *testing.T) { } func TestResponderWithPoP(t *testing.T) { - tests.TruncateAllTablesForTest(t) + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + defer cancelF() + + dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - mapUpdater, err := updater.NewMapUpdater(nil, 
233) + err := tests.CreateTestDB(ctx, dbName) + require.NoError(t, err) + defer func() { + err = tests.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() + + mapUpdater, err := updater.NewMapUpdater(config, nil, 233) require.NoError(t, err) - ctx, cancelF := context.WithTimeout(context.Background(), 15*time.Minute) - defer cancelF() mapUpdater.Fetcher.BatchSize = 10000 const baseCTSize = 2 * 1000 @@ -90,7 +100,7 @@ func TestResponderWithPoP(t *testing.T) { require.Len(t, certs, count) // create responder and request proof for those names - responder, err := NewOldMapResponder(ctx, root, 233, "./testdata/mapserver_config.json") + responder, err := NewOldMapResponder(ctx, config, root, 233, "./testdata/mapserver_config.json") require.NoError(t, err) for _, cert := range certs { responses, err := responder.GetProof(ctx, cert.Subject.CommonName) diff --git a/pkg/mapserver/trie/trie_test.go b/pkg/mapserver/trie/trie_test.go index cb09efe2..372a91b8 100644 --- a/pkg/mapserver/trie/trie_test.go +++ b/pkg/mapserver/trie/trie_test.go @@ -7,7 +7,6 @@ package trie import ( "context" - "fmt" "math/rand" "sort" "testing" @@ -220,7 +219,6 @@ func TestTrieMerkleProofCompressed(t *testing.T) { smt.Update(ctx, keys, values) for i, key := range keys { - fmt.Printf("i=%d\n", i) bitmap, ap, length, _, k, v, _ := smt.MerkleProofCompressed(ctx, key) require.True(t, smt.VerifyInclusionC(bitmap, key, values[i], ap, length)) // (key,value) can be 1- (nil, value), value of the included key, 2- the kv of a LeafNode diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 0fdafbe4..8b09911f 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -3,7 +3,6 @@ package updater import ( "context" "fmt" - "time" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" @@ -33,43 +32,33 @@ func (mapUpdater *MapUpdater) 
UpdateDomainEntriesTableUsingCerts( return nil, 0, nil } - start := time.Now() // get the unique list of affected domains affectedDomainsSet, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap( certs, certChains) - end := time.Now() - fmt.Println("(memory) time to process certs: ", end.Sub(start)) // if no domain to update if len(affectedDomainsSet) == 0 { return nil, 0, nil } - start = time.Now() // retrieve (possibly)affected domain entries from db // It's possible that no records will be changed, because the certs are already recorded. domainEntriesMap, err := mapUpdater.retrieveAffectedDomainFromDB(ctx, affectedDomainsSet) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | %w", err) } - end = time.Now() - fmt.Println("(db) time to retrieve domain entries: ", end.Sub(start)) - start = time.Now() // update the domain entries updatedDomains, err := UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | updateDomainEntries | %w", err) } - end = time.Now() - fmt.Println("(db) time to update domain entries: ", end.Sub(start)) // if during this updates, no cert is added, directly return if len(updatedDomains) == 0 { return nil, 0, nil } - start = time.Now() // get the domain entries only if they are updated, from DB domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) if err != nil { @@ -81,17 +70,12 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts( if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | serializeUpdatedDomainEntries | %w", err) } - end = time.Now() - fmt.Println("(memory) time to process updated domains: ", end.Sub(start)) - start = time.Now() // commit changes to db num, err := mapUpdater.writeChangesToDB(ctx, keyValuePairs) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | writeChangesToDB | %w", err) } - 
end = time.Now() - fmt.Println("(db) time to write updated domain entries: ", end.Sub(start)) return keyValuePairs, num, nil } @@ -257,8 +241,11 @@ func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, } // update domain entries -func UpdateDomainEntries(domainEntries map[common.SHA256Output]*mcommon.DomainEntry, - certDomainMap map[string][]*ctx509.Certificate, certChainDomainMap map[string][][]*ctx509.Certificate) (uniqueSet, error) { +func UpdateDomainEntries( + domainEntries map[common.SHA256Output]*mcommon.DomainEntry, + certDomainMap map[string][]*ctx509.Certificate, + certChainDomainMap map[string][][]*ctx509.Certificate, +) (uniqueSet, error) { updatedDomainHash := make(uniqueSet) // read from previous map @@ -321,8 +308,8 @@ func SerializeUpdatedDomainEntries(domains map[common.SHA256Output]*mcommon.Doma result := make([]*db.KeyValuePair, 0, len(domains)) - for domainNameHash, domainEntryBytes := range domains { - domainBytes, err := mcommon.SerializedDomainEntry(domainEntryBytes) + for domainNameHash, domainEntry := range domains { + domainBytes, err := mcommon.SerializedDomainEntry(domainEntry) if err != nil { return nil, fmt.Errorf("serializeUpdatedDomainEntries | SerializedDomainEntry | %w", err) } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 6a00335f..cea51e29 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -25,9 +25,9 @@ type MapUpdater struct { } // NewMapUpdater: return a new map updater. 
-func NewMapUpdater(root []byte, cacheHeight int) (*MapUpdater, error) { +func NewMapUpdater(config *db.Configuration, root []byte, cacheHeight int) (*MapUpdater, error) { // db conn for map updater - dbConn, err := mysql.Connect(nil) + dbConn, err := mysql.Connect(config) if err != nil { return nil, fmt.Errorf("NewMapUpdater | db.Connect | %w", err) } @@ -98,7 +98,6 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // updateCerts: update the tables and SMT (in memory) using certificates func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { - start := time.Now() keyValuePairs, numOfUpdates, err := mapUpdater.UpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) if err != nil { @@ -107,9 +106,6 @@ func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.C return nil } - end := time.Now() - fmt.Println("(db and memory) time to update domain entries: ", end.Sub(start)) - if len(keyValuePairs) == 0 { return nil } @@ -119,13 +115,10 @@ func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.C return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) } - start = time.Now() _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) if err != nil { return fmt.Errorf("CollectCerts | Update | %w", err) } - end = time.Now() - fmt.Println("(memory) time to update tree in memory: ", end.Sub(start)) return nil } diff --git a/pkg/mapserver/updater/updater_test_adapter.go b/pkg/mapserver/updater/updater_test_adapter.go index 225f1ff7..82d5a26f 100644 --- a/pkg/mapserver/updater/updater_test_adapter.go +++ b/pkg/mapserver/updater/updater_test_adapter.go @@ -11,8 +11,8 @@ import ( type UpdaterTestAdapter MapUpdater -func NewMapTestUpdater(root []byte, cacheHeight int) (*UpdaterTestAdapter, error) { - up, err := NewMapUpdater(root, cacheHeight) +func NewMapTestUpdater(config *db.Configuration, root []byte, 
cacheHeight int) (*UpdaterTestAdapter, error) { + up, err := NewMapUpdater(config, root, cacheHeight) return (*UpdaterTestAdapter)(up), err } diff --git a/pkg/tests/testDB.go b/pkg/tests/testDB.go index 49a62d90..4575ed95 100644 --- a/pkg/tests/testDB.go +++ b/pkg/tests/testDB.go @@ -3,32 +3,61 @@ package tests import ( "context" "fmt" - "os" + "io" "os/exec" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/tools" ) // CreateTestDB creates a new and ready test DB with the same structure as the F-PKI one. func CreateTestDB(ctx context.Context, dbName string) error { - // Import the tools/create_script.sh in a bash session and run its function. - args := []string{ - "-c", - fmt.Sprintf("source ./tools/create_schema.sh && create_new_db %s", dbName), + // The create_schema script is embedded. Send it to the stdin of bash, and right after + // send a line with the invocation of the create_new_db function. + script := tools.CreateSchemaScript() + + // Prepare a simple bash. + cmd := exec.Command("bash") + // We need to write to stdin. + stdin, err := cmd.StdinPipe() + if err != nil { + return err + } + + // Start the command. + err = cmd.Start() + if err != nil { + return err + } + + // Write to stdin the script to import the function. + _, err = io.WriteString(stdin, script) + if err != nil { + return err + } + // and the invocation of the function. + _, err = io.WriteString(stdin, "create_new_db "+dbName) + if err != nil { + return err + } + + // Close stdin so that bash can finish. + err = stdin.Close() + if err != nil { + return err } - cmd := exec.Command("bash", args...) - out, err := cmd.CombinedOutput() + // Get the exit code. 
+ err = cmd.Wait() if err != nil { - fmt.Fprint(os.Stderr, string(out)) return err } return nil } -func RemoveTestDB(ctx context.Context, config db.Configuration) error { - conn, err := Connect(&config) +func RemoveTestDB(ctx context.Context, config *db.Configuration) error { + conn, err := Connect(config) if err != nil { return fmt.Errorf("connecting to test DB: %w", err) } diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index 23152dec..b0ba9604 100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -32,10 +32,10 @@ func mainFunc() int { config := db.NewConfig(mysql.WithDefaults(), db.WithDB(DBName)) // // Create an empty test DB - // err := testdb.CreateTestDB(ctx, DBName) + // err := tests.CreateTestDB(ctx, DBName) // panicIfError(err) // defer func() { - // err := testdb.RemoveTestDB(ctx, *config) + // err := tests.RemoveTestDB(ctx, config) // panicIfError(err) // }() // fmt.Printf("created DB %s.\n", DBName) @@ -59,21 +59,22 @@ func mainFunc() int { // Compare proofs against expected results. data := getSomeDataPointsToTest(ctx, config) - errors := make([]error, 0) + errors := false for _, d := range data { fmt.Printf("checking %s ... ", d.Name) proof, err := res.GetProof(ctx, d.Name) panicIfError(err) fmt.Printf("has %d steps\n", len(proof)) - err = util.CheckProof(proof, d.Name, d.Certs[0]) - if err != nil { - errors = append(errors, err) + // Present domains will surely have certificates. 
+ for _, c := range d.Certs { + err = util.CheckProof(proof, d.Name, c) + if err != nil { + errors = true + fmt.Printf("error found with %s: %s\n", d.Name, err) + } } } - for _, err := range errors { - fmt.Fprintf(os.Stderr, "%s\n", err) - } - if len(errors) > 0 { + if errors { return 1 } diff --git a/tools/script.go b/tools/script.go new file mode 100644 index 00000000..eeb07f29 --- /dev/null +++ b/tools/script.go @@ -0,0 +1,10 @@ +package tools + +import _ "embed" + +//go:embed create_schema.sh +var script string + +func CreateSchemaScript() string { + return script +} From 5ca431a8b9488d49d956f2986e344c3289353a8e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 24 Mar 2023 10:29:00 +0100 Subject: [PATCH 068/187] Temporary changes to easily debug unittest. --- pkg/mapserver/responder/old_responder_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index d79f68a1..d524fa2e 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -49,7 +49,7 @@ func TestGetProof(t *testing.T) { } func TestResponderWithPoP(t *testing.T) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + ctx, cancelF := context.WithTimeout(context.Background(), time.Hour) defer cancelF() dbName := t.Name() @@ -67,7 +67,7 @@ func TestResponderWithPoP(t *testing.T) { mapUpdater.Fetcher.BatchSize = 10000 const baseCTSize = 2 * 1000 - const count = 2 + const count = 1 mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", baseCTSize, baseCTSize+count-1) From 4646f3fbaec8e142b47e2547aa9b56b0f1c4cfd2 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 24 Mar 2023 15:26:34 +0100 Subject: [PATCH 069/187] Clarify comments. 
--- pkg/mapserver/common/structure.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index d5e53255..ff5fcf58 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -18,11 +18,11 @@ type DomainEntry struct { type CAEntry struct { CAName string CAHash []byte - CurrentRPC common.RPC - FutureRPC common.RPC - CurrentPC common.SP - Revocation [][]byte - FutureRevocation [][]byte + CurrentRPC common.RPC // TODO(juagargi) we will have a list of RPCs + FutureRPC common.RPC // + CurrentPC common.SP // TODO(juagargi) we will have a list of PCs + Revocation [][]byte // TODO(juagargi) these are policy revocations + FutureRevocation [][]byte // TODO(juagargi) these are policy revocations DomainCerts [][]byte DomainCertChains [][][]byte } From 1d41bc9a73004d1d051cde66f901cfadd0c9c8b8 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 24 Mar 2023 15:33:36 +0100 Subject: [PATCH 070/187] Adding new UT for the new responder. 
--- pkg/mapserver/responder/old_responder_test.go | 16 +++++------ pkg/mapserver/responder/responder_test.go | 27 +++++++++++++++++++ 2 files changed, 35 insertions(+), 8 deletions(-) create mode 100644 pkg/mapserver/responder/responder_test.go diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index d524fa2e..8610099f 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -20,7 +20,7 @@ import ( ) // TestGetProof: test GetProof() -func TestGetProof(t *testing.T) { +func TestOldGetProof(t *testing.T) { certs := []*x509.Certificate{} // load test certs @@ -34,7 +34,7 @@ func TestGetProof(t *testing.T) { } // get mock responder - responder := getMockResponder(t, certs) + responder := getMockOldResponder(t, certs) require.NoError(t, err) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) @@ -48,8 +48,8 @@ func TestGetProof(t *testing.T) { } } -func TestResponderWithPoP(t *testing.T) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Hour) +func TestOldResponderWithPoP(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() dbName := t.Name() @@ -119,7 +119,7 @@ func TestResponderWithPoP(t *testing.T) { } // TestGetDomainProof: test getDomainProof() -func TestGetDomainProof(t *testing.T) { +func TestOldGetDomainProof(t *testing.T) { certs := []*x509.Certificate{} // load test certs @@ -132,7 +132,7 @@ func TestGetDomainProof(t *testing.T) { certs = append(certs, cert) } - responderWorker := getMockResponder(t, certs) + responderWorker := getMockOldResponder(t, certs) ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() @@ -145,8 +145,8 @@ func TestGetDomainProof(t *testing.T) { } } -// getMockResponder builds a mock responder. 
-func getMockResponder(t require.TestingT, certs []*x509.Certificate) *OldMapResponder { +// getMockOldResponder builds a mock responder. +func getMockOldResponder(t require.TestingT, certs []*x509.Certificate) *OldMapResponder { // update the certs, and get the mock db of SMT and db conn, root, err := getUpdatedUpdater(t, certs) require.NoError(t, err) diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go new file mode 100644 index 00000000..b9c908a4 --- /dev/null +++ b/pkg/mapserver/responder/responder_test.go @@ -0,0 +1,27 @@ +package responder + +import ( + "context" + "testing" + "time" + + "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/stretchr/testify/require" +) + +func TestProofWithPoP(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + defer cancelF() + + dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) + + err := tests.CreateTestDB(ctx, dbName) + require.NoError(t, err) + defer func() { + err = tests.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() +} From 479cf01e5d91c4130b8173af0476b355fac8def2 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Sat, 25 Mar 2023 00:12:10 +0100 Subject: [PATCH 071/187] New stored procedures to coalesce payloads. 
--- tools/create_schema.sh | 100 +++++++++++++++++++++++++++++------------ 1 file changed, 72 insertions(+), 28 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 0fff56ee..5904533a 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -138,51 +138,95 @@ EOF CMD=$(cat < 0 DO - SET ID = LEFT(IDS,32); - CALL calc_domain_payload(ID); - SET IDS = RIGHT(IDS,LENGTH(IDS)-32); - END WHILE; - END$$ +CREATE PROCEDURE calc_several_domain_payloads( IN domain_ids LONGBLOB ) +BEGIN + WHILE LENGTH(domain_ids) > 0 DO + SET @id = LEFT(domain_ids, 32); + SET domain_ids = RIGHT(domain_ids,LENGTH(domain_ids)-32); + CALL cert_IDs_for_domain(@id, @certIDs); + CALL payloads_for_certs(@certIDs, @payload); + REPLACE INTO domain_payloads(id, payload, payload_id) VALUES(@id, @payload, UNHEX(SHA2(@payload, 256))); + END WHILE; +END$$ DELIMITER ; EOF ) echo "$CMD" | $MYSQLCMD + } + + if [ "${BASH_SOURCE[0]}" -ef "$0" ] then echo "This will destroy everything in the fpki database" From 1e83fe620e9d4d1ce0d97771c830b35b05d2bfe2 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Sat, 25 Mar 2023 00:12:53 +0100 Subject: [PATCH 072/187] WIP extend the responder unit test. 
--- pkg/mapserver/responder/responder_test.go | 24 ++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index b9c908a4..7e0ed36c 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -7,21 +7,43 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" ) func TestProofWithPoP(t *testing.T) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + ctx, cancelF := context.WithTimeout(context.Background(), time.Hour) defer cancelF() + // DB will have the same name as the test function. dbName := t.Name() config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) + // Create a new DB with that name. On exiting the function, it will be removed. err := tests.CreateTestDB(ctx, dbName) require.NoError(t, err) defer func() { err = tests.RemoveTestDB(ctx, config) require.NoError(t, err) }() + + // Connect to the DB. + conn, err := mysql.Connect(config) + require.NoError(t, err) + defer conn.Close() + + // Ingest two certificates and their chains. + raw, err := util.ReadAllGzippedFile("../../../tests/testdata/2-xenon2023.csv.gz") + require.NoError(t, err) + certs, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) + require.NoError(t, err) + err = updater.UpdateCertsWithKeepExisting(ctx, conn, names, util.ExtractExpirations(certs), + certs, IDs, parentIDs) + require.NoError(t, err) + + // Final stage of ingestion: coalescing of payloads. + err = updater.CoalescePayloadsForDirtyDomains(ctx, conn, 1) + require.NoError(t, err) } From 80e57069b249e4ea7297b18d39c10e332a8bd2fa Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Tue, 28 Mar 2023 10:01:25 +0200 Subject: [PATCH 073/187] WIP extend the responder UT --- pkg/mapserver/common/structure.go | 3 ++ pkg/mapserver/prover/prover.go | 13 +++++- pkg/mapserver/responder/old_responder_test.go | 40 +++++++++++++++++-- pkg/mapserver/responder/responder_test.go | 39 ++++++++++++++++-- pkg/util/proof.go | 2 +- 5 files changed, 87 insertions(+), 10 deletions(-) diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index ff5fcf58..9665851c 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -8,6 +8,9 @@ import ( ) // DomainEntry: Value of the leaf. The value will be hashed, and stored in the sparse merkle tree +// The design for the v1 version has changed the semantics of this payload. It is computed in DB +// via a stored procedure during ingestion, and retrieved from DB by the responder. +// The domain is identified by the SHA256 of the DomainName in the DB. type DomainEntry struct { DomainName string CAEntry []CAEntry diff --git a/pkg/mapserver/prover/prover.go b/pkg/mapserver/prover/prover.go index 12afce0b..73d6b9f6 100644 --- a/pkg/mapserver/prover/prover.go +++ b/pkg/mapserver/prover/prover.go @@ -6,8 +6,19 @@ import ( "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) +func VerifyProofByDomainOld(proof mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { + if proof.PoI.ProofType == mapCommon.PoP { + //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue + value := common.SHA256Hash(proof.DomainEntryBytes) + return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), + value), nil + } + return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), + proof.PoI.ProofValue, proof.PoI.ProofKey), nil +} + // VerifyProofByDomain: verify the MapServerResponse(received from map server), return the type of proof, and 
proofing result -func VerifyProofByDomain(proof mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { +func VerifyProofByDomain(proof *mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { if proof.PoI.ProofType == mapCommon.PoP { //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue value := common.SHA256Hash(proof.DomainEntryBytes) diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index 8610099f..171891d4 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -44,7 +44,7 @@ func TestOldGetProof(t *testing.T) { proofs, err := responder.GetProof(ctx, cert.Subject.CommonName) require.NoError(t, err) - checkProof(t, *cert, proofs) + checkProofOld(t, *cert, proofs) } } @@ -111,7 +111,7 @@ func TestOldResponderWithPoP(t *testing.T) { } require.NotEmpty(t, responses) - checkProof(t, *cert, responses) + checkProofOld(t, *cert, responses) // ensure that the response for the whole name is a PoP require.Equal(t, mapcommon.PoP, responses[len(responses)-1].PoI.ProofType, "PoP not found for %s", cert.Subject.CommonName) @@ -141,7 +141,7 @@ func TestOldGetDomainProof(t *testing.T) { proofs, err := responderWorker.getProof(ctx, cert.Subject.CommonName) require.NoError(t, err) - checkProof(t, *cert, proofs) + checkProofOld(t, *cert, proofs) } } @@ -184,8 +184,40 @@ func getUpdatedUpdater(t require.TestingT, certs []*x509.Certificate) (db.Conn, return conn, updater.SMT().Root, nil } +func checkProofOld(t *testing.T, cert x509.Certificate, proofs []mapcommon.MapServerResponse) { + t.Helper() + caName := cert.Issuer.String() + require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, + "PoP not found for %s", cert.Subject.CommonName) + for _, proof := range proofs { + require.Contains(t, cert.Subject.CommonName, proof.Domain) + proofType, isCorrect, err := prover.VerifyProofByDomainOld(proof) + require.NoError(t, err) + 
require.True(t, isCorrect) + + if proofType == mapcommon.PoA { + require.Empty(t, proof.DomainEntryBytes) + } + if proofType == mapcommon.PoP { + domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes) + require.NoError(t, err) + // get the correct CA entry + for _, caEntry := range domainEntry.CAEntry { + if caEntry.CAName == caName { + // check if the cert is in the CA entry + for _, certRaw := range caEntry.DomainCerts { + require.Equal(t, certRaw, cert.Raw) + return + } + } + } + } + } + require.Fail(t, "cert/CA not found") +} + // checkProof checks the proof to be correct. -func checkProof(t *testing.T, cert x509.Certificate, proofs []mapcommon.MapServerResponse) { +func checkProof(t *testing.T, cert *x509.Certificate, proofs []*mapcommon.MapServerResponse) { t.Helper() caName := cert.Issuer.String() require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 7e0ed36c..e776d036 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -24,10 +24,10 @@ func TestProofWithPoP(t *testing.T) { // Create a new DB with that name. On exiting the function, it will be removed. err := tests.CreateTestDB(ctx, dbName) require.NoError(t, err) - defer func() { - err = tests.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // defer func() { + // err = tests.RemoveTestDB(ctx, config) + // require.NoError(t, err) + // }() // Connect to the DB. conn, err := mysql.Connect(config) @@ -46,4 +46,35 @@ func TestProofWithPoP(t *testing.T) { // Final stage of ingestion: coalescing of payloads. err = updater.CoalescePayloadsForDirtyDomains(ctx, conn, 1) require.NoError(t, err) + + // // Create a responder. + // responder, err := NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) + // require.NoError(t, err) + + // // Log the names of the certs. 
+ // for i, names := range names { + // t.Logf("cert %d for the following names:\n", i) + // for j, name := range names { + // t.Logf("\t[%3d]: \"%s\"\n", j, name) + // } + // if len(names) == 0 { + // t.Log("\t[no names]") + // } + // } + + // // Check proofs for the previously ingested certificates. + // for i, c := range certs { + // if names[i] == nil { + // // This is a non leaf certificate, skip. + // continue + // } + // for _, name := range names[i] { + // t.Logf("proof for %s\n", name) + // proofChain, err := responder.GetProof(ctx, name) + // assert.NoError(t, err) + // if err == nil { + // checkProof(t, c, proofChain) + // } + // } + // } } diff --git a/pkg/util/proof.go b/pkg/util/proof.go index 638a4de1..68890827 100644 --- a/pkg/util/proof.go +++ b/pkg/util/proof.go @@ -25,7 +25,7 @@ func CheckProof( return fmt.Errorf("proof step %d of %s: subdomain %s not in name %s", i, name, proof.Domain, name) } - proofType, correct, err := prover.VerifyProofByDomain(*proof) + proofType, correct, err := prover.VerifyProofByDomain(proof) if err != nil { return fmt.Errorf("proof step %d of %s: verifying proof: %w", i, name, err) From 0ea092e3145e3bf2a39ea8bde47f988eacff3590 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 29 Mar 2023 12:34:36 +0200 Subject: [PATCH 074/187] Rename some table columns. --- pkg/db/README.md | 5 +-- pkg/db/mysql/mysql.go | 3 +- pkg/db/mysql/read.go | 5 +-- tools/create_schema.sh | 70 +++++++++++++++++++++++++++--------------- 4 files changed, 53 insertions(+), 30 deletions(-) diff --git a/pkg/db/README.md b/pkg/db/README.md index 80d43ea8..e288c3a2 100644 --- a/pkg/db/README.md +++ b/pkg/db/README.md @@ -22,8 +22,9 @@ For performance reasons, no foreign keys exist in any table. - `certs` table 1. `id`: PK, this is the SHA256 of the certificate. - 2. `payload`: BLOB, this is the certificate, serialized. - 3. `parent`: this is the parent certificate, in the trust chain, or `NULL` if root. + 2. 
`parent_id`: this is the parent certificate, in the trust chain, or `NULL` if root. + 3. `expiration`: this is the _not_after_ field of the certificate. + 3. `payload`: BLOB, this is the certificate, serialized. - `domains` table. This table is updated in DB from the `certs` table 1. `cert_id`: PK, SHA256 of the certificate 2. `domain_id`: PK, SHA256 of the domain diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index d6fa81b6..1efb0849 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -213,7 +213,8 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256 // Because the primary key is the SHA256 of the payload, if there is a clash, it must // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. const N = 4 - str := "REPLACE INTO certs (id, parent, expiration, payload) VALUES " + repeatStmt(len(ids), N) + str := "REPLACE INTO certs (id, parent_id, expiration, payload) VALUES " + + repeatStmt(len(ids), N) data := make([]interface{}, N*len(ids)) for i := range ids { data[i*N] = ids[i][:] diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go index d4f59a8f..fe63205f 100644 --- a/pkg/db/mysql/read.go +++ b/pkg/db/mysql/read.go @@ -46,7 +46,7 @@ func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Outp func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ( []byte, error) { - str := "SELECT payload FROM domain_payloads WHERE id = ?" + str := "SELECT payload FROM domain_payloads WHERE domain_id = ?" 
var payload []byte err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&payload) if err != nil && err != sql.ErrNoRows { @@ -69,7 +69,8 @@ func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common if len(domainIDs) == 0 { return nil, nil } - str := "SELECT id,payload FROM domain_payloads WHERE id IN " + repeatStmt(1, len(domainIDs)) + str := "SELECT domain_id,payload FROM domain_payloads WHERE domain_id IN " + + repeatStmt(1, len(domainIDs)) params := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { params[i] = (*id)[:] diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 5904533a..2089c887 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -22,7 +22,7 @@ CMD=$(cat < 0 DO + SET @id = LEFT(IDs, 32); + SET IDs = RIGHT(IDs, LENGTH(IDs)-32); + SET @sql_ids = CONCAT(@sql_ids, "UNHEX('", HEX(@id),"'),"); + END WHILE; + -- Remove trailing comma. + RETURN LEFT(@sql_ids, LENGTH(@sql_ids)-1); +END $$ +DELIMITER ; +EOF + ) + echo "$CMD" | $MYSQLCMD + + + CMD=$(cat < 0 DO + SET @leaves = CONCAT(@leaves, @pending); SET @str = CONCAT( - "SELECT GROUP_CONCAT( - DISTINCT CONCAT('\"', HEX(parent), '\"') - SEPARATOR ',' ) INTO @pending FROM certs WHERE HEX(id) IN (",@pending,");"); + "SELECT GROUP_CONCAT( DISTINCT parent_id SEPARATOR '' ) + INTO @pending FROM certs WHERE id IN (", IDsToSql(@pending), ");"); PREPARE stmt FROM @str; EXECUTE stmt; DEALLOCATE PREPARE stmt; END WHILE; - -- Remove the leading comma from ,CERT1,CERT2... - SET @leaves = RIGHT(@leaves, LENGTH(@leaves)-1); -- Run a last query to get only the DISTINCT IDs, from all that we have in @leaves. 
SET @str = CONCAT( - "SELECT GROUP_CONCAT( - DISTINCT CONCAT('\"', HEX(id), '\"') - SEPARATOR ',' ) INTO @leaves FROM certs WHERE HEX(id) IN (",@leaves,");"); + "SELECT GROUP_CONCAT( DISTINCT id SEPARATOR '' ) + INTO @leaves FROM certs WHERE id IN (", IDsToSql(@leaves), ");"); PREPARE stmt FROM @str; EXECUTE stmt; DEALLOCATE PREPARE stmt; @@ -184,15 +204,14 @@ EOF USE $DBNAME; DROP PROCEDURE IF EXISTS payloads_for_certs; DELIMITER $$ --- expects the cert_ids in HEX, returns the payload in binary. +-- Expects the cert_ids in binary, 32 bytes then 32 more, etc. +-- Returns the payload in binary. CREATE PROCEDURE payloads_for_certs( IN cert_ids LONGBLOB , OUT payload LONGBLOB ) BEGIN SET group_concat_max_len = 1073741824; -- so that GROUP_CONCAT doesn't truncate results - SELECT CAST(cert_ids AS CHAR); SET @str = CONCAT( "SELECT GROUP_CONCAT(payload SEPARATOR '') INTO @payload - FROM certs WHERE HEX(id) IN (", cert_ids, ") ORDER BY expiration,payload;"); - SELECT CAST(@str AS CHAR); + FROM certs WHERE id IN (", IDsToSql(cert_ids), ") ORDER BY expiration,payload;"); PREPARE stmt FROM @str; EXECUTE stmt; DEALLOCATE PREPARE stmt; @@ -213,9 +232,10 @@ BEGIN WHILE LENGTH(domain_ids) > 0 DO SET @id = LEFT(domain_ids, 32); SET domain_ids = RIGHT(domain_ids,LENGTH(domain_ids)-32); + SET @certIDs = ''; CALL cert_IDs_for_domain(@id, @certIDs); CALL payloads_for_certs(@certIDs, @payload); - REPLACE INTO domain_payloads(id, payload, payload_id) VALUES(@id, @payload, UNHEX(SHA2(@payload, 256))); + REPLACE INTO domain_payloads(domain_id, payload_id, payload) VALUES( @id, UNHEX(SHA2(@payload, 256)), @payload ); END WHILE; END$$ DELIMITER ; From eff28daf191ed96123de34723b1e989025d417ba Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 4 Apr 2023 10:00:50 +0200 Subject: [PATCH 075/187] Change coalescing method in DB. 
Moving from a collection of stored procedures to one atomic CTE that retrieves the leaf certificates and their ancestry, and collates their payloads for the domain. --- cmd/ingest/coalescePayloads.go | 4 +- pkg/db/db.go | 14 +++-- pkg/db/mysql/mysql.go | 17 ++---- pkg/db/mysql/read.go | 9 +++ pkg/mapserver/responder/responder_test.go | 2 +- pkg/mapserver/updater/updater.go | 67 ++--------------------- pkg/tests/mockdb_for_testing.go | 6 +- tools/create_schema.sh | 54 +++++++++++++++++- 8 files changed, 90 insertions(+), 83 deletions(-) diff --git a/cmd/ingest/coalescePayloads.go b/cmd/ingest/coalescePayloads.go index 7cf89353..47b5f9bc 100644 --- a/cmd/ingest/coalescePayloads.go +++ b/cmd/ingest/coalescePayloads.go @@ -9,9 +9,9 @@ import ( ) func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { - fmt.Printf("Starting %d workers coalescing payloads for modified domains\n", NumDBWriters) + fmt.Println("Starting coalescing payloads for modified domains ...") // Use NumDBWriters. - err := updater.CoalescePayloadsForDirtyDomains(ctx, conn, NumDBWriters) + err := updater.CoalescePayloadsForDirtyDomains(ctx, conn) if err != nil { return err } diff --git a/pkg/db/db.go b/pkg/db/db.go index 46b04404..8c0c1d81 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -27,11 +27,12 @@ type Conn interface { LoadRoot(ctx context.Context) (*common.SHA256Output, error) SaveRoot(ctx context.Context, root *common.SHA256Output) error - // CoalesceDomainsPayloads takes some IDs (which should come from the dirty table) and - // retrieves the payloads of all certificates for each domain, represented by each ID. - // With those payloads it writes an entry in domain_payloads and computes the SHA256 of it. - // This is done via a stored procedure, to avoid moving data from DB to server. 
- CoalesceDomainsPayloads(ctx context.Context, ids []*common.SHA256Output) error + // ReplaceDirtyDomainPayloads retrieves dirty domains from the dirty list, starting + // at firstRow and finishing at lastRow (for a total of lastRow - firstRow + 1 domains), + // computes the aggregated payload for their certificates and policies, and stores it in the DB. + // The aggregated payload takes into account all policies and certificates needed for that + // domain, including e.g. the trust chain. + ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error ////////////////////////////////////////////////////////////////// // check if the functions below are needed after the new design // @@ -97,5 +98,8 @@ type Conn interface { // Each updated domain represents the SHA256 of the textual domain that was updated and // present in the `updates` table. UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, error) + + //DirtyDomainsCount returns the number of domains that are still to be updated. + DirtyDomainsCount(ctx context.Context) (int, error) CleanupDirty(ctx context.Context) error } diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 1efb0849..743cd680 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -265,19 +265,12 @@ func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs return err } -func (c *mysqlDB) CoalesceDomainsPayloads(ctx context.Context, ids []*common.SHA256Output) error { - - // We receive ids as a slice of IDs. We ought to build a long slice of bytes - // with all the bytes concatenated. - param := make([]byte, len(ids)*common.SHA256Size) - for i, id := range ids { - copy(param[i*common.SHA256Size:], id[:]) - } - // Now call the stored procedure with this parameter. 
- str := "CALL calc_several_domain_payloads(?)" - _, err := c.db.Exec(str, param) +func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { + // Call the stored procedure with these parameters. + str := "CALL calc_some_dirty_domain_payloads(?,?)" + _, err := c.db.Exec(str, firstRow, lastRow) if err != nil { - return fmt.Errorf("coalescing payload for domains: %w", err) + return fmt.Errorf("aggregating payload for domains: %w", err) } return nil } diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go index fe63205f..69b6b161 100644 --- a/pkg/db/mysql/read.go +++ b/pkg/db/mysql/read.go @@ -211,6 +211,15 @@ func (c *mysqlDB) UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, e return domainIDs, nil } +func (c *mysqlDB) DirtyDomainsCount(ctx context.Context) (int, error) { + str := "SELECT COUNT(*) FROM dirty" + var count int + if err := c.db.QueryRowContext(ctx, str).Scan(&count); err != nil { + return 0, fmt.Errorf("querying number of dirty domains: %w", err) + } + return count, nil +} + func (c *mysqlDB) CleanupDirty(ctx context.Context) error { // Remove all entries from the dirty table. str := "TRUNCATE dirty" diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index e776d036..9f47d7b8 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -44,7 +44,7 @@ func TestProofWithPoP(t *testing.T) { require.NoError(t, err) // Final stage of ingestion: coalescing of payloads. - err = updater.CoalescePayloadsForDirtyDomains(ctx, conn, 1) + err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) require.NoError(t, err) // // Create a responder. 
diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index cea51e29..3da42685 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -5,7 +5,6 @@ import ( "context" "fmt" "sort" - "sync" "time" _ "github.com/go-sql-driver/mysql" @@ -274,70 +273,16 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) } -func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn, numDBWriters int) error { - // Get all dirty domain IDs. - domainIDs, err := conn.UpdatedDomains(ctx) +func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { + // How many domains to update? + dirtyCount, err := conn.DirtyDomainsCount(ctx) if err != nil { return err } - - // Start numWriters workers. - errCh := make(chan error) - ch := make(chan []*common.SHA256Output) - wg := sync.WaitGroup{} - wg.Add(numDBWriters) - for i := 0; i < numDBWriters; i++ { - go func() { - defer wg.Done() - for ids := range ch { - err := conn.CoalesceDomainsPayloads(ctx, ids) - if err != nil { - errCh <- err - return - } - } - errCh <- nil - }() - } - - // Split the dirty domain ID list in numWriters - batchSize := len(domainIDs) / numDBWriters - // First workers handle one more ID than the rest, to take into account also the remainder. - for i := 0; i < len(domainIDs)%numDBWriters; i++ { - b := domainIDs[i*(batchSize+1) : (i+1)*(batchSize+1)] - ch <- b - } - // The rest of the workers will do a batchSize-sized item. - restOfWorkersCount := numDBWriters - (len(domainIDs) % numDBWriters) - domainIDs = domainIDs[(len(domainIDs)%numDBWriters)*(batchSize+1):] - for i := 0; i < restOfWorkersCount; i++ { - b := domainIDs[i*batchSize : (i+1)*batchSize] - ch <- b - } - - // Close the batches channel. - close(ch) - - var errs []error - go func() { - // Absorb any errors encountered. 
- for err := range errCh { - if err != nil { - errs = append(errs, err) - } - } - }() - - // And wait for all workers to finish. - wg.Wait() - - // Any errors? - close(errCh) - if len(errs) > 0 { - // There have been errors. Just return the first one. - return fmt.Errorf("encountered %d errors, first one is: %w", len(errs), errs[0]) + // Do all updates at once, in one thread/connection (faster than multiple routines). + if err := conn.ReplaceDirtyDomainPayloads(ctx, 0, dirtyCount-1); err != nil { + return fmt.Errorf("coalescing payloads of dirty domains: %w", err) } - return nil } diff --git a/pkg/tests/mockdb_for_testing.go b/pkg/tests/mockdb_for_testing.go index a1180c97..a38bde0c 100644 --- a/pkg/tests/mockdb_for_testing.go +++ b/pkg/tests/mockdb_for_testing.go @@ -145,6 +145,10 @@ func (d *MockDB) UpdatedDomains(context.Context) ([]*common.SHA256Output, error) func (*MockDB) CleanupDirty(ctx context.Context) error { return nil } -func (*MockDB) CoalesceDomainsPayloads(context.Context, []*common.SHA256Output) error { +func (*MockDB) DirtyDomainsCount(ctx context.Context) (int, error) { + return 0, nil +} + +func (*MockDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { return nil } diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 2089c887..89cbfb12 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -243,7 +243,59 @@ EOF ) echo "$CMD" | $MYSQLCMD -} + CMD=$(cat < Date: Tue, 4 Apr 2023 12:32:22 +0200 Subject: [PATCH 076/187] Create new table domain_certs. Rename some columns as well. 
--- pkg/db/mysql/mysql.go | 71 ++++++++++++++++++++++++++++-------------- tools/create_schema.sh | 48 ++++++++++++++++++++-------- 2 files changed, 83 insertions(+), 36 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 743cd680..7c3e830d 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -135,8 +135,9 @@ func (c *mysqlDB) TruncateAllTables(ctx context.Context) error { tables := []string{ "tree", "root", - "certs", "domains", + "certs", + "domain_certs", "domain_payloads", "dirty", } @@ -175,16 +176,16 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. elems := make([]string, len(data)) for i := range elems { - elems[i] = "SELECT ? AS id" + elems[i] = "SELECT ? AS cert_id" } // The query means: join two tables, one with the values I am passing as arguments (those // are the ids) and the certs table, and for those that exist write a 1, otherwise a 0. // Finally, group_concat all rows into just one field of type string. str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + - "SELECT (CASE WHEN certs.id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + + "SELECT (CASE WHEN certs.cert_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + strings.Join(elems, " UNION ALL ") + - ") AS request left JOIN ( SELECT id FROM certs ) AS certs ON certs.id = request.id" + + ") AS request left JOIN ( SELECT cert_id FROM certs ) AS certs ON certs.cert_id = request.cert_id" + ") AS t" // Return slice of booleans: @@ -213,7 +214,7 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256 // Because the primary key is the SHA256 of the payload, if there is a clash, it must // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
const N = 4 - str := "REPLACE INTO certs (id, parent_id, expiration, payload) VALUES " + + str := "REPLACE INTO certs (cert_id, parent_id, expiration, payload) VALUES " + repeatStmt(len(ids), N) data := make([]interface{}, N*len(ids)) for i := range ids { @@ -233,35 +234,59 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256 } // UpdateDomainsWithCerts updates both the domains and the dirty tables. -func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, - domainNames []string) error { +func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, + domainIDs []*common.SHA256Output, domainNames []string) error { if len(certIDs) == 0 { return nil } - // First insert into domains: - const N = 3 - str := "INSERT IGNORE INTO domains (cert_id,domain_id,domain_name) VALUES " + - repeatStmt(len(certIDs), N) - data := make([]interface{}, N*len(certIDs)) - for i := range certIDs { - data[i*N] = certIDs[i][:] - data[i*N+1] = domainIDs[i][:] - data[i*N+2] = domainNames[i] + + // First insert into domains. Find out which domain IDs are unique, and attach the + // corresponding name to them. + { + uniqueDomainIDs := make(map[common.SHA256Output]string) + for i, id := range domainIDs { + uniqueDomainIDs[*id] = domainNames[i] + } + + str := "INSERT IGNORE INTO domains (domain_id,domain_name) VALUES " + + repeatStmt(len(uniqueDomainIDs), 2) + + data := make([]interface{}, 2*len(uniqueDomainIDs)) + i := 0 + for k, v := range uniqueDomainIDs { + data[2*i] = k[:] + data[2*i+1] = v + i++ + } + _, err := c.db.ExecContext(ctx, str, data...) + if err != nil { + return err + } } - _, err := c.db.Exec(str, data...) 
- if err != nil { - return err + + // Now insert into the domain_certs: + { + str := "INSERT IGNORE INTO domain_certs (domain_id,cert_id) VALUES " + + repeatStmt(len(certIDs), 2) + data := make([]interface{}, 2*len(certIDs)) + for i := range certIDs { + data[2*i] = domainIDs[i][:] + data[2*i+1] = certIDs[i][:] + } + _, err := c.db.Exec(str, data...) + if err != nil { + return err + } } // Now insert into dirty. - str = "REPLACE INTO dirty (domain_id) VALUES " + repeatStmt(len(domainIDs), 1) - data = make([]interface{}, len(domainIDs)) + str := "REPLACE INTO dirty (domain_id) VALUES " + repeatStmt(len(domainIDs), 1) + data := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { data[i] = id[:] } - _, err = c.db.Exec(str, data...) - + _, err := c.db.Exec(str, data...) return err } diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 89cbfb12..7bf8f33f 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -18,27 +18,45 @@ EOF echo "$CMD" | $MYSQLCMD + +CMD=$(cat < Date: Tue, 4 Apr 2023 15:21:32 +0200 Subject: [PATCH 077/187] Re-enable the responder UT. 
--- pkg/mapserver/responder/old_responder_test.go | 2 +- pkg/mapserver/responder/responder_test.go | 66 +++++++++++-------- 2 files changed, 39 insertions(+), 29 deletions(-) diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index 171891d4..2d6973bb 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -221,7 +221,7 @@ func checkProof(t *testing.T, cert *x509.Certificate, proofs []*mapcommon.MapSer t.Helper() caName := cert.Issuer.String() require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, - "PoP not found for %s", cert.Subject.CommonName) + "PoP not found for \"%s\"", cert.Subject.CommonName) for _, proof := range proofs { require.Contains(t, cert.Subject.CommonName, proof.Domain) proofType, isCorrect, err := prover.VerifyProofByDomain(proof) diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 9f47d7b8..5a4a642c 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -7,9 +7,11 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/pkg/util" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -47,34 +49,42 @@ func TestProofWithPoP(t *testing.T) { err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) require.NoError(t, err) - // // Create a responder. - // responder, err := NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) - // require.NoError(t, err) + // Create a responder. + responder, err := NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) + require.NoError(t, err) + + // Log the names of the certs. 
+ for i, names := range names { + t.Logf("cert %d for the following names:\n", i) + for j, name := range names { + t.Logf("\t[%3d]: \"%s\"\n", j, name) + } + if len(names) == 0 { + t.Log("\t[no names]") + } + } - // // Log the names of the certs. - // for i, names := range names { - // t.Logf("cert %d for the following names:\n", i) - // for j, name := range names { - // t.Logf("\t[%3d]: \"%s\"\n", j, name) - // } - // if len(names) == 0 { - // t.Log("\t[no names]") - // } - // } + // Check proofs for the previously ingested certificates. + foundValidDomainNames := false + for i, c := range certs { + if names[i] == nil { + // This is a non leaf certificate, skip. + continue + } - // // Check proofs for the previously ingested certificates. - // for i, c := range certs { - // if names[i] == nil { - // // This is a non leaf certificate, skip. - // continue - // } - // for _, name := range names[i] { - // t.Logf("proof for %s\n", name) - // proofChain, err := responder.GetProof(ctx, name) - // assert.NoError(t, err) - // if err == nil { - // checkProof(t, c, proofChain) - // } - // } - // } + for _, name := range names[i] { + t.Logf("Proving \"%s\"", name) + if !domain.IsValidDomain(name) { + t.Logf("Invalid domain name: \"%s\", skipping", name) + continue + } + foundValidDomainNames = true + proofChain, err := responder.GetProof(ctx, name) + assert.NoError(t, err) + if err == nil { + checkProof(t, c, proofChain) + } + } + } + require.True(t, foundValidDomainNames, "bad test: not one valid checkable domain name") } From 5e0d5cf028fe75d1f50ab9e935feacaa311cb855 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 4 Apr 2023 17:40:24 +0200 Subject: [PATCH 078/187] Two bugfixes: updating domains table and new responder. Bugfix: updating the domains table, copy ID. Bugfix: load the responder with the root. 
--- pkg/db/mysql/mysql.go | 2 +- pkg/domain/domain.go | 16 +++++ pkg/mapserver/prover/prover.go | 19 ++--- pkg/mapserver/responder/old_responder_test.go | 47 ++---------- pkg/mapserver/responder/responder.go | 2 +- pkg/mapserver/responder/responder_test.go | 71 ++++++++++++++++++- pkg/mapserver/updater/updater.go | 6 +- 7 files changed, 106 insertions(+), 57 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 7c3e830d..38ae0bfb 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -255,7 +255,7 @@ func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, data := make([]interface{}, 2*len(uniqueDomainIDs)) i := 0 for k, v := range uniqueDomainIDs { - data[2*i] = k[:] + data[2*i] = append([]byte{}, k[:]...) data[2*i+1] = v i++ } diff --git a/pkg/domain/domain.go b/pkg/domain/domain.go index cf6c1c74..553f7472 100644 --- a/pkg/domain/domain.go +++ b/pkg/domain/domain.go @@ -6,6 +6,7 @@ import ( "regexp" "strings" + ctx509 "github.com/google/certificate-transparency-go/x509" "golang.org/x/net/publicsuffix" ) @@ -138,6 +139,21 @@ func SplitE2LD(domain string) ([]string, error) { return subdomains, nil } +// CertSubjectName extracts one name that identifies the certificate. If there is an entry in the +// Subject.CommonName, that is used. Otherwise, the first non-empty DNSName will be used. +// In the case that no non-empty name was found, an empty string will be returned. +func CertSubjectName(c *ctx509.Certificate) string { + if c.Subject.CommonName != "" { + return c.Subject.CommonName + } + for _, n := range c.DNSNames { + if n != "" { + return n + } + } + return "" +} + // removeWildCardAndWWW: remove www. and *. func removeWildCardAndWWW(domainName string) string { // remove "*." 
diff --git a/pkg/mapserver/prover/prover.go b/pkg/mapserver/prover/prover.go index 73d6b9f6..c9f92055 100644 --- a/pkg/mapserver/prover/prover.go +++ b/pkg/mapserver/prover/prover.go @@ -10,21 +10,22 @@ func VerifyProofByDomainOld(proof mapCommon.MapServerResponse) (mapCommon.ProofT if proof.PoI.ProofType == mapCommon.PoP { //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue value := common.SHA256Hash(proof.DomainEntryBytes) - return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), - value), nil + return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, + common.SHA256Hash([]byte(proof.Domain)), value), nil } - return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), - proof.PoI.ProofValue, proof.PoI.ProofKey), nil + return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, + common.SHA256Hash([]byte(proof.Domain)), proof.PoI.ProofValue, proof.PoI.ProofKey), nil } -// VerifyProofByDomain: verify the MapServerResponse(received from map server), return the type of proof, and proofing result +// VerifyProofByDomain verifies the MapServerResponse (received from map server), +// and returns the type of proof, and proofing result. 
func VerifyProofByDomain(proof *mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { if proof.PoI.ProofType == mapCommon.PoP { //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue value := common.SHA256Hash(proof.DomainEntryBytes) - return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), - value), nil + return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, + common.SHA256Hash([]byte(proof.Domain)), value), nil } - return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), - proof.PoI.ProofValue, proof.PoI.ProofKey), nil + return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, + common.SHA256Hash([]byte(proof.Domain)), proof.PoI.ProofValue, proof.PoI.ProofKey), nil } diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index 2d6973bb..8c083c25 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -6,7 +6,7 @@ import ( "testing" "time" - "github.com/google/certificate-transparency-go/x509" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" @@ -21,7 +21,7 @@ import ( // TestGetProof: test GetProof() func TestOldGetProof(t *testing.T) { - certs := []*x509.Certificate{} + certs := []*ctx509.Certificate{} // load test certs files, err := ioutil.ReadDir("../updater/testdata/certs/") @@ -120,7 +120,7 @@ func TestOldResponderWithPoP(t *testing.T) { // TestGetDomainProof: test getDomainProof() func TestOldGetDomainProof(t *testing.T) { - certs := []*x509.Certificate{} + certs := []*ctx509.Certificate{} // load test certs files, err := ioutil.ReadDir("../updater/testdata/certs/") @@ -146,7 +146,7 @@ func 
TestOldGetDomainProof(t *testing.T) { } // getMockOldResponder builds a mock responder. -func getMockOldResponder(t require.TestingT, certs []*x509.Certificate) *OldMapResponder { +func getMockOldResponder(t require.TestingT, certs []*ctx509.Certificate) *OldMapResponder { // update the certs, and get the mock db of SMT and db conn, root, err := getUpdatedUpdater(t, certs) require.NoError(t, err) @@ -160,7 +160,7 @@ func getMockOldResponder(t require.TestingT, certs []*x509.Certificate) *OldMapR // getUpdatedUpdater builds an updater using a mock db, updates the certificates // and returns the mock db. -func getUpdatedUpdater(t require.TestingT, certs []*x509.Certificate) (db.Conn, []byte, error) { +func getUpdatedUpdater(t require.TestingT, certs []*ctx509.Certificate) (db.Conn, []byte, error) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() @@ -174,7 +174,7 @@ func getUpdatedUpdater(t require.TestingT, certs []*x509.Certificate) (db.Conn, updater.SetSMT(smt) // Update the db using the certs and empty chains: - emptyChains := make([][]*x509.Certificate, len(certs)) + emptyChains := make([][]*ctx509.Certificate, len(certs)) err = updater.UpdateCerts(ctx, certs, emptyChains) require.NoError(t, err) @@ -184,7 +184,7 @@ func getUpdatedUpdater(t require.TestingT, certs []*x509.Certificate) (db.Conn, return conn, updater.SMT().Root, nil } -func checkProofOld(t *testing.T, cert x509.Certificate, proofs []mapcommon.MapServerResponse) { +func checkProofOld(t *testing.T, cert ctx509.Certificate, proofs []mapcommon.MapServerResponse) { t.Helper() caName := cert.Issuer.String() require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, @@ -215,36 +215,3 @@ func checkProofOld(t *testing.T, cert x509.Certificate, proofs []mapcommon.MapSe } require.Fail(t, "cert/CA not found") } - -// checkProof checks the proof to be correct. 
-func checkProof(t *testing.T, cert *x509.Certificate, proofs []*mapcommon.MapServerResponse) { - t.Helper() - caName := cert.Issuer.String() - require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, - "PoP not found for \"%s\"", cert.Subject.CommonName) - for _, proof := range proofs { - require.Contains(t, cert.Subject.CommonName, proof.Domain) - proofType, isCorrect, err := prover.VerifyProofByDomain(proof) - require.NoError(t, err) - require.True(t, isCorrect) - - if proofType == mapcommon.PoA { - require.Empty(t, proof.DomainEntryBytes) - } - if proofType == mapcommon.PoP { - domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes) - require.NoError(t, err) - // get the correct CA entry - for _, caEntry := range domainEntry.CAEntry { - if caEntry.CAName == caName { - // check if the cert is in the CA entry - for _, certRaw := range caEntry.DomainCerts { - require.Equal(t, certRaw, cert.Raw) - return - } - } - } - } - } - require.Fail(t, "cert/CA not found") -} diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index b2a3f6be..27535c5e 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -22,7 +22,7 @@ func NewMapResponder(ctx context.Context, configFile string, conn db.Conn) (*Map var root []byte if rootID, err := conn.LoadRoot(ctx); err != nil { return nil, err - } else if root != nil { + } else if rootID != nil { root = rootID[:] } diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 5a4a642c..3179f441 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -2,17 +2,22 @@ package responder import ( "context" + "strings" "testing" "time" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/netsec-ethz/fpki/pkg/db" 
"github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/domain" + mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/pkg/util" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) func TestProofWithPoP(t *testing.T) { @@ -38,6 +43,7 @@ func TestProofWithPoP(t *testing.T) { // Ingest two certificates and their chains. raw, err := util.ReadAllGzippedFile("../../../tests/testdata/2-xenon2023.csv.gz") + // raw, err := util.ReadAllGzippedFile("../../../tests/testdata/100K-xenon2023.csv.gz") require.NoError(t, err) certs, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) require.NoError(t, err) @@ -45,10 +51,20 @@ func TestProofWithPoP(t *testing.T) { certs, IDs, parentIDs) require.NoError(t, err) - // Final stage of ingestion: coalescing of payloads. + // Coalescing of payloads. err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) require.NoError(t, err) + // Final stage: create/update a SMT. + err = updater.UpdateSMT(ctx, conn, 32) + require.NoError(t, err) + + // And cleanup dirty, flagging the end of the update cycle. + err = conn.CleanupDirty(ctx) + require.NoError(t, err) + + // Now to the test. + // Create a responder. responder, err := NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) require.NoError(t, err) @@ -67,6 +83,7 @@ func TestProofWithPoP(t *testing.T) { // Check proofs for the previously ingested certificates. foundValidDomainNames := false for i, c := range certs { + t.Logf("Certificate subject is: \"%s\"", domain.CertSubjectName(c)) if names[i] == nil { // This is a non leaf certificate, skip. 
continue @@ -88,3 +105,51 @@ func TestProofWithPoP(t *testing.T) { } require.True(t, foundValidDomainNames, "bad test: not one valid checkable domain name") } + +// checkProof checks the proof to be correct. +func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapServerResponse) { + t.Helper() + // caName := cert.Issuer.String() + require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, + "PoP not found for \"%s\"", domain.CertSubjectName(cert)) + for _, proof := range proofs { + // require.Contains(t, cert.Subject.CommonName, proof.Domain) + includesDomainName(t, proof.Domain, cert) + proofType, isCorrect, err := prover.VerifyProofByDomain(proof) + require.NoError(t, err) + require.True(t, isCorrect) + + if proofType == mapcommon.PoA { + require.Empty(t, proof.DomainEntryBytes) + } + // if proofType == mapcommon.PoP { + // domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes) + // require.NoError(t, err) + // // get the correct CA entry + // for _, caEntry := range domainEntry.CAEntry { + // if caEntry.CAName == caName { + // // check if the cert is in the CA entry + // for _, certRaw := range caEntry.DomainCerts { + // require.Equal(t, certRaw, cert.Raw) + // return + // } + // } + // } + // } + } + // require.Fail(t, "cert/CA not found") +} + +// includesDomainName checks that the subDomain appears as a substring of at least one of the +// names in the certificate. 
+func includesDomainName(t *testing.T, subDomain string, cert *ctx509.Certificate) { + names := updater.ExtractCertDomains(cert) + + for _, s := range names { + if strings.Contains(s, subDomain) { + return + } + } + require.FailNow(t, "the subdomain \"%s\" is not present as a preffix in any of the contained "+ + "names of the certtificate: [%s]", subDomain, strings.Join(names, ", ")) +} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 3da42685..129addd1 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -304,12 +304,12 @@ func UpdateSMTfromDomains( } // Update the tree. - _, err = smtTrie.Update(context.Background(), keys, values) + _, err = smtTrie.Update(ctx, keys, values) if err != nil { return err } // And update the tree in the DB. - err = smtTrie.Commit(context.Background()) + err = smtTrie.Commit(ctx) if err != nil { return err } @@ -333,7 +333,7 @@ func UpdateSMT(ctx context.Context, conn db.Conn, cacheHeight int) error { if err != nil { panic(err) } - smtTrie.CacheHeightLimit = cacheHeight + // smtTrie.CacheHeightLimit = cacheHeight // Get the dirty domains. domains, err := conn.UpdatedDomains(ctx) From bbbc13f62e5180ec60699d5c2a94d3644be130b9 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 5 Apr 2023 10:27:38 +0200 Subject: [PATCH 079/187] Rename column domain_payloads.payload to cert_payload. --- pkg/db/mysql/read.go | 4 ++-- tools/create_schema.sh | 12 ++++++------ 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go index 69b6b161..da9615db 100644 --- a/pkg/db/mysql/read.go +++ b/pkg/db/mysql/read.go @@ -46,7 +46,7 @@ func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Outp func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ( []byte, error) { - str := "SELECT payload FROM domain_payloads WHERE domain_id = ?" 
+ str := "SELECT cert_payload FROM domain_payloads WHERE domain_id = ?" var payload []byte err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&payload) if err != nil && err != sql.ErrNoRows { @@ -69,7 +69,7 @@ func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common if len(domainIDs) == 0 { return nil, nil } - str := "SELECT domain_id,payload FROM domain_payloads WHERE domain_id IN " + + str := "SELECT domain_id,cert_payload FROM domain_payloads WHERE domain_id IN " + repeatStmt(1, len(domainIDs)) params := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 7bf8f33f..e987af72 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -68,8 +68,8 @@ CMD=$(cat < Date: Thu, 6 Apr 2023 20:48:17 +0200 Subject: [PATCH 080/187] Redefining DomainEntry. --- pkg/db/db.go | 6 ++++-- pkg/db/mysql/read.go | 20 ++++++++++---------- pkg/mapserver/common/structure.go | 3 ++- pkg/mapserver/prover/prover.go | 14 +++++++++++--- pkg/mapserver/responder/old_responder.go | 4 +++- pkg/mapserver/responder/responder.go | 8 +++++--- pkg/tests/mockdb_for_testing.go | 7 +++++-- 7 files changed, 40 insertions(+), 22 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index 8c0c1d81..a374baaf 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -67,8 +67,10 @@ type Conn interface { // Function for DomainEntries table // ************************************************************ - // RetrieveDomainEntry: Retrieve one key-value pair from domain entries table - RetrieveDomainEntry(ctx context.Context, id common.SHA256Output) ([]byte, error) + // RetrieveDomainEntry retrieves the domain's certificate payload ID and the payload + // itself, given the domain ID. 
+ RetrieveDomainEntry(ctx context.Context, id common.SHA256Output) ( + certPayloadID *common.SHA256Output, certPayload []byte, err error) // RetrieveDomainEntries: Retrieve a list of domain entries table RetrieveDomainEntries(ctx context.Context, id []*common.SHA256Output) ([]*KeyValuePair, error) diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go index da9615db..9b18c0d6 100644 --- a/pkg/db/mysql/read.go +++ b/pkg/db/mysql/read.go @@ -41,18 +41,18 @@ func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Outp return value, err } -// RetrieveDomainEntry: Retrieve one key-value pair from domain entries table -// Return sql.ErrNoRows if no row is round -func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ( - []byte, error) { - - str := "SELECT cert_payload FROM domain_payloads WHERE domain_id = ?" - var payload []byte - err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&payload) +// RetrieveDomainEntry retrieves the domain's certificate payload ID and the payload +// itself, given the domain ID. +func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, domainID common.SHA256Output, +) (*common.SHA256Output, []byte, error) { + + str := "SELECT cert_payload_id, cert_payload FROM domain_payloads WHERE domain_id = ?" 
+ var payloadID, payload []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) if err != nil && err != sql.ErrNoRows { - return nil, fmt.Errorf("RetrieveDomainEntry | %w", err) + return nil, nil, fmt.Errorf("RetrieveDomainEntry | %w", err) } - return payload, nil + return (*common.SHA256Output)(payloadID),payload,nil } // RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index 9665851c..d0650da1 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -57,7 +57,7 @@ type ProofType int const ( PoA ProofType = iota - PoP ProofType = iota + PoP ) // MapServerResponse: response from map server to client @@ -65,6 +65,7 @@ type MapServerResponse struct { Domain string // serialized bytes of DomainEntry DomainEntryBytes []byte `json:"DomainEntryBytes"` + DomainEntryID *common.SHA256Output PoI PoI TreeHeadSig []byte } diff --git a/pkg/mapserver/prover/prover.go b/pkg/mapserver/prover/prover.go index c9f92055..c1e5cc34 100644 --- a/pkg/mapserver/prover/prover.go +++ b/pkg/mapserver/prover/prover.go @@ -1,11 +1,16 @@ package prover import ( + "bytes" + "encoding/hex" + "fmt" + "github.com/netsec-ethz/fpki/pkg/common" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) +// deleteme func VerifyProofByDomainOld(proof mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { if proof.PoI.ProofType == mapCommon.PoP { //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue @@ -21,10 +26,13 @@ func VerifyProofByDomainOld(proof mapCommon.MapServerResponse) (mapCommon.ProofT // and returns the type of proof, and proofing result. 
func VerifyProofByDomain(proof *mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { if proof.PoI.ProofType == mapCommon.PoP { - //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue - value := common.SHA256Hash(proof.DomainEntryBytes) + if !bytes.Equal(proof.DomainEntryID[:], proof.PoI.ProofValue) { + return 0, false, fmt.Errorf("different hash for value %s != %s", + hex.EncodeToString(proof.DomainEntryID[:]), + hex.EncodeToString(proof.PoI.ProofValue)) + } return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, - common.SHA256Hash([]byte(proof.Domain)), value), nil + common.SHA256Hash([]byte(proof.Domain)), proof.DomainEntryID[:]), nil } return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, common.SHA256Hash([]byte(proof.Domain)), proof.PoI.ProofValue, proof.PoI.ProofKey), nil diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go index 530e9e9c..af516fb5 100644 --- a/pkg/mapserver/responder/old_responder.go +++ b/pkg/mapserver/responder/old_responder.go @@ -139,11 +139,12 @@ func (r *OldMapResponder) getProof(ctx context.Context, domainName string) ( } var proofType mapCommon.ProofType + var payloadID *common.SHA256Output domainBytes := []byte{} // If it is PoP, query the domain entry. 
If it is PoA, directly return the PoA if isPoP { proofType = mapCommon.PoP - domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) + payloadID, domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) if err != nil { return nil, fmt.Errorf("GetDomainProof | %w", err) } @@ -159,6 +160,7 @@ func (r *OldMapResponder) getProof(ctx context.Context, domainName string) ( ProofType: proofType, ProofKey: proofKey, ProofValue: ProofValue}, + DomainEntryID: payloadID, DomainEntryBytes: domainBytes, TreeHeadSig: r.signedTreeHead, }) diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index 27535c5e..df6b7d27 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -52,19 +52,20 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, // Prepare proof with the help of the SMT. proofList := make([]*mapCommon.MapServerResponse, len(domainParts)) for i, domainPart := range domainParts { - hash := common.SHA256Hash32Bytes([]byte(domainPart)) - proof, isPoP, proofKey, proofValue, err := r.smt.MerkleProof(ctx, hash[:]) + domainPartID := common.SHA256Hash32Bytes([]byte(domainPart)) + proof, isPoP, proofKey, proofValue, err := r.smt.MerkleProof(ctx, domainPartID[:]) if err != nil { return nil, fmt.Errorf("error obtaining Merkle proof for %s: %w", domainPart, err) } // If it is a proof of presence, obtain the payload. 
+ var payloadID *common.SHA256Output var payload []byte proofType := mapCommon.PoA if isPoP { proofType = mapCommon.PoP - payload, err = r.conn.RetrieveDomainEntry(ctx, hash) + payloadID, payload, err = r.conn.RetrieveDomainEntry(ctx, domainPartID) if err != nil { return nil, fmt.Errorf("error obtaining payload for %s: %w", domainPart, err) } @@ -79,6 +80,7 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, ProofKey: proofKey, ProofValue: proofValue, }, + DomainEntryID: payloadID, DomainEntryBytes: payload, // TreeHeadSig: , TODO(juagargi) } diff --git a/pkg/tests/mockdb_for_testing.go b/pkg/tests/mockdb_for_testing.go index a38bde0c..2d400841 100644 --- a/pkg/tests/mockdb_for_testing.go +++ b/pkg/tests/mockdb_for_testing.go @@ -61,8 +61,11 @@ func (d *MockDB) RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ( return d.TreeTable[id], nil } -func (d *MockDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ([]byte, error) { - return d.DomainEntriesTable[key], nil +func (d *MockDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ( + *common.SHA256Output, []byte, error) { + + id := common.SHA256Hash32Bytes(d.DomainEntriesTable[key]) + return &id, d.DomainEntriesTable[key], nil } func (d *MockDB) RetrieveKeyValuePairTreeStruct(ctx context.Context, id []common.SHA256Output, From 08073546274d4bd3b6f15707232eefa98d8c7a59 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 7 Apr 2023 00:03:59 +0200 Subject: [PATCH 081/187] Changing util functions to read PEM. 
--- pkg/{ => mapserver}/util/proof.go | 0 pkg/util/certificate.go | 113 +++++++++++++++++++++++++++++- pkg/util/certificate_test.go | 94 +++++++++++++++++++++++++ pkg/util/domain.go | 23 ++++++ pkg/util/domain_test.go | 22 ++++++ pkg/util/io.go | 100 +++++++++++++++++++++----- pkg/util/io_test.go | 42 +++++++++++ pkg/util/time.go | 11 +++ 8 files changed, 385 insertions(+), 20 deletions(-) rename pkg/{ => mapserver}/util/proof.go (100%) create mode 100644 pkg/util/certificate_test.go create mode 100644 pkg/util/domain.go create mode 100644 pkg/util/domain_test.go create mode 100644 pkg/util/io_test.go create mode 100644 pkg/util/time.go diff --git a/pkg/util/proof.go b/pkg/mapserver/util/proof.go similarity index 100% rename from pkg/util/proof.go rename to pkg/mapserver/util/proof.go diff --git a/pkg/util/certificate.go b/pkg/util/certificate.go index 115ba8c3..851fdbb0 100644 --- a/pkg/util/certificate.go +++ b/pkg/util/certificate.go @@ -4,7 +4,7 @@ import ( "time" ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/common" ) // ExtractNames returns a list of lists of names. Since each certificate contains several names, @@ -12,7 +12,7 @@ import ( func ExtractNames(certs []*ctx509.Certificate) [][]string { names := make([][]string, len(certs)) for i, c := range certs { - names[i] = updater.ExtractCertDomains(c) + names[i] = ExtractCertDomains(c) } return names } @@ -25,3 +25,112 @@ func ExtractExpirations(certs []*ctx509.Certificate) []*time.Time { } return expirations } + +// UnfoldCerts takes a slice of certificates and chains with the same length, +// and returns all certificates once, without duplicates, and the ID of the parent in the +// trust chain, or nil if the certificate is root. +// The parents returned slice has the same elements as the certificates returned slice. 
+// When a certificate is root, it's corresponding parents entry is nil. +// Additionally, all the names of the leaf certificates are returned in its corresponding position +// in the names slice iff the certificate is a leaf one. If it is not, nil is returned in that +// position instead. +// +// The leaf certificates are always returned at the head of the slice, which means, among others, +// that once a nil value is found in the names slice, the rest of the slice will be nil as well. +func UnfoldCerts(leafCerts []*ctx509.Certificate, chains [][]*ctx509.Certificate, +) ( + certificates []*ctx509.Certificate, + certIDs []*common.SHA256Output, + parentIDs []*common.SHA256Output, + names [][]string, +) { + + // extractNames is the function that extracts the names from a certificate. It starts being + // a regular names extraction, but after processing all leaves it is assigned to a function + // that always returns nil. + extractNames := func(c *ctx509.Certificate) []string { + return ExtractCertDomains(c) + } + // ChangeFcn changes extractNames to always return nil. + changeFcn := func() { + extractNames = func(c *ctx509.Certificate) []string { + return nil + } + } + + for len(leafCerts) > 0 { + var pendingCerts []*ctx509.Certificate + var pendingChains [][]*ctx509.Certificate + for i, c := range leafCerts { + certificates = append(certificates, c) + ID := common.SHA256Hash32Bytes(c.Raw) + certIDs = append(certIDs, &ID) + var parentID *common.SHA256Output + if len(chains[i]) > 0 { + // The certificate has a trust chain (it is not root): add the first certificate + // from the chain as the parent. + parent := chains[i][0] + ID := common.SHA256Hash32Bytes(parent.Raw) + parentID = &ID + // Add this parent to the back of the certs, plus the corresponding chain entry, + // so that it's processed as a certificate. 
+ pendingCerts = append(pendingCerts, parent) + pendingChains = append(pendingChains, chains[i][1:]) + } + parentIDs = append(parentIDs, parentID) + names = append(names, extractNames(c)) + } + changeFcn() // This will change the function `extractNames` to always return nil. + leafCerts = pendingCerts + chains = pendingChains + } + return +} + +// UnfoldCert takes a certificate with its trust chain and returns a ready-to-insert-in-DB +// collection of IDs and payloads for the certificate and its ancestry. +// Additionally, if the payload of any of the ancestors of the certificate is nil, this function +// interprets it as the ancestor is already present in the DB, and thus will omit returning it +// and any posterior ancestors. +func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, + chain []*ctx509.Certificate, chainIDs []*common.SHA256Output, +) ( + certs []*ctx509.Certificate, + certIDs []*common.SHA256Output, + parentIDs []*common.SHA256Output, + names [][]string, +) { + + certs = make([]*ctx509.Certificate, 0, len(chainIDs)+1) + certIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) + parentIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) + names = make([][]string, 0, len(chainIDs)+1) + + // Always add the leaf certificate. + certs = append(certs, leafCert) + certIDs = append(certIDs, certID) + parentIDs = append(parentIDs, chainIDs[0]) + names = append(names, ExtractCertDomains(leafCert)) + // Add the intermediate certs iff their payload is not nil. + i := 0 + for ; i < len(chain)-1; i++ { + if chain[i] == nil { + // This parent has been inserted already in DB. This implies that its own parent, + // the grandparent of the leaf, must have been inserted as well; and so on. + // There are no more parents to insert. 
+ return + } + certs = append(certs, chain[i]) + certIDs = append(certIDs, chainIDs[i]) + parentIDs = append(parentIDs, chainIDs[i+1]) + names = append(names, nil) + } + // Add the root certificate (no parent) iff we haven't inserted it yet. + if chain[i] != nil { + certs = append(certs, chain[i]) + certIDs = append(certIDs, chainIDs[i]) + parentIDs = append(parentIDs, nil) + names = append(names, nil) + } + return +} diff --git a/pkg/util/certificate_test.go b/pkg/util/certificate_test.go new file mode 100644 index 00000000..78cff33a --- /dev/null +++ b/pkg/util/certificate_test.go @@ -0,0 +1,94 @@ +package util + +import ( + "fmt" + "testing" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" + "github.com/stretchr/testify/assert" + + "github.com/netsec-ethz/fpki/pkg/common" +) + +func TestUnfoldCerts(t *testing.T) { + // `a` and `b` are leaves. `a` is root, `b` has `c`->`d` as its trust chain. + a := &ctx509.Certificate{ + Raw: []byte{0}, + Subject: pkix.Name{ + CommonName: "a", + }, + DNSNames: []string{"a", "a", "a.com"}, + } + b := &ctx509.Certificate{ + Raw: []byte{1}, + Subject: pkix.Name{ + CommonName: "b", + }, + DNSNames: []string{"b", "b", "b.com"}, + } + c := &ctx509.Certificate{ + Raw: []byte{1}, + Subject: pkix.Name{ + CommonName: "c", + }, + DNSNames: []string{"c", "c", "c.com"}, + } + d := &ctx509.Certificate{ + Raw: []byte{3}, + Subject: pkix.Name{ + CommonName: "d", + }, + DNSNames: []string{"d", "d", "d.com"}, + } + + certs := []*ctx509.Certificate{ + a, + b, + } + chains := [][]*ctx509.Certificate{ + nil, + {c, d}, + } + allCerts, IDs, parentIDs, names := UnfoldCerts(certs, chains) + + fmt.Printf("[%p %p %p %p]\n", a, b, c, d) + fmt.Printf("%v\n", allCerts) + fmt.Printf("%v\n", IDs) + fmt.Printf("%v\n", parentIDs) + + assert.Len(t, allCerts, 4) + assert.Len(t, IDs, 4) + assert.Len(t, parentIDs, 4) + + // Check payloads. 
+ assert.Equal(t, a, allCerts[0]) + assert.Equal(t, b, allCerts[1]) + assert.Equal(t, c, allCerts[2]) + assert.Equal(t, d, allCerts[3]) + + // Check IDs. + aID := common.SHA256Hash32Bytes(a.Raw) + bID := common.SHA256Hash32Bytes(b.Raw) + cID := common.SHA256Hash32Bytes(c.Raw) + dID := common.SHA256Hash32Bytes(d.Raw) + + assert.Equal(t, aID, *IDs[0]) + assert.Equal(t, bID, *IDs[1]) + assert.Equal(t, cID, *IDs[2]) + assert.Equal(t, dID, *IDs[3]) + + // Check parent IDs. + nilID := (*common.SHA256Output)(nil) + assert.Equal(t, nilID, parentIDs[0], "bad parent at 0") + assert.Equal(t, cID, *parentIDs[1], "bad parent at 1") + assert.Equal(t, dID, *parentIDs[2], "bad parent at 2") + assert.Equal(t, nilID, parentIDs[3], "bad parent at 3") + + // Check domain names. + nilNames := ([]string)(nil) + assert.ElementsMatch(t, []string{"a", "a.com"}, names[0]) // root but also a leaf + assert.ElementsMatch(t, []string{"b", "b.com"}, names[1]) // just a leaf + assert.Equal(t, nilNames, names[2]) // not a leaf + assert.Equal(t, nilNames, names[3]) // not a leaf +} diff --git a/pkg/util/domain.go b/pkg/util/domain.go new file mode 100644 index 00000000..3591d72f --- /dev/null +++ b/pkg/util/domain.go @@ -0,0 +1,23 @@ +package util + +import ( + ctx509 "github.com/google/certificate-transparency-go/x509" +) + +// ExtractCertDomains: get domain from cert: {Common Name, SANs} +func ExtractCertDomains(cert *ctx509.Certificate) []string { + domains := make(map[string]struct{}) + if len(cert.Subject.CommonName) != 0 { + domains[cert.Subject.CommonName] = struct{}{} + } + + for _, dnsName := range cert.DNSNames { + domains[dnsName] = struct{}{} + } + + result := []string{} + for k := range domains { + result = append(result, k) + } + return result +} diff --git a/pkg/util/domain_test.go b/pkg/util/domain_test.go new file mode 100644 index 00000000..de78fedb --- /dev/null +++ b/pkg/util/domain_test.go @@ -0,0 +1,22 @@ +package util + +import ( + "testing" + + 
"github.com/stretchr/testify/require" +) + +func TestExtractCertDomains(t *testing.T) { + raw, err := ReadAllGzippedFile("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + certs, err := LoadCertsFromPEMBuffer(raw) + require.NoError(t, err) + names := [][]string{ + {"instaally.com", "www.instaally.com"}, + {"a.com"}, + } + _ = certs + for i, names := range names { + require.EqualValues(t, names, ExtractCertDomains(certs[i])) + } +} diff --git a/pkg/util/io.go b/pkg/util/io.go index ebfdf4e7..f641ea08 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -7,12 +7,12 @@ import ( "encoding/csv" "encoding/pem" "io" + "io/ioutil" "os" "strings" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" ) const ( @@ -20,7 +20,23 @@ const ( CertChainColumn = 4 ) -func ReadAllGzippedFile(filename string) ([]byte, error) { +type GzipReader struct { + f *os.File + gzreader *gzip.Reader +} + +func (r *GzipReader) Read(buff []byte) (int, error) { + return r.gzreader.Read(buff) +} + +func (r *GzipReader) Close() error { + if err := r.gzreader.Close(); err != nil { + return err + } + return r.f.Close() +} + +func NewGzipReader(filename string) (*GzipReader, error) { f, err := os.Open(filename) if err != nil { return nil, err @@ -29,35 +45,83 @@ func ReadAllGzippedFile(filename string) ([]byte, error) { if err != nil { return nil, err } + return &GzipReader{ + f: f, + gzreader: z, + }, nil +} - raw, err := io.ReadAll(z) +func ReadAllGzippedFile(filename string) ([]byte, error) { + r, err := NewGzipReader(filename) if err != nil { return nil, err } - - err = z.Close() + buff, err := ioutil.ReadAll(r) if err != nil { return nil, err } + return buff, r.Close() +} - err = f.Close() - return raw, err +func LoadCertsFromPEMBuffer(buff []byte) ([]*ctx509.Certificate, error) { + r := bytes.NewReader(buff) + return LoadCertsWithPEMReader(r) } -func 
LoadCertsFromPEM(buff []byte) ([]*ctx509.Certificate, error) { +// LoadCertsWithPEMReader uses the reader to read more data to memory if the PEM parsing cannot +// find an appropriate block. If there exists a PEM block bigger than the current buffer, the +// function will double its size and try again, until all data has been read from the reader. +func LoadCertsWithPEMReader(r io.Reader) ([]*ctx509.Certificate, error) { + storage := make([]byte, 1024) + var buff []byte + bytesPending := true + certs := make([]*ctx509.Certificate, 0) - for len(buff) > 0 { - var block *pem.Block - block, buff = pem.Decode(buff) - if block.Type != "CERTIFICATE" { - continue - } - c, err := ctx509.ParseTBSCertificate(block.Bytes) - if err != nil { + for bytesPending { + // Move len(buff) bytes to beginning of storage. + n := copy(storage[:], buff) + // Set buff to be the remaining of the storage. + buff = storage[n:] + + // Read as much as possible. + newBytes, err := r.Read(buff) + if err != nil && err != io.EOF { return nil, err } + // Set buff to beginning of storage and until last read byte. + buff = storage[:n+newBytes] + + if newBytes == 0 { + if err != io.EOF { + // Storage support might be too small to fit this PEM block. Increase by double + // its size and try again; the copy at the beginning of the loop will restore + // the original contents to this new buffer. + storage = make([]byte, 2*len(storage)) + continue + } + // End of File. + bytesPending = false + } - certs = append(certs, c) + // Proceed to parse as many CERTIFICATE PEM blocks as possible. + var block *pem.Block + for { // do-while block != nil && block.Type == CERTIFICATE + block, buff = pem.Decode(buff) + if block == nil { + // No PEM block found, try to read more data and try again. + break + } + if block.Type != "CERTIFICATE" { + // Wrong PEM block, try to find another one. + continue + } + // It must be a certificate. Complain if parsing fails. 
+ c, err := ctx509.ParseTBSCertificate(block.Bytes) + if err != nil { + return nil, err + } + certs = append(certs, c) + } } return certs, nil } @@ -112,7 +176,7 @@ func LoadCertsAndChainsFromCSV( } // Unfold the received certificates. - payloads, IDs, parentIDs, names = updater.UnfoldCerts(leafs, chains) + payloads, IDs, parentIDs, names = UnfoldCerts(leafs, chains) return } diff --git a/pkg/util/io_test.go b/pkg/util/io_test.go new file mode 100644 index 00000000..a001a23c --- /dev/null +++ b/pkg/util/io_test.go @@ -0,0 +1,42 @@ +package util + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestNewGzipReader(t *testing.T) { + r, err := NewGzipReader("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + + // Read 10 bytes (the file is much bigger). + var buff [10]byte + n, err := r.Read(buff[:]) + require.NoError(t, err) + require.Equal(t, len(buff), n) + + err = r.Close() + require.NoError(t, err) +} + +func TestLoadCertsWithPEMReader(t *testing.T) { + r, err := NewGzipReader("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + + certs, err := LoadCertsWithPEMReader(r) + require.NoError(t, err) + require.Len(t, certs, 100000, "wrong length %d", len(certs)) + + err = r.Close() + require.NoError(t, err) +} + +func TestLoadCertsFromPEMBuffer(t *testing.T) { + raw, err := ReadAllGzippedFile("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + + certs, err := LoadCertsFromPEMBuffer(raw) + require.NoError(t, err) + require.Len(t, certs, 100000, "wrong length %d", len(certs)) +} diff --git a/pkg/util/time.go b/pkg/util/time.go new file mode 100644 index 00000000..b0e505ef --- /dev/null +++ b/pkg/util/time.go @@ -0,0 +1,11 @@ +package util + +import "time" + +func TimeFromSecs(secs int) time.Time { + return time.Unix(int64(secs), 0) +} + +func SecsFromTime(t time.Time) int { + return int(t.Unix()) +} \ No newline at end of file From a292aaa61581f8ced7af6e01a83186a86505cea7 Mon Sep 17 00:00:00 2001 
From: "Juan A. Garcia Pardo" Date: Fri, 7 Apr 2023 01:29:39 +0200 Subject: [PATCH 082/187] Finish changing PEM functions. --- pkg/util/cert_reader.go | 79 ++++++++++++++++++++++++++ pkg/util/cert_reader_test.go | 90 ++++++++++++++++++++++++++++++ pkg/util/domain_test.go | 15 +++-- pkg/util/io.go | 65 --------------------- pkg/util/io_test.go | 21 ------- tests/testdata/2-xenon2023.csv.gz | Bin 0 -> 5921 bytes tests/testdata/3-certs.pem | 81 +++++++++++++++++++++++++++ 7 files changed, 261 insertions(+), 90 deletions(-) create mode 100644 pkg/util/cert_reader.go create mode 100644 pkg/util/cert_reader_test.go create mode 100644 tests/testdata/2-xenon2023.csv.gz create mode 100644 tests/testdata/3-certs.pem diff --git a/pkg/util/cert_reader.go b/pkg/util/cert_reader.go new file mode 100644 index 00000000..bd3d34fd --- /dev/null +++ b/pkg/util/cert_reader.go @@ -0,0 +1,79 @@ +package util + +import ( + "encoding/pem" + "io" + + ctx509 "github.com/google/certificate-transparency-go/x509" +) + +type CertReader struct { + r io.Reader + storage []byte + buff []byte + eofReached bool +} + +func NewCertReader(r io.Reader) *CertReader { + return &CertReader{ + r: r, + storage: make([]byte, 1024*1024), + buff: nil, + } +} + +// Read reads as many certificates as the `certs` slice has or end-of-stream. +func (r *CertReader) Read(certs []*ctx509.Certificate) (int, error) { + certPointers := certs + for len(certPointers) > 0 { + // Move len(buff) bytes to beginning of storage. + n := copy(r.storage[:], r.buff) + // Set buff to be the remaining of the storage. + r.buff = r.storage[n:] + + // Read as much as possible. + newBytes, err := r.r.Read(r.buff) + if err != nil && err != io.EOF { + return 0, err + } + // Set buff to beginning of storage and until last read byte. + r.buff = r.storage[:n+newBytes] + + if newBytes == 0 { + if err != io.EOF { + // Storage support might be too small to fit this PEM block. 
Increase by double + // its size and try again; the copy at the beginning of the loop will restore + // the original contents to this new buffer. + r.storage = make([]byte, 2*len(r.storage)) + continue + } + // End of File. + r.eofReached = true + } + + // Proceed to parse as many CERTIFICATE PEM blocks as possible. + var block *pem.Block + for len(certPointers) > 0 { // do-while block != nil && block.Type == CERTIFICATE + block, r.buff = pem.Decode(r.buff) + if block == nil { + // No PEM block found, try to read more data and try again. + break + } + if block.Type != "CERTIFICATE" { + // Wrong PEM block, try to find another one. + continue + } + // It must be a certificate. Complain if parsing fails. + c, err := ctx509.ParseTBSCertificate(block.Bytes) + if err != nil { + return 0, err + } + certPointers[0] = c + certPointers = certPointers[1:] + } + if r.eofReached{ + break + } + } + return len(certs) - len(certPointers), nil +} diff --git a/pkg/util/cert_reader_test.go b/pkg/util/cert_reader_test.go new file mode 100644 index 00000000..d3101c9c --- /dev/null +++ b/pkg/util/cert_reader_test.go @@ -0,0 +1,90 @@ +package util + +import ( + "os" + "testing" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/stretchr/testify/require" +) + +func TestCertReader(t *testing.T) { + z, err := NewGzipReader("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + + N := 10 + certs := make([]*ctx509.Certificate, N) + r := NewCertReader(z) + n, err := r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + // Close. + require.NoError(t, z.Close()) + // Reading again should yield an error. + n, err = r.Read(certs) + require.Error(t, err) + require.Less(t, n, N) + // Open again. 
+ z, err = NewGzipReader("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + + N = 10 + certs = make([]*ctx509.Certificate, N) + r = NewCertReader(z) + n, err = r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + + N = 20 + certs = make([]*ctx509.Certificate, N) + r = NewCertReader(z) + n, err = r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + + N = 5 + certs = make([]*ctx509.Certificate, N) + r = NewCertReader(z) + n, err = r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + + // Close and open again. + z, err = NewGzipReader("../../tests/testdata/certs.pem.gz") + require.NoError(t, err) + // Read them all. + N = 100000 - 1 + certs = make([]*ctx509.Certificate, N) + r = NewCertReader(z) + n, err = r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + // Last certificate + n, err = r.Read(certs) + require.NoError(t, err) + require.Equal(t, 1, n) + // There should be no other certificate left. + n, err = r.Read(certs) + require.NoError(t, err) + require.Equal(t, 0, n) + // Close. 
+ require.NoError(t, z.Close()) +} + +func TestCertReaderOneByOne(t *testing.T) { + f, err := os.Open("../../tests/testdata/3-certs.pem") + require.NoError(t, err) + + r := NewCertReader(f) + N := 3 + + cs := make([]*ctx509.Certificate, 1) + for i := 0; i < N; i++ { + t.Logf("iteration %d", i) + n, err := r.Read(cs) + require.NoError(t, err) + require.Equal(t, 1, n) + } + + require.NoError(t, f.Close()) +} diff --git a/pkg/util/domain_test.go b/pkg/util/domain_test.go index de78fedb..25e4cede 100644 --- a/pkg/util/domain_test.go +++ b/pkg/util/domain_test.go @@ -3,19 +3,26 @@ package util import ( "testing" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/require" ) func TestExtractCertDomains(t *testing.T) { - raw, err := ReadAllGzippedFile("../../tests/testdata/certs.pem.gz") + z, err := NewGzipReader("../../tests/testdata/certs.pem.gz") require.NoError(t, err) - certs, err := LoadCertsFromPEMBuffer(raw) + r := NewCertReader(z) + + certs := make([]*ctx509.Certificate, 5) + n, err := r.Read(certs) require.NoError(t, err) + require.Equal(t, len(certs), n) names := [][]string{ {"instaally.com", "www.instaally.com"}, - {"a.com"}, + {"secure.jaymanufacturing.com"}, + {"*.ibm.xtify.com", "ibm.xtify.com"}, + {"flowers-to-the-world.com"}, + {"www.knocknok-fashion.com","knocknok-fashion.com"}, } - _ = certs for i, names := range names { require.EqualValues(t, names, ExtractCertDomains(certs[i])) } diff --git a/pkg/util/io.go b/pkg/util/io.go index f641ea08..6a0411f4 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -5,8 +5,6 @@ import ( "compress/gzip" "encoding/base64" "encoding/csv" - "encoding/pem" - "io" "io/ioutil" "os" "strings" @@ -63,69 +61,6 @@ func ReadAllGzippedFile(filename string) ([]byte, error) { return buff, r.Close() } -func LoadCertsFromPEMBuffer(buff []byte) ([]*ctx509.Certificate, error) { - r := bytes.NewReader(buff) - return LoadCertsWithPEMReader(r) -} - -// LoadCertsWithPEMReader uses the 
reader to read more data to memory if the PEM parsing cannot -// find an appropriate block. If there exists a PEM block bigger than the current buffer, the -// function will double its size and try again, until all data has been read from the reader. -func LoadCertsWithPEMReader(r io.Reader) ([]*ctx509.Certificate, error) { - storage := make([]byte, 1024) - var buff []byte - bytesPending := true - - certs := make([]*ctx509.Certificate, 0) - for bytesPending { - // Move len(buff) bytes to beginning of storage. - n := copy(storage[:], buff) - // Set buff to be the remaining of the storage. - buff = storage[n:] - - // Read as much as possible. - newBytes, err := r.Read(buff) - if err != nil && err != io.EOF { - return nil, err - } - // Set buff to beginning of storage and until last read byte. - buff = storage[:n+newBytes] - - if newBytes == 0 { - if err != io.EOF { - // Storage support might be too small to fit this PEM block. Increase by double - // its size and try again; the copy at the beginning of the loop will restore - // the original contents to this new buffer. - storage = make([]byte, 2*len(storage)) - continue - } - // End of File. - bytesPending = false - } - - // Proceed to parse as many CERTIFICATE PEM blocks as possible. - var block *pem.Block - for { // do-while block != nil && block.Type == CERTIFICATE - block, buff = pem.Decode(buff) - if block == nil { - // No PEM block found, try to read more data and try again. - break - } - if block.Type != "CERTIFICATE" { - // Wrong PEM block, try to find another one. - continue - } - // It must be a certificate. Complain if parsing fails. - c, err := ctx509.ParseTBSCertificate(block.Bytes) - if err != nil { - return nil, err - } - certs = append(certs, c) - } - } - return certs, nil -} - // LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of the leaf certificate // payload, its ID, its parent ID, and its names, for each certificate and its ancestry chain. 
// The returned names contains nil unless the corresponding certificate is a leaf certificate. diff --git a/pkg/util/io_test.go b/pkg/util/io_test.go index a001a23c..c9382063 100644 --- a/pkg/util/io_test.go +++ b/pkg/util/io_test.go @@ -19,24 +19,3 @@ func TestNewGzipReader(t *testing.T) { err = r.Close() require.NoError(t, err) } - -func TestLoadCertsWithPEMReader(t *testing.T) { - r, err := NewGzipReader("../../tests/testdata/certs.pem.gz") - require.NoError(t, err) - - certs, err := LoadCertsWithPEMReader(r) - require.NoError(t, err) - require.Len(t, certs, 100000, "wrong length %d", len(certs)) - - err = r.Close() - require.NoError(t, err) -} - -func TestLoadCertsFromPEMBuffer(t *testing.T) { - raw, err := ReadAllGzippedFile("../../tests/testdata/certs.pem.gz") - require.NoError(t, err) - - certs, err := LoadCertsFromPEMBuffer(raw) - require.NoError(t, err) - require.Len(t, certs, 100000, "wrong length %d", len(certs)) -} diff --git a/tests/testdata/2-xenon2023.csv.gz b/tests/testdata/2-xenon2023.csv.gz new file mode 100644 index 0000000000000000000000000000000000000000..87863435d939875771d219292a0a67f0fde903f7 GIT binary patch literal 5921 zcmV++7vAU}iwFnu12QdmWo~b7GB7eTE@N|c0M&WNvcpKS-Tr;W?9nsuPKsH; z1B4^I*};2568`IjdvEu=?jEDiO40%mWL1IXGLK2t%QAod2MA`%->PY<`XZT|&)>3X z0Q;%>fW-=fM=?$#0wLHh&@UNM7DZl?G)_|xj+AARmI#ueaDir2nov|imk@?x2}UGP zP8Kj(Kna!?SxF;lfh51^iXtEv$1#*yF}uPtl_IcbkD+&pb3B z-BEUh`R?O|qvxXFfRX^-vcan?v~S|@;bo(LAZ02t$-P>b?E+d zw<*7C)Aze=uOjT=F6O~jhR-sQ%XGb`yTY#R-}9+FS9@`A^btZYiqJ#%+;ilxzs+~| z8B;pYpA^aGIMgfe#7}M5*WayK;p?T_kb zk336z&VQK0Mvg7I&rT5x_P!HVY5#owg2*r?$5<5e)z4Mx{aIjMAHjcj8Wi$^ZnER5 zhpD?B2OwiW-pjJqul>9NHuF)5YzPDNqm}yhct?B@HXpueO-PJafvNGqx zX>+!bTV`6bmPQD`eNv+_Ld~#+QE{qyXRwGaAu2UpN&`(Vx#}s1)}*J+a781S3S_;4 zdD=KLwk&9%!_oduY^1t~%FQz0bWhx&xjX?=ySrBTj96NYC7?k= z(M20=pY+4Lvi!-xNeN8ot~$ee@B6gBK=}1;-^ZhvWNDyfKnW~`XDo)DFon(9J2wJs z7ryS%hFr#y!8=KIwh~zID_?ky?U?Vrid7^(~2Y& z+Uq+83Z0{N&59Itk6707WTp}VC8(iXW(e)1+sMJ8BxK)-gU>lfd%lq0ardaxJJvMF 
zu)K>gzj8&kD3&QHfo1eNwxV6PzW3F$+_bCA?|y$5ezzB$W2@?YpNGg=8PDv$A7}0I z{{0&*%T~KMF&-Ynv93JiKtDe~ReQ!1ljhqK-0~j%bH*jhe|FPl(1{N!9gB`cwxNul zHVrYlj2G5S_M24dTP^&-5^XTH)S-E=+cy`1*Cc#wDQcZMufTWTnet5jvp-A_E6W3& zFi)%qwkdSf4eTIT5^oq5-vMRkt>~dnu~KiD@s)F|#qiaA*UZBKQ>eS*Wg;A2rIdJJ zVa~E+E#=@CdcCSUYTV5vEGb*dZlNQ>RSoNPHevvfI4bBu|3QV(w`XeIp2CBO+%Q{2 zi@-shCeL@f+K+*V5Ed1TfmuYHCRxbu`sOGNFF(Td;bb;qr&9_pfpg*r8Kuy!5meoq zj7~k?j+G<=H2BvNf~YW&dQ+NL6!LJUOlOZphA}ELneJGMk?2Ud1{u!6n>wOKfIpo` zwwMfg*da+KK0>Z40znbMHQrE)qIR*Rt&WPxs0o(VV~YU{sz+O!%-RVumoxcdu!iLv zrY&U{JLR677wb$B35Fc>kGCZPXV@*DB4p)wyN@4X)zp$|;qPAr#OGfG)C2z@Af{rN z)%|~wfS6t9z2lo~_&Y(wZvujU+LX(1yRWuAXa!AYy6-iywaz)7cL*(G{SeaRa^9VM zvOT8(ea5cf>a6gy{zF&)FgGtWcPf7OF^3-ies4EX;L+fpzWjhQ)T+EMz~Jw5bp9!b z*VB)Iq--aZa?OTaa#&SNaH?B%f>sd<`#2PhlQjKE!5wRuY+f( zlc(zwu?6o(RH(xoWv`&Xp8Ms~VDAVg$Vs%nXHV|Avzp3&)?2WQs*cVmytSXT6enAF zC<3i9`^#Cwfd`gXB!Zfovv9}3Viyh46P|--)iDPhrixT-rbw)t#}If?^LPm!;gX~m zZo2WAXEf?{=yk@-XLZs3bPV@-P~E_Z8H;!GmPG_<=4;)~?;f>VorEWem-j+}kqtbH zwufHzD>rUN8l|@5Zg6Cn#JEX7-{l_wHCZ+iL^H?(m*Z@>)!)xa@%cn5t{2P-=&s+) zao=$+>|dl}DLNB<0r7oT`p*A=;`4`iEdB;}NoSy&U|y1=q$vM=zaGQfWW`vV$cu=H zOSGK`iZ~ev>7H3%GbT+VF0JL+juFZ|)`4R$2P&??*u!y>fkZc4-fBU!EYh3&okw)6`b zV+({qg(#vAE!*7AhUq6?T+Oj*1=AKDsPg?xY62$P%LmPZmF(GhC>8FW)uZO6pW%tP9p_QB~(hGIg;meLFEvQ!%$J6c$t+rp21&mDzaZ` zzMj9qsmvCp$ds_H{u88rz91F+6Qs&kD?GnK>>HApU2-LQjqgXdvt6`))zAzs->^8Q zl2`fOr*dVx(^}!ab?WE6eUB`}KX14ilEA3*uW|AXd!w;O;iFFn1J@mWSFGRk@oe0% zH^vSt+}gY@9QKipcApJ!A4~oB`?|{%;tt(fpFwXuf86|z>3p7iwHf>kkbJg(L9^c8 zG$HAK@&GxfThtdce$6NIOswA;y-N}7xHmKW3eCC<<=d^vKleKLFHM#N@f;x{6n|;= zL0mJGt#3WHMre0PE))K|=eYteAcmX4`sJPGy^j0&@+JEXwauT?mH9PY`JMUdSDEQM zbx~w!Lbj|=rFgXhRFztC^_1|FAy-rM{*0-$Iv)x@1zEV;W-qs!0Q7Lo4`oHR#8KP_lia@ z1HiX8(SK^u;MzmJN~k__(H8?h5^c_YsLCCdby#_5l}M)2@j@{8d7tee@p_A7C;J{i zjW0dGvf~4IEf_5YHL%_aGLAO;Dshmw9PWqk8n@Mbw`qho?)Ge(&vCu|=~W^b%(`N8 zG1V;&7^$t2`>bPV7xrH@Ewk@j^#@J6D?fcU%kH~0DN#HeTYkrIcEV74KZ)cmIYOVQ zWUXJ`QNMHJcd*Ur4D(NVmvDChiB+DU_X4gm<=USMBXVKbS>^MXm&`n(pz0WpGmzlE$YUQY1x6Acgd 
z_p0;ESgi4LkS6?AjC;>;#!R1h3U9rX`GhwXY~Y^chhzYU0$P~4#Z#J1qk?5PAsfJg ztD0*)amHvz8u6Yqpfs+{J8mYjQ=hY;N>Te>g$kyKMI0Pt6Q4j)C zMH_&uz+{AJxLUi;-p4K+E~h)uk5+VZVih+Afv3v`stEjO(G%-ZHTf{MxT zZjs|srjQ&5_8shSB9;~UEB(ACyn;!#C_|YZG8NK@aLpmKwk0okwY^xrAPU^K6e|}pJdrWD-uQ;$7eoPg z{-W8c7^!S*qZ_`$K&hEd>2mMfkQIzvLRH92E>XIiY-uf~z@bOMj_o13lidH*8Syc<0VjG$)w1(a1MOYWL%)sS(P3 zN6U(dF!e;o9lrnl+Xsl}7jco?Z^Xq3jPS1v z*I;nTjPH&u9qcV_g-~a`A3~~j(?;kmMvh()eN7P#DCV$3ahdYx zy^nsU$?u9d_C3x%qX54$Nj&2KZwvRo3GT*^$PN56M!d?F{7)2il^f8v`LwsJ`E$`K zJB;hANEKZ_o;fSEUa8$Afx!?>MJC=D%I?knh*@N_c9W&9jrB?T<0>!1C#Cm+tK#VJ zr(}DNBQ6Dl6s3$N`Zu(_M802OKFy-a_hH-v_yuEW5P;kZM6V(?q0VpJ3f*7ZHSfex zEG=SSc|V_@Mzms|F=rr`y`8>R^e}8P@5P+oMCYB0w}>IVegvKiJ%`+db6EXxp7g|_ z_LJlid*N-zdh407>2`xYsIUmR@FTr+)#dE^u%&{2uNK0l11=ot;0NHeLXh~TSEqkI zGy?eWHl;P-(+vRLqwBK@otBClln4}nc&-e_-m*kk$QZX*P`+g*N#tT791ln&wl*Ux zZ!~K1aW_kkPi~o7vd@%f@EC+h$ux|0nBp}ivXzKPd6Q=5i>ObUDK97)bVr^ z?2OJOaGKB5w&jjrIQ8+mR9i`vIT0WxpaF#Pf@yry%06Ou?A4Wp_CltIL9!=q;q3Lk zs5N@FB3rU!U%zw@w>?laSTQ2?1M^M_Ff%*fdmTZgq-1&CFagzD@ z7RXYw|625encZ#wSTOpJB>pGDr(l}Qy|FC>0Z$qZDWydZ^^OjN8_RwQh3+jF?<~1R z!@nxBe-(Kvf#JkfgT3%19PH6aZ+^UBe|IB!?nOSrQi-rAr$)_+xM=y2H=9u|V8d9* zqR4k-6pX}$h5r=57VfI;LhN+(4lBotAlHf&+`+wxPx%OL7+wgwZeOe~w&MYP7IGxs z_5!dw)y``bmplwR={oo>=p&GnVR$c>$Rt#0_w=&SW$Ehk5idN(R8_jQs3Kojf`IHFnZRyS9q<+Ev#>&^O+nO z8FCwXYWxJ_7zyZ_l=~QGZZA!+-NAZC^1E-EjK@cjV*rkGHc4nFh?mSk1Wn3Wc4sge z^?}XvbL)^s5^@tF3rI^X1whJ_Bc=**cq#PyPbyLP{<3!H=41~pE$4N+fi;ekl^1ny z%TIIzSw`9a{y)FS`*Od?JNS*fAIyVvFHN>J^>O~Ig2ipM{*yyfe(%u4i}8PRXngk% zhvqvctLL{G*B#*}5x@VLssFdm?Y{|9zX=k)U1r`Zg59UmjamYH6KxNrsPb!3?b~%8 zx5K<$XLj7~6E=9`%XLDx3;n7x|D)~ypFQ1o)%?3cq4HBGtiB3`_g$(}AU~BVT&m;< zeC27(T+$*Z9<}rWH%Ea#Uu{r7Lf=rCbbh9W%z(_~G+U9e11h>a&6ia`JYaH=Qy|+? 
z)u|%ms@x7Q5b5zA%_n)TY4n&E#N>9!RI~?=;KqlUuS{vUO=GYm>|UKzu`gO|{Ia=q zzVSHxmC~1^tW)3O!nlN>1k@1EMT*qBp>aZAu)!*d5)yUlikXCw}Cn*%;CQ$O`9e3n2d zApD+|n3>r)6Kn1|zh>&>+}A{oCy*&@47+U>x0fnm4_8lKd<;7kox;E@w|^;GOeN5foh}?%zvhkX=z_#ELG_=<>fd8+U0_{?|0>G+L(TL>Du`O1y90FT_bypxpUz>l`g@!2khc?qoQ z%v(lqf8YZFPPg?%$80>WDp$`i4YZMyLeMAR=bf?Eyh;-(U&ZYpqa=O>7ikpXAtnBV zf+s8=o-k~~<}}S@>2TKEvgD${hhwuxQ|e1L%^-z` zp22gQiCx{+v+(we>3E1@D?c?1H5lO8P3j4~-g#ohvHpS|`3z_Wcordoc5wZc@ zc%n~F+`yPEBj;?tGRnjFtf0HkQf)w$+&0>NW~T;VTkMg%PmQcn8e-FPw+uEwe>7Ni z;e8x=4U75ltGY@Wz!D&Ohw+2VD*(ePr3^&*FzQWK&kyjySk|CQ=U{Zr{G_-ciLyW) zIAt^4phSV(o==^)Se)^mJDeg6^x+q4;9k@5h|Abh-`A;KROKLvzqWQ{z^K~wvV{t2 z*%c})A!_Ke1^#aD^^we2!uk>(A2%XQ>ZPo5j9YzJJJBUz@Tv6AULtamkw&*+m8z(2 z2VVBmUc5hlU8cV-b@A7IdiVKPrTQ0`z)2jTAd38{ZeutNp%C;p=)e9KY>xaet|9;c DNZ6|a literal 0 HcmV?d00001 diff --git a/tests/testdata/3-certs.pem b/tests/testdata/3-certs.pem new file mode 100644 index 00000000..9adae4b9 --- /dev/null +++ b/tests/testdata/3-certs.pem @@ -0,0 +1,81 @@ +-----BEGIN CERTIFICATE----- +MIIEGqADAgECAghmxiim3d1GETANBgkqhkiG9w0BAQsFADCBtDELMAkGA1UEBhMC +VVMxEDAOBgNVBAgTB0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNV +BAoTEUdvRGFkZHkuY29tLCBJbmMuMS0wKwYDVQQLEyRodHRwOi8vY2VydHMuZ29k +YWRkeS5jb20vcmVwb3NpdG9yeS8xMzAxBgNVBAMTKkdvIERhZGR5IFNlY3VyZSBD +ZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjAeFw0xNzExMDcwNTU2MDBaFw0yMTAy +MDQwNTU2MDBaMDsxITAfBgNVBAsTGERvbWFpbiBDb250cm9sIFZhbGlkYXRlZDEW +MBQGA1UEAxMNaW5zdGFhbGx5LmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBAJESNSvAqeUBIQz1L2JC7/iVykuma+t9Z02rJF641mujX0nn2WkOYZP+ +ins5+CWXFf64NgLVn0Jq+4XfIWtOwgcSPhIdjtG7uZ5a1baArUlHhhx0NZ2u1sI2 +W2FkvuhRj7eWCTQ/r4iu0kqDIt1Jfqh/mk6RE5K0Yd32ZWzecdoBLu5omKz2YulA +Amz4AdGUxtBJH/cwbLKwPYPrJ2zK8q75e0yyVw4mvOmbL1M9BcMKSffwjY0i/d1O +1SIcVVPCh4v4bqkfayDZRJTrQA8TH9IZIZgrqtYCCy2PguKjtKBN3+MYFG5nERQT +E6EK5IIkFjS03+rkT9bi3t8R7LUq04sCAwEAAaOCAb4wggG6MAwGA1UdEwEB/wQC +MAAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMA4GA1UdDwEB/wQEAwIF 
+oDA3BgNVHR8EMDAuMCygKqAohiZodHRwOi8vY3JsLmdvZGFkZHkuY29tL2dkaWcy +czEtNzg0LmNybDBdBgNVHSAEVjBUMEgGC2CGSAGG/W0BBxcBMDkwNwYIKwYBBQUH +AgEWK2h0dHA6Ly9jZXJ0aWZpY2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeS8w +CAYGZ4EMAQIBMHYGCCsGAQUFBwEBBGowaDAkBggrBgEFBQcwAYYYaHR0cDovL29j +c3AuZ29kYWRkeS5jb20vMEAGCCsGAQUFBzAChjRodHRwOi8vY2VydGlmaWNhdGVz +LmdvZGFkZHkuY29tL3JlcG9zaXRvcnkvZ2RpZzIuY3J0MB8GA1UdIwQYMBaAFEDC +vSeOzDSDMKIz1/tss/C0LIDOMCsGA1UdEQQkMCKCDWluc3RhYWxseS5jb22CEXd3 +dy5pbnN0YWFsbHkuY29tMB0GA1UdDgQWBBRnKvzxhce006PLmQzKlasy9Jl55g== +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIFWqADAgECAhB1stui8DwYLAkkAwqQ9FIsMA0GCSqGSIb3DQEBCwUAMEcxCzAJ +BgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMSAwHgYDVQQDExdSYXBp +ZFNTTCBTSEEyNTYgQ0EgLSBHMjAeFw0xNzExMDEwMDAwMDBaFw0yMTAxMzAyMzU5 +NTlaMCYxJDAiBgNVBAMMG3NlY3VyZS5qYXltYW51ZmFjdHVyaW5nLmNvbTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAIw4l7USzKRX6hw4JidiXN1F3mTi +yrB4JmCv9S9nA3nULtshk9OOHJ4fS39vHz3/BzC9UoAjzv6+nqgkP2HadR23+AuS +qMg9ZOnWVrxT42CZPiGC8eB/aySgr1wtKlxapcOO/8/ZZl4vBY+EMxLM146+I0GH +3wSNvUoyUaWizlpkp4gLCMkJ/PltTvAc99+H4o+xw+TLxII1Dtcjf1hA3sFjCb+1 +P8JDE8Q/ZJJmccENG8QH4fDSjTC16DNoqPI7Ug5nV6TQAEGhScAGokMCrOb8OI4B +I9UguhkxvXpfWcJFELpxUmepaSfFvJh2oE1MYSbY9zMpJGSkySUDHlDDSvMCAwEA +AaOCA3kwggN1MCYGA1UdEQQfMB2CG3NlY3VyZS5qYXltYW51ZmFjdHVyaW5nLmNv +bTAJBgNVHRMEAjAAMCsGA1UdHwQkMCIwIKAeoByGGmh0dHA6Ly9ncy5zeW1jYi5j +b20vZ3MuY3JsMG8GA1UdIARoMGYwZAYGZ4EMAQIBMFowKgYIKwYBBQUHAgEWHmh0 +dHBzOi8vd3d3LnJhcGlkc3NsLmNvbS9sZWdhbDAsBggrBgEFBQcCAjAgDB5odHRw +czovL3d3dy5yYXBpZHNzbC5jb20vbGVnYWwwHwYDVR0jBBgwFoAUTPS/6Du+wiTz +G0c7tW5IjharrxIwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB +BggrBgEFBQcDAjBXBggrBgEFBQcBAQRLMEkwHwYIKwYBBQUHMAGGE2h0dHA6Ly9n +cy5zeW1jZC5jb20wJgYIKwYBBQUHMAKGGmh0dHA6Ly9ncy5zeW1jYi5jb20vZ3Mu +Y3J0MIIB9wYKKwYBBAHWeQIEAgSCAecEggHjAeEAdgDd6x0reg1PpiCLga2BaHB+ +Lo6dAdVciI09EcTNtuy+zAAAAV94ZnasAAAEAwBHMEUCIQCVX/9rVm20ltrwNgBc +49N6QYEejKihv2y19d9t3tfc0AIgZQULVl6LphqTuvVNozkwapjmRzGmffMnM5A0 
+9MesoTkAdQCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb37jjd80OyA3cEAAAAV94Znb2 +AAAEAwBGMEQCIFz4vndm6TJB7cU2iWwzrQ/3qQCK8QTZlS5Ei3XiVo11AiAPlkQs +oGXdEATm7FvMTmdn57+kaQkCmCMGhm3icNNgwAB3AO5Lvbd1zmC64UJpH6vhnmaj +D35fsHLYgwDEe4l6qP3LAAABX3hmeLoAAAQDAEgwRgIhALqYxxYX7Mm5Pk77C+47 +CEQiBTHJb38cUS2n4e/YKAFYAiEAgOqgGF/GyG9KR/ueWrC5jWW0qMXisZ1RQdBd +qrsTpSEAdwC8eOHfxfY8aEZJM02hD6FfCXlpIAnAgbTz9pF/Ptm4pQAAAV94ZnfU +AAAEAwBIMEYCIQDzYXNoqvhgBGnFp0iuD5NYdeAtz5xaq505qgLqF/lr+wIhALUj +Tt7qh45EQcRhHVsXl6wSOxIpXLuWxvQCl+BWyCe9 +-----END CERTIFICATE----- +-----BEGIN CERTIFICATE----- +MIIEbqADAgECAhADOBNrhFsNq0dRibJsEFjzMA0GCSqGSIb3DQEBCwUAMHAxCzAJ +BgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xLzAtBgNVBAMTJkRpZ2lDZXJ0IFNIQTIgSGlnaCBBc3N1cmFu +Y2UgU2VydmVyIENBMB4XDTE3MTAxNzAwMDAwMFoXDTIxMDExNDEyMDAwMFowgZIx +CzAJBgNVBAYTAlVTMREwDwYDVQQIEwhOZXcgWW9yazEPMA0GA1UEBxMGQXJtb25r +MTQwMgYDVQQKEytJbnRlcm5hdGlvbmFsIEJ1c2luZXNzIE1hY2hpbmVzIENvcnBv +cmF0aW9uMQ8wDQYDVQQLEwZEZXZPcHMxGDAWBgNVBAMMDyouaWJtLnh0aWZ5LmNv +bTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALL2hurN6bAq3kOFqEKG +XLqEE95X5I3pdRXcj9vX88tsjtDvYYsAs4Wf3Knf/zU0SAYChTEfskqdhDG6VWHX +aBTgP6qC69gZt/kIfFdPamv9laoxbpXlJYMATzEdOiTSQwp9sdvnxLqCPG2Lv7/v +CuLUsYbktZDTBftO81Z8wwF3teBz+wKTZYv44pDG6/T0/M9SeFdxRcqzOmlUXM7P +DPzcCOyztYC+4lnKVOeAZ4pfnV+ZMVAJibCW9lymjlqItFdltsf+4UTd8p0jwOG3 +g5RNxZdc2XdLLVJqWwZ17g3vRdi34aDv9qwBBSM58xZfU67Jp2o6lvw5a9r0dRHa +DyMCAwEAAaOCAfcwggHzMB8GA1UdIwQYMBaAFFFo/5CvAgd1PMzZZWRiohK4WXI7 +MB0GA1UdDgQWBBT+aRfx6XVbS++8XGAW9XMxHudTJzApBgNVHREEIjAggg8qLmli +bS54dGlmeS5jb22CDWlibS54dGlmeS5jb20wDgYDVR0PAQH/BAQDAgWgMB0GA1Ud +JQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjB1BgNVHR8EbjBsMDSgMqAwhi5odHRw +Oi8vY3JsMy5kaWdpY2VydC5jb20vc2hhMi1oYS1zZXJ2ZXItZzEuY3JsMDSgMqAw +hi5odHRwOi8vY3JsNC5kaWdpY2VydC5jb20vc2hhMi1oYS1zZXJ2ZXItZzEuY3Js +MEwGA1UdIARFMEMwNwYJYIZIAYb9bAEBMCowKAYIKwYBBQUHAgEWHGh0dHBzOi8v +d3d3LmRpZ2ljZXJ0LmNvbS9DUFMwCAYGZ4EMAQICMIGDBggrBgEFBQcBAQR3MHUw 
+JAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2ljZXJ0LmNvbTBNBggrBgEFBQcw +AoZBaHR0cDovL2NhY2VydHMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0U0hBMkhpZ2hB +c3N1cmFuY2VTZXJ2ZXJDQS5jcnQwDAYDVR0TAQH/BAIwAA== +-----END CERTIFICATE----- From 5b1408f76e894a8756f873fb1186994e103acd7a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 7 Apr 2023 01:53:13 +0200 Subject: [PATCH 083/187] Preparing the design overhaul of the policies part. --- pkg/common/structure.go | 5 + pkg/mapserver/common/structure.go | 25 +- pkg/mapserver/common/structure_test.go | 218 ++++++++++++++++-- pkg/mapserver/common/tools.go | 184 ++++++++------- pkg/mapserver/common/tools_test.go | 160 ------------- pkg/mapserver/responder/old_responder_test.go | 2 +- pkg/mapserver/responder/responder_test.go | 29 +-- pkg/mapserver/updater/certs_updater.go | 114 +-------- pkg/mapserver/updater/certs_updater_test.go | 121 ++-------- pkg/mapserver/updater/dbutil_test.go | 71 +++--- pkg/mapserver/updater/hash.go | 2 +- pkg/mapserver/updater/rpc_updater_test.go | 154 ++++++------- pkg/mapserver/updater/tools.go | 54 ++--- pkg/mapserver/updater/tools_test.go | 138 +++++------ pkg/mapserver/updater/updater.go | 3 +- pkg/mapserver/updater/updater_test.go | 68 +++--- pkg/mapserver/util/proof.go | 2 +- .../mapserver_benchmark/updater_test.go | 10 +- .../wholesys_benchmark_PoP_diffSize/main.go | 2 +- tests/integration/grpc_test/main.go | 2 +- tests/integration/old_mapserver/main.go | 2 +- 21 files changed, 577 insertions(+), 789 deletions(-) diff --git a/pkg/common/structure.go b/pkg/common/structure.go index 1081d5e5..00a99144 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -34,6 +34,11 @@ type RPC struct { SPTs []SPT `json:",omitempty"` } +// PCRevocation is for now empty. +type PCRevocation struct { + // TODO(juagargi) define the revocation. 
+} + // signed policy timestamp type SPT struct { Version int `json:",omitempty"` diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index d0650da1..e7cb5455 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -13,25 +13,16 @@ import ( // The domain is identified by the SHA256 of the DomainName in the DB. type DomainEntry struct { DomainName string - CAEntry []CAEntry -} + DomainID []byte -// CAEntry: All certificate, RPC, PC and revocation issued by one specific CA. -// TODO(yongzhe): add PC -type CAEntry struct { - CAName string - CAHash []byte - CurrentRPC common.RPC // TODO(juagargi) we will have a list of RPCs - FutureRPC common.RPC // - CurrentPC common.SP // TODO(juagargi) we will have a list of PCs - Revocation [][]byte // TODO(juagargi) these are policy revocations - FutureRevocation [][]byte // TODO(juagargi) these are policy revocations - DomainCerts [][]byte - DomainCertChains [][][]byte + RPCs []common.RPC + PCs []common.SP + Revocations []common.PCRevocation + DomainCerts []byte // Includes leafs and trust chain certificates, raw x509 DER.1. } -// SerializedDomainEntry: DomainEntry -> bytes. Use json -func SerializedDomainEntry(domainEntry *DomainEntry) ([]byte, error) { +// SerializeDomainEntry uses json to serialize. +func SerializeDomainEntry(domainEntry *DomainEntry) ([]byte, error) { result, err := json.Marshal(domainEntry) if err != nil { return nil, fmt.Errorf("SerializedDomainEntry | Marshal | %w", err) @@ -39,7 +30,7 @@ func SerializedDomainEntry(domainEntry *DomainEntry) ([]byte, error) { return result, nil } -// DeserializeDomainEntry: bytes -> DomainEntry. Use json +// DeserializeDomainEntry converts json into a DomainEntry. 
func DeserializeDomainEntry(input []byte) (*DomainEntry, error) { result := &DomainEntry{} diff --git a/pkg/mapserver/common/structure_test.go b/pkg/mapserver/common/structure_test.go index 36fc21e1..4d2a6481 100644 --- a/pkg/mapserver/common/structure_test.go +++ b/pkg/mapserver/common/structure_test.go @@ -3,49 +3,217 @@ package common import ( "crypto/rand" "fmt" - "reflect" "testing" - "time" + "github.com/google/go-cmp/cmp" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/stretchr/testify/assert" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" ) -//TestSerializingDomainEntry: Serializing and deserializing of DomainEntry -func TestSerializingDomainEntry(t *testing.T) { +// TestSerializingDomainEntry: Serializing and deserializing of DomainEntry +func TestSerializeDomainEntry(t *testing.T) { cert, err := common.X509CertFromFile("./testdata/cert.pem") require.NoError(t, err, "X509CertFromFile error") - caEntry := CAEntry{ - CAName: "testCA", - CurrentRPC: common.RPC{ - PublicKey: []byte{1, 4, 7, 3, 2}, - PublicKeyAlgorithm: common.RSA, - Version: 1, - }, - Revocation: [][]byte{generateRandomBytes()}, - DomainCerts: [][]byte{cert.Raw}, - } - testDomainEntry := &DomainEntry{ DomainName: "test.com", - CAEntry: []CAEntry{caEntry, caEntry, caEntry, caEntry, caEntry, caEntry}, + DomainID: common.SHA256Hash([]byte("test.com")), + RPCs: []common.RPC{ + { + PublicKey: []byte{1, 4, 7, 3, 2}, + PublicKeyAlgorithm: common.RSA, + Version: 1, + }, + { + PublicKey: []byte{2, 4, 7, 3, 2}, + PublicKeyAlgorithm: common.RSA, + Version: 2, + }, + }, + PCs: []common.SP{ + { + Policies: common.Policy{ + TrustedCA: []string{"ca1", "ca2"}, + AllowedSubdomains: []string{"flowers.com"}, + }, + // TimeStamp: time.Now(), + TimeStamp: util.TimeFromSecs(42), + SerialNumber: 1, + SPTs: []common.SPT{ + { + Version: 1, + Subject: "spt subject", + STH: []byte{0, 1, 2, 3}, + STHSerialNumber: 12345, + PoI: [][]byte{{0, 1, 
2, 3}, {4, 5, 6, 7}}, + }, + }, + }, + }, + DomainCerts: cert.Raw, } - start := time.Now() - domainBytes, err := SerializedDomainEntry(testDomainEntry) + domainBytes, err := SerializeDomainEntry(testDomainEntry) require.NoError(t, err, "SerializedDomainEntry error") - end := time.Now() - fmt.Println(end.Sub(start)) - start = time.Now() + fmt.Println(string(domainBytes)) testDomainEntryDeserialized, err := DeserializeDomainEntry(domainBytes) require.NoError(t, err, "DeserializeDomainEntry error") - end = time.Now() - fmt.Println(end.Sub(start)) - assert.Equal(t, reflect.DeepEqual(testDomainEntry, testDomainEntryDeserialized), true, "structure not equal") + require.True(t, cmp.Equal(testDomainEntry, testDomainEntryDeserialized)) + // assert.EqualValues(t, testDomainEntry, testDomainEntryDeserialized) + // assert.Equal(t, reflect.DeepEqual(testDomainEntry, testDomainEntryDeserialized), true, "structure not equal") +} + +// TestAddCert: test AddCert() +// update with new cert -> AddCert() should return true +// update with old cert -> AddCert() should return false +// then check if all the certs are correctly added +func TestAddCert(t *testing.T) { + // cert1, err := common.CTX509CertFromFile("./testdata/cert1.cer") + // require.NoError(t, err) + + // cert2, err := common.CTX509CertFromFile("./testdata/cert2.cer") + // require.NoError(t, err) + + // emptyChain := []*ctx509.Certificate{} + + // domainEntry := &DomainEntry{} + + // isUpdated := domainEntry.AddCert(cert1, emptyChain) + // assert.True(t, isUpdated) + + // isUpdated = domainEntry.AddCert(cert1, emptyChain) + // assert.False(t, isUpdated) + + // isUpdated = domainEntry.AddCert(cert2, emptyChain) + // assert.True(t, isUpdated) + + // isUpdated = domainEntry.AddCert(cert2, emptyChain) + // assert.False(t, isUpdated) + + // assert.Equal(t, 2, len(domainEntry.Entries)) + + // isFound := false + // issuerRepresentation := cert1.Issuer.String() + // for _, caEntry := range domainEntry.Entries { + // if 
caEntry.CAName == issuerRepresentation { + // assert.True(t, bytes.Equal(caEntry.DomainCerts[0], cert1.Raw)) + // isFound = true + // } + // } + // assert.True(t, isFound) + + // isFound = false + // issuerRepresentation = cert2.Issuer.String() + // for _, caEntry := range domainEntry.Entries { + // if caEntry.CAName == issuerRepresentation { + // assert.True(t, bytes.Equal(caEntry.DomainCerts[0], cert2.Raw)) + // isFound = true + // } + // } + // assert.True(t, isFound) +} + +// TestAddPC: test AddPC +// update with new PC -> AddPC() should return true +// update with old PC -> AddPC() should return false +// then check if all the PC are correctly added +func TestAddPC(t *testing.T) { + // pc1 := common.SP{ + // CAName: "ca1", + // Subject: "before", + // } + + // pc2 := common.SP{ + // CAName: "ca1", + // Subject: "after", + // } + + // pc3 := common.SP{ + // CAName: "ca2", + // Subject: "after", + // } + + // domainEntry := &DomainEntry{} + + // isUpdated := domainEntry.AddPC(&pc1) + // assert.True(t, isUpdated) + + // isUpdated = domainEntry.AddPC(&pc3) + // assert.True(t, isUpdated) + + // isUpdated = domainEntry.AddPC(&pc1) + // assert.False(t, isUpdated) + + // isUpdated = domainEntry.AddPC(&pc3) + // assert.False(t, isUpdated) + + // for _, caList := range domainEntry.Entries { + // if caList.CAName == "ca1" { + // assert.True(t, caList.PCs.Subject == "before") + // } + // } + + // isUpdated = domainEntry.AddPC(&pc2) + // assert.True(t, isUpdated) + + // for _, caList := range domainEntry.Entries { + // if caList.CAName == "ca1" { + // assert.True(t, caList.PCs.Subject == "after") + // } + // } +} + +// TestAddRPC: test AddRPC +// update with new RPC -> AddRPC() should return true +// update with old RPC -> AddRPC() should return false +// then check if all the RPC are correctly added +func TestAddRPC(t *testing.T) { + // rpc1 := common.RPC{ + // CAName: "ca1", + // Subject: "before", + // } + + // rpc2 := common.RPC{ + // CAName: "ca1", + // Subject: 
"after", + // } + + // rpc3 := common.RPC{ + // CAName: "ca2", + // Subject: "after", + // } + + // domainEntry := &DomainEntry{} + + // isUpdated := domainEntry.AddRPC(&rpc1) + // assert.True(t, isUpdated) + + // isUpdated = domainEntry.AddRPC(&rpc3) + // assert.True(t, isUpdated) + + // isUpdated = domainEntry.AddRPC(&rpc1) + // assert.False(t, isUpdated) + + // isUpdated = domainEntry.AddRPC(&rpc3) + // assert.False(t, isUpdated) + + // for _, caList := range domainEntry.Entries { + // if caList.CAName == "ca1" { + // assert.True(t, caList.RPCs.Subject == "before") + // } + // } + + // isUpdated = domainEntry.AddRPC(&rpc2) + // assert.True(t, isUpdated) + + // for _, caList := range domainEntry.Entries { + // if caList.CAName == "ca1" { + // assert.True(t, caList.RPCs.Subject == "after") + // } + // } } func generateRandomBytes() []byte { diff --git a/pkg/mapserver/common/tools.go b/pkg/mapserver/common/tools.go index b1a66754..f9d51f84 100644 --- a/pkg/mapserver/common/tools.go +++ b/pkg/mapserver/common/tools.go @@ -1,8 +1,6 @@ package common import ( - "bytes" - "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" ) @@ -36,107 +34,107 @@ func getRootCertificateSubject(cert *x509.Certificate, certChain []*x509.Certifi // AddCert: add a x509 cert to one domain entry. Return whether the domain entry is updated. 
func (domainEntry *DomainEntry) AddCert(cert *x509.Certificate, certChain []*x509.Certificate) bool { - caName := getRootCertificateSubject(cert, certChain) - - isFound := false - - // convert the certificate chain into an array of raw bytes and append them to the same CA Entry in the same order - var rawCertChain [][]byte - for _, certChainItem := range certChain { - rawCertChain = append(rawCertChain, certChainItem.Raw) - } - - // iterate CAEntry list, find if the target CA list exists - for i := range domainEntry.CAEntry { - if domainEntry.CAEntry[i].CAName == caName { - isFound = true - // check whether this certificate is already registered - for _, certRaw := range domainEntry.CAEntry[i].DomainCerts { - if bytes.Equal(certRaw, cert.Raw) { - // cert already exists - return false - } - } - // if not, append the raw of the certificate - domainEntry.CAEntry[i].DomainCerts = append(domainEntry.CAEntry[i].DomainCerts, cert.Raw) - domainEntry.CAEntry[i].DomainCertChains = append(domainEntry.CAEntry[i].DomainCertChains, rawCertChain) - return true - } - } - - // if CA list is not found - if !isFound { - // add a new CA list - domainEntry.CAEntry = append(domainEntry.CAEntry, CAEntry{ - DomainCerts: [][]byte{cert.Raw}, - DomainCertChains: [][][]byte{rawCertChain}, - CAName: caName, - CAHash: common.SHA256Hash([]byte(caName))}) - return true - } + // caName := getRootCertificateSubject(cert, certChain) + + // isFound := false + + // // convert the certificate chain into an array of raw bytes and append them to the same CA Entry in the same order + // var rawCertChain [][]byte + // for _, certChainItem := range certChain { + // rawCertChain = append(rawCertChain, certChainItem.Raw) + // } + + // // iterate CAEntry list, find if the target CA list exists + // for i := range domainEntry.Entries { + // if domainEntry.Entries[i].CAName == caName { + // isFound = true + // // check whether this certificate is already registered + // for _, certRaw := range 
domainEntry.Entries[i].DomainCerts { + // if bytes.Equal(certRaw, cert.Raw) { + // // cert already exists + // return false + // } + // } + // // if not, append the raw of the certificate + // domainEntry.Entries[i].DomainCerts = append(domainEntry.Entries[i].DomainCerts, cert.Raw) + // domainEntry.Entries[i].DomainCertChains = append(domainEntry.Entries[i].DomainCertChains, rawCertChain) + // return true + // } + // } + + // // if CA list is not found + // if !isFound { + // // add a new CA list + // domainEntry.Entries = append(domainEntry.Entries, Entry{ + // DomainCerts: [][]byte{cert.Raw}, + // DomainCertChains: [][][]byte{rawCertChain}, + // CAName: caName, + // CAHash: common.SHA256Hash([]byte(caName))}) + // return true + // } return false } // AddPC: add a Policy Certificate to a domain entry. Return whether the domain entry is updated. func (domainEntry *DomainEntry) AddPC(pc *common.SP) bool { - caName := pc.CAName - isFound := false - - // iterate CAEntry list, find if the target CA list exists - for i := range domainEntry.CAEntry { - if domainEntry.CAEntry[i].CAName == caName { - isFound = true - // check whether this certificate is already registered - if !domainEntry.CAEntry[i].CurrentPC.Equal(*pc) { - domainEntry.CAEntry[i].CurrentPC = *pc - return true - } - return false - } - } - - // if CA list is not found - if !isFound { - // add a new CA list - domainEntry.CAEntry = append(domainEntry.CAEntry, CAEntry{ - CAName: caName, - CAHash: common.SHA256Hash([]byte(caName)), - CurrentPC: *pc, - }) - return true - } + // caName := pc.CAName + // isFound := false + + // // iterate CAEntry list, find if the target CA list exists + // for i := range domainEntry.Entries { + // if domainEntry.Entries[i].CAName == caName { + // isFound = true + // // check whether this certificate is already registered + // if !domainEntry.Entries[i].PCs.Equal(*pc) { + // domainEntry.Entries[i].PCs = *pc + // return true + // } + // return false + // } + // } + + // // if CA 
list is not found + // if !isFound { + // // add a new CA list + // domainEntry.Entries = append(domainEntry.Entries, Entry{ + // CAName: caName, + // CAHash: common.SHA256Hash([]byte(caName)), + // PCs: *pc, + // }) + // return true + // } return false } // AddRPC: add a Root Policy Certificate to a domain entry. Return whether the domain entry is updated. func (domainEntry *DomainEntry) AddRPC(rpc *common.RPC) bool { - caName := rpc.CAName - isFound := false - - // iterate CAEntry list, find if the target CA list exists - for i := range domainEntry.CAEntry { - if domainEntry.CAEntry[i].CAName == caName { - isFound = true - // check whether this certificate is already registered - if !domainEntry.CAEntry[i].CurrentRPC.Equal(rpc) { - - domainEntry.CAEntry[i].CurrentRPC = *rpc - return true - } - return false - } - } - - // if CA list is not found - if !isFound { - // add a new CA list - domainEntry.CAEntry = append(domainEntry.CAEntry, CAEntry{ - CAName: caName, - CAHash: common.SHA256Hash([]byte(caName)), - CurrentRPC: *rpc, - }) - return true - } + // caName := rpc.CAName + // isFound := false + + // // iterate CAEntry list, find if the target CA list exists + // for i := range domainEntry.Entries { + // if domainEntry.Entries[i].CAName == caName { + // isFound = true + // // check whether this certificate is already registered + // if !domainEntry.Entries[i].RPCs.Equal(rpc) { + + // domainEntry.Entries[i].RPCs = *rpc + // return true + // } + // return false + // } + // } + + // // if CA list is not found + // if !isFound { + // // add a new CA list + // domainEntry.Entries = append(domainEntry.Entries, Entry{ + // CAName: caName, + // CAHash: common.SHA256Hash([]byte(caName)), + // RPCs: *rpc, + // }) + // return true + // } return false } diff --git a/pkg/mapserver/common/tools_test.go b/pkg/mapserver/common/tools_test.go index 187f103e..805d0c79 100644 --- a/pkg/mapserver/common/tools_test.go +++ b/pkg/mapserver/common/tools_test.go @@ -1,161 +1 @@ package 
common - -import ( - "bytes" - "testing" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestAddCert: test AddCert() -// update with new cert -> AddCert() should return true -// update with old cert -> AddCert() should return false -// then check if all the certs are correctly added -func TestAddCert(t *testing.T) { - cert1, err := common.CTX509CertFromFile("./testdata/cert1.cer") - require.NoError(t, err) - - cert2, err := common.CTX509CertFromFile("./testdata/cert2.cer") - require.NoError(t, err) - - emptyChain := []*ctx509.Certificate{} - - domainEntry := &DomainEntry{} - - isUpdated := domainEntry.AddCert(cert1, emptyChain) - assert.True(t, isUpdated) - - isUpdated = domainEntry.AddCert(cert1, emptyChain) - assert.False(t, isUpdated) - - isUpdated = domainEntry.AddCert(cert2, emptyChain) - assert.True(t, isUpdated) - - isUpdated = domainEntry.AddCert(cert2, emptyChain) - assert.False(t, isUpdated) - - assert.Equal(t, 2, len(domainEntry.CAEntry)) - - isFound := false - issuerRepresentation := cert1.Issuer.String() - for _, caEntry := range domainEntry.CAEntry { - if caEntry.CAName == issuerRepresentation { - assert.True(t, bytes.Equal(caEntry.DomainCerts[0], cert1.Raw)) - isFound = true - } - } - assert.True(t, isFound) - - isFound = false - issuerRepresentation = cert2.Issuer.String() - for _, caEntry := range domainEntry.CAEntry { - if caEntry.CAName == issuerRepresentation { - assert.True(t, bytes.Equal(caEntry.DomainCerts[0], cert2.Raw)) - isFound = true - } - } - assert.True(t, isFound) -} - -// TestAddPC: test AddPC -// update with new PC -> AddPC() should return true -// update with old PC -> AddPC() should return false -// then check if all the PC are correctly added -func TestAddPC(t *testing.T) { - pc1 := common.SP{ - CAName: "ca1", - Subject: "before", - } - - pc2 := 
common.SP{ - CAName: "ca1", - Subject: "after", - } - - pc3 := common.SP{ - CAName: "ca2", - Subject: "after", - } - - domainEntry := &DomainEntry{} - - isUpdated := domainEntry.AddPC(&pc1) - assert.True(t, isUpdated) - - isUpdated = domainEntry.AddPC(&pc3) - assert.True(t, isUpdated) - - isUpdated = domainEntry.AddPC(&pc1) - assert.False(t, isUpdated) - - isUpdated = domainEntry.AddPC(&pc3) - assert.False(t, isUpdated) - - for _, caList := range domainEntry.CAEntry { - if caList.CAName == "ca1" { - assert.True(t, caList.CurrentPC.Subject == "before") - } - } - - isUpdated = domainEntry.AddPC(&pc2) - assert.True(t, isUpdated) - - for _, caList := range domainEntry.CAEntry { - if caList.CAName == "ca1" { - assert.True(t, caList.CurrentPC.Subject == "after") - } - } -} - -// TestAddRPC: test AddRPC -// update with new RPC -> AddRPC() should return true -// update with old RPC -> AddRPC() should return false -// then check if all the RPC are correctly added -func TestAddRPC(t *testing.T) { - rpc1 := common.RPC{ - CAName: "ca1", - Subject: "before", - } - - rpc2 := common.RPC{ - CAName: "ca1", - Subject: "after", - } - - rpc3 := common.RPC{ - CAName: "ca2", - Subject: "after", - } - - domainEntry := &DomainEntry{} - - isUpdated := domainEntry.AddRPC(&rpc1) - assert.True(t, isUpdated) - - isUpdated = domainEntry.AddRPC(&rpc3) - assert.True(t, isUpdated) - - isUpdated = domainEntry.AddRPC(&rpc1) - assert.False(t, isUpdated) - - isUpdated = domainEntry.AddRPC(&rpc3) - assert.False(t, isUpdated) - - for _, caList := range domainEntry.CAEntry { - if caList.CAName == "ca1" { - assert.True(t, caList.CurrentRPC.Subject == "before") - } - } - - isUpdated = domainEntry.AddRPC(&rpc2) - assert.True(t, isUpdated) - - for _, caList := range domainEntry.CAEntry { - if caList.CAName == "ca1" { - assert.True(t, caList.CurrentRPC.Subject == "after") - } - } -} diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index 
8c083c25..5da9468a 100644
--- a/pkg/mapserver/responder/old_responder_test.go
+++ b/pkg/mapserver/responder/old_responder_test.go
@@ -202,7 +202,7 @@ func checkProofOld(t *testing.T, cert ctx509.Certificate, proofs []mapcommon.Map
 	domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes)
 	require.NoError(t, err)
 	// get the correct CA entry
-	for _, caEntry := range domainEntry.CAEntry {
+	for _, caEntry := range domainEntry.Entries {
 		if caEntry.CAName == caName {
 			// check if the cert is in the CA entry
 			for _, certRaw := range caEntry.DomainCerts {
diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go
index 3179f441..7d9742ae 100644
--- a/pkg/mapserver/responder/responder_test.go
+++ b/pkg/mapserver/responder/responder_test.go
@@ -122,20 +122,21 @@ func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapS
 		if proofType == mapcommon.PoA {
 			require.Empty(t, proof.DomainEntryBytes)
 		}
-		// if proofType == mapcommon.PoP {
-		// 	domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes)
-		// 	require.NoError(t, err)
-		// 	// get the correct CA entry
-		// 	for _, caEntry := range domainEntry.CAEntry {
-		// 		if caEntry.CAName == caName {
-		// 			// check if the cert is in the CA entry
-		// 			for _, certRaw := range caEntry.DomainCerts {
-		// 				require.Equal(t, certRaw, cert.Raw)
-		// 				return
-		// 			}
-		// 		}
-		// 	}
-		// }
+		if proofType == mapcommon.PoP {
+			domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes)
+			require.NoError(t, err)
+			_ = domainEntry // WIP: keep the variable referenced until the checks below are restored
+ // // get the correct CA entry + // for _, caEntry := range domainEntry.CAEntry { + // if caEntry.CAName == caName { + // // check if the cert is in the CA entry + // for _, certRaw := range caEntry.DomainCerts { + // require.Equal(t, certRaw, cert.Raw) + // return + // } + // } + // } + } } // require.Fail(t, "cert/CA not found") } diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 8b09911f..2ea14e94 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -9,6 +9,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" mcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/util" ) // TODO(yongzhe): make the list if size is already known. @@ -102,7 +103,7 @@ func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ct certChain := certChains[i] // get unique list of domain names - domains := ExtractCertDomains(cert) + domains := util.ExtractCertDomains(cert) if len(domains) == 0 { continue } @@ -131,115 +132,6 @@ func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ct return affectedDomainsMap, domainCertMap, domainCertChainMap } -// UnfoldCerts takes a slice of certificates and chains with the same length, -// and returns all certificates once, without duplicates, and the ID of the parent in the -// trust chain, or nil if the certificate is root. -// The parents returned slice has the same elements as the certificates returned slice. -// When a certificate is root, it's corresponding parents entry is nil. -// Additionally, all the names of the leaf certificates are returned in its corresponding position -// in the names slice iff the certificate is a leaf one. If it is not, nil is returned in that -// position instead. 
-// -// The leaf certificates are always returned at the head of the slice, which means, among others, -// that once a nil value is found in the names slice, the rest of the slice will be nil as well. -func UnfoldCerts(leafCerts []*ctx509.Certificate, chains [][]*ctx509.Certificate, -) ( - certificates []*ctx509.Certificate, - certIDs []*common.SHA256Output, - parentIDs []*common.SHA256Output, - names [][]string, -) { - - // extractNames is the function that extracts the names from a certificate. It starts being - // a regular names extraction, but after processing all leaves it is assigned to a function - // that always returns nil. - extractNames := func(c *ctx509.Certificate) []string { - return ExtractCertDomains(c) - } - // ChangeFcn changes extractNames to always return nil. - changeFcn := func() { - extractNames = func(c *ctx509.Certificate) []string { - return nil - } - } - - for len(leafCerts) > 0 { - var pendingCerts []*ctx509.Certificate - var pendingChains [][]*ctx509.Certificate - for i, c := range leafCerts { - certificates = append(certificates, c) - ID := common.SHA256Hash32Bytes(c.Raw) - certIDs = append(certIDs, &ID) - var parentID *common.SHA256Output - if len(chains[i]) > 0 { - // The certificate has a trust chain (it is not root): add the first certificate - // from the chain as the parent. - parent := chains[i][0] - ID := common.SHA256Hash32Bytes(parent.Raw) - parentID = &ID - // Add this parent to the back of the certs, plus the corresponding chain entry, - // so that it's processed as a certificate. - pendingCerts = append(pendingCerts, parent) - pendingChains = append(pendingChains, chains[i][1:]) - } - parentIDs = append(parentIDs, parentID) - names = append(names, extractNames(c)) - } - changeFcn() // This will change the function `extractNames` to always return nil. 
- leafCerts = pendingCerts - chains = pendingChains - } - return -} - -// UnfoldCert takes a certificate with its trust chain and returns a ready-to-insert-in-DB -// collection of IDs and payloads for the certificate and its ancestry. -// Additionally, if the payload of any of the ancestors of the certificate is nil, this function -// interprets it as the ancestor is already present in the DB, and thus will omit returning it -// and any posterior ancestors. -func UnfoldCert(leafCert *ctx509.Certificate, certID *common.SHA256Output, - chain []*ctx509.Certificate, chainIDs []*common.SHA256Output, -) ( - certs []*ctx509.Certificate, - certIDs []*common.SHA256Output, - parentIDs []*common.SHA256Output, - names [][]string, -) { - - certs = make([]*ctx509.Certificate, 0, len(chainIDs)+1) - certIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) - parentIDs = make([]*common.SHA256Output, 0, len(chainIDs)+1) - names = make([][]string, 0, len(chainIDs)+1) - - // Always add the leaf certificate. - certs = append(certs, leafCert) - certIDs = append(certIDs, certID) - parentIDs = append(parentIDs, chainIDs[0]) - names = append(names, ExtractCertDomains(leafCert)) - // Add the intermediate certs iff their payload is not nil. - i := 0 - for ; i < len(chain)-1; i++ { - if chain[i] == nil { - // This parent has been inserted already in DB. This implies that its own parent, - // the grandparent of the leaf, must have been inserted as well; and so on. - // There are no more parents to insert. - return - } - certs = append(certs, chain[i]) - certIDs = append(certIDs, chainIDs[i]) - parentIDs = append(parentIDs, chainIDs[i+1]) - names = append(names, nil) - } - // Add the root certificate (no parent) iff we haven't inserted it yet. 
- if chain[i] != nil { - certs = append(certs, chain[i]) - certIDs = append(certIDs, chainIDs[i]) - parentIDs = append(parentIDs, nil) - names = append(names, nil) - } - return -} - // update domain entries func UpdateDomainEntries( domainEntries map[common.SHA256Output]*mcommon.DomainEntry, @@ -309,7 +201,7 @@ func SerializeUpdatedDomainEntries(domains map[common.SHA256Output]*mcommon.Doma result := make([]*db.KeyValuePair, 0, len(domains)) for domainNameHash, domainEntry := range domains { - domainBytes, err := mcommon.SerializedDomainEntry(domainEntry) + domainBytes, err := mcommon.SerializeDomainEntry(domainEntry) if err != nil { return nil, fmt.Errorf("serializeUpdatedDomainEntries | SerializedDomainEntry | %w", err) } diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index 162483df..26e45bc6 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -2,18 +2,17 @@ package updater import ( "bytes" - "fmt" "io/ioutil" "testing" ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/google/certificate-transparency-go/x509/pkix" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/domain" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/util" ) // TestUpdateDomainEntriesUsingCerts: test UpdateDomainEntriesUsingCerts @@ -38,7 +37,7 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // test if all the certs are correctly added to the affectedDomainsMap and domainCertMap for _, cert := range certs { // get common name and SAN of the certificate - domainNames := ExtractCertDomains(cert) + domainNames := util.ExtractCertDomains(cert) // get the valid domain name from domainNames list affectedDomains := 
domain.ExtractAffectedDomains(domainNames) @@ -83,8 +82,8 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // check if domainEntriesMap is correctly updated for _, cert := range certs { - domainNames := ExtractCertDomains(cert) - caName := cert.Issuer.String() + domainNames := util.ExtractCertDomains(cert) + // caName := cert.Issuer.String() // check if this cert has valid name affectedDomains := domain.ExtractAffectedDomains(domainNames) @@ -102,21 +101,21 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // check domain name is correct assert.True(t, domainEntry.DomainName == domainName) - for _, caList := range domainEntry.CAEntry { - if caList.CAName == caName { - isFound := false - for _, newCert := range caList.DomainCerts { - if bytes.Equal(newCert, cert.Raw) { - isFound = true - } - } - assert.True(t, isFound, "cert not found") - } else { - for _, newCert := range caList.DomainCerts { - assert.False(t, bytes.Equal(newCert, cert.Raw), "cert should not be here") - } - } - } + // for _, caList := range domainEntry.Entries { + // if caList.CAName == caName { + // isFound := false + // for _, newCert := range caList.DomainCerts { + // if bytes.Equal(newCert, cert.Raw) { + // isFound = true + // } + // } + // assert.True(t, isFound, "cert not found") + // } else { + // for _, newCert := range caList.DomainCerts { + // assert.False(t, bytes.Equal(newCert, cert.Raw), "cert should not be here") + // } + // } + // } } } @@ -160,85 +159,3 @@ func TestUpdateSameCertTwice(t *testing.T) { // Now the length of updatedDomains should be zero. assert.Equal(t, 0, len(updatedDomains), "updated domain should be 0") } - -func TestUnfoldCerts(t *testing.T) { - // `a` and `b` are leaves. `a` is root, `b` has `c`->`d` as its trust chain. 
- a := &ctx509.Certificate{ - Raw: []byte{0}, - Subject: pkix.Name{ - CommonName: "a", - }, - DNSNames: []string{"a", "a", "a.com"}, - } - b := &ctx509.Certificate{ - Raw: []byte{1}, - Subject: pkix.Name{ - CommonName: "b", - }, - DNSNames: []string{"b", "b", "b.com"}, - } - c := &ctx509.Certificate{ - Raw: []byte{1}, - Subject: pkix.Name{ - CommonName: "c", - }, - DNSNames: []string{"c", "c", "c.com"}, - } - d := &ctx509.Certificate{ - Raw: []byte{3}, - Subject: pkix.Name{ - CommonName: "d", - }, - DNSNames: []string{"d", "d", "d.com"}, - } - - certs := []*ctx509.Certificate{ - a, - b, - } - chains := [][]*ctx509.Certificate{ - nil, - {c, d}, - } - allCerts, IDs, parentIDs, names := UnfoldCerts(certs, chains) - - fmt.Printf("[%p %p %p %p]\n", a, b, c, d) - fmt.Printf("%v\n", allCerts) - fmt.Printf("%v\n", IDs) - fmt.Printf("%v\n", parentIDs) - - assert.Len(t, allCerts, 4) - assert.Len(t, IDs, 4) - assert.Len(t, parentIDs, 4) - - // Check payloads. - assert.Equal(t, a, allCerts[0]) - assert.Equal(t, b, allCerts[1]) - assert.Equal(t, c, allCerts[2]) - assert.Equal(t, d, allCerts[3]) - - // Check IDs. - aID := common.SHA256Hash32Bytes(a.Raw) - bID := common.SHA256Hash32Bytes(b.Raw) - cID := common.SHA256Hash32Bytes(c.Raw) - dID := common.SHA256Hash32Bytes(d.Raw) - - assert.Equal(t, aID, *IDs[0]) - assert.Equal(t, bID, *IDs[1]) - assert.Equal(t, cID, *IDs[2]) - assert.Equal(t, dID, *IDs[3]) - - // Check parent IDs. - nilID := (*common.SHA256Output)(nil) - assert.Equal(t, nilID, parentIDs[0], "bad parent at 0") - assert.Equal(t, cID, *parentIDs[1], "bad parent at 1") - assert.Equal(t, dID, *parentIDs[2], "bad parent at 2") - assert.Equal(t, nilID, parentIDs[3], "bad parent at 3") - - // Check domain names. 
- nilNames := ([]string)(nil) - assert.ElementsMatch(t, []string{"a", "a.com"}, names[0]) // root but also a leaf - assert.ElementsMatch(t, []string{"b", "b.com"}, names[1]) // just a leaf - assert.Equal(t, nilNames, names[2]) // not a leaf - assert.Equal(t, nilNames, names[3]) // not a leaf -} diff --git a/pkg/mapserver/updater/dbutil_test.go b/pkg/mapserver/updater/dbutil_test.go index ed1326e3..dac21e4e 100644 --- a/pkg/mapserver/updater/dbutil_test.go +++ b/pkg/mapserver/updater/dbutil_test.go @@ -2,46 +2,41 @@ package updater import ( "testing" - - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // TestParseDomainBytes: test ParseDomainBytes() func TestParseDomainBytes(t *testing.T) { - domainEntry := &common.DomainEntry{ - DomainName: "test domain", - CAEntry: []common.CAEntry{ - { - CAName: "ca1", - DomainCerts: [][]byte{{1, 2, 3}}, - }, - { - CAName: "ca2", - DomainCerts: [][]byte{{2, 3, 4}}, - }, - }, - } - - serializedBytes, err := common.SerializedDomainEntry(domainEntry) - require.NoError(t, err) - - keyValuePairs := []*db.KeyValuePair{ - { - Key: [32]byte{1}, - Value: serializedBytes, - }, - } - - result, err := parseDomainBytes(keyValuePairs) - require.NoError(t, err) - - domainEntry_, ok := result[[32]byte{1}] - assert.True(t, ok) - - assert.Equal(t, domainEntry.DomainName, domainEntry_.DomainName) - assert.Equal(t, domainEntry.CAEntry[0].CAName, domainEntry_.CAEntry[0].CAName) - assert.Equal(t, domainEntry.CAEntry[1].CAName, domainEntry_.CAEntry[1].CAName) + // domainEntry := &common.DomainEntry{ + // DomainName: "test domain", + // Entries: []common.Entry{ + // { + // CAName: "ca1", + // DomainCerts: [][]byte{{1, 2, 3}}, + // }, + // { + // CAName: "ca2", + // DomainCerts: [][]byte{{2, 3, 4}}, + // }, + // }, + // } + + // serializedBytes, err := common.SerializeDomainEntry(domainEntry) + // 
require.NoError(t, err) + + // keyValuePairs := []*db.KeyValuePair{ + // { + // Key: [32]byte{1}, + // Value: serializedBytes, + // }, + // } + + // result, err := parseDomainBytes(keyValuePairs) + // require.NoError(t, err) + + // domainEntry_, ok := result[[32]byte{1}] + // assert.True(t, ok) + + // assert.Equal(t, domainEntry.DomainName, domainEntry_.DomainName) + // assert.Equal(t, domainEntry.Entries[0].CAName, domainEntry_.Entries[0].CAName) + // assert.Equal(t, domainEntry.Entries[1].CAName, domainEntry_.Entries[1].CAName) } diff --git a/pkg/mapserver/updater/hash.go b/pkg/mapserver/updater/hash.go index 0cc89b3b..e5362f81 100644 --- a/pkg/mapserver/updater/hash.go +++ b/pkg/mapserver/updater/hash.go @@ -21,7 +21,7 @@ type UpdateInput struct { func HashDomainEntriesThenSort(domainEntries []mapCommon.DomainEntry) ([]UpdateInput, error) { result := make([]UpdateInput, 0, len(domainEntries)) for _, v := range domainEntries { - domainEntryBytes, err := mapCommon.SerializedDomainEntry(&v) + domainEntryBytes, err := mapCommon.SerializeDomainEntry(&v) if err != nil { return nil, fmt.Errorf("HashDomainEntriesThenSort | SerializedDomainEntry | %w", err) } diff --git a/pkg/mapserver/updater/rpc_updater_test.go b/pkg/mapserver/updater/rpc_updater_test.go index e28f730a..921e945f 100644 --- a/pkg/mapserver/updater/rpc_updater_test.go +++ b/pkg/mapserver/updater/rpc_updater_test.go @@ -63,83 +63,83 @@ func TestRPCAndPC(t *testing.T) { // TestUpdateDomainEntriesWithRPCAndPC: test updateDomainEntriesWithRPCAndPC(), getDomainEntriesToWrite() // and serializeUpdatedDomainEntries() func TestUpdateDomainEntriesWithRPCAndPC(t *testing.T) { - // get PC and RPC - pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 0) - require.NoError(t, err, "GetPCAndRPC error") - - // empty map(mock result from db). 
- domainEntriesMap := make(map[projectCommon.SHA256Output]*common.DomainEntry) - - // add the affectedDomainsSet and domainCertMap - _, domainCertMap := getAffectedDomainAndCertMapPCAndRPC(rpcList, pcList) - - updatedDomains, err := updateDomainEntriesWithRPCAndPC(domainEntriesMap, domainCertMap) - require.NoError(t, err) - assert.Equal(t, len(updatedDomains), len(domainEntriesMap), "size of domainEntriesMap should be the size of updatedDomains") - - // check PC - for _, pc := range pcList { - subjectName := pc.Subject - caName := pc.CAName - var subjectNameHash projectCommon.SHA256Output - copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) - - for domainHash, domainEntry := range domainEntriesMap { - switch { - case domainHash == subjectNameHash: - isFound := false - for _, caList := range domainEntry.CAEntry { - if caList.CAName == caName { - isFound = true - assert.True(t, caList.CurrentPC.Equal(*pc), "PC missing") - } else { - assert.False(t, caList.CurrentPC.Equal(*pc), "PC in wrong place") - } - } - assert.True(t, isFound, "new PC not included in domainEntriesMap") - case domainHash != subjectNameHash: - for _, caList := range domainEntry.CAEntry { - assert.False(t, caList.CurrentPC.Equal(*pc)) - } - } - } - } - - // check RPC - for _, rpc := range rpcList { - subjectName := rpc.Subject - caName := rpc.CAName - var subjectNameHash projectCommon.SHA256Output - copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) - - for domainHash, domainEntry := range domainEntriesMap { - switch { - case domainHash == subjectNameHash: - isFound := false - for _, caList := range domainEntry.CAEntry { - if caList.CAName == caName { - isFound = true - assert.True(t, caList.CurrentRPC.Equal(rpc), "RPC missing") - } else { - assert.False(t, caList.CurrentRPC.Equal(rpc), "RPC in wrong place") - } - } - assert.True(t, isFound, "new RPC not included in domainEntriesMap") - case domainHash != subjectNameHash: - for _, caList := range 
domainEntry.CAEntry { - assert.False(t, caList.CurrentRPC.Equal(rpc)) - } - } - } - } - - // get the domain entries only if they are updated - domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) - require.NoError(t, err) - - // serialize the domainEntry -> key-value pair - _, err = SerializeUpdatedDomainEntries(domainEntriesToWrite) - require.NoError(t, err) + // // get PC and RPC + // pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 0) + // require.NoError(t, err, "GetPCAndRPC error") + + // // empty map(mock result from db). + // domainEntriesMap := make(map[projectCommon.SHA256Output]*common.DomainEntry) + + // // add the affectedDomainsSet and domainCertMap + // _, domainCertMap := getAffectedDomainAndCertMapPCAndRPC(rpcList, pcList) + + // updatedDomains, err := updateDomainEntriesWithRPCAndPC(domainEntriesMap, domainCertMap) + // require.NoError(t, err) + // assert.Equal(t, len(updatedDomains), len(domainEntriesMap), "size of domainEntriesMap should be the size of updatedDomains") + + // // check PC + // for _, pc := range pcList { + // subjectName := pc.Subject + // caName := pc.CAName + // var subjectNameHash projectCommon.SHA256Output + // copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) + + // for domainHash, domainEntry := range domainEntriesMap { + // switch { + // case domainHash == subjectNameHash: + // isFound := false + // for _, caList := range domainEntry.Entries { + // if caList.CAName == caName { + // isFound = true + // assert.True(t, caList.PCs.Equal(*pc), "PC missing") + // } else { + // assert.False(t, caList.PCs.Equal(*pc), "PC in wrong place") + // } + // } + // assert.True(t, isFound, "new PC not included in domainEntriesMap") + // case domainHash != subjectNameHash: + // for _, caList := range domainEntry.Entries { + // assert.False(t, caList.PCs.Equal(*pc)) + // } + // } + // } + // } + + // // check RPC + // for _, rpc := range rpcList { 
+ // subjectName := rpc.Subject + // caName := rpc.CAName + // var subjectNameHash projectCommon.SHA256Output + // copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) + + // for domainHash, domainEntry := range domainEntriesMap { + // switch { + // case domainHash == subjectNameHash: + // isFound := false + // for _, caList := range domainEntry.Entries { + // if caList.CAName == caName { + // isFound = true + // assert.True(t, caList.RPCs.Equal(rpc), "RPC missing") + // } else { + // assert.False(t, caList.RPCs.Equal(rpc), "RPC in wrong place") + // } + // } + // assert.True(t, isFound, "new RPC not included in domainEntriesMap") + // case domainHash != subjectNameHash: + // for _, caList := range domainEntry.Entries { + // assert.False(t, caList.RPCs.Equal(rpc)) + // } + // } + // } + // } + + // // get the domain entries only if they are updated + // domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) + // require.NoError(t, err) + + // // serialize the domainEntry -> key-value pair + // _, err = SerializeUpdatedDomainEntries(domainEntriesToWrite) + // require.NoError(t, err) } // TestUpdateSameRPCTwice: update the same RPC twice, number of updates should be zero diff --git a/pkg/mapserver/updater/tools.go b/pkg/mapserver/updater/tools.go index 3f0444ee..52fa5df3 100644 --- a/pkg/mapserver/updater/tools.go +++ b/pkg/mapserver/updater/tools.go @@ -1,48 +1,26 @@ package updater import ( - "bytes" - "sort" - - "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/mapserver/common" ) -// ExtractCertDomains: get domain from cert: {Common Name, SANs} -func ExtractCertDomains(cert *x509.Certificate) []string { - domains := make(uniqueStringSet) - if len(cert.Subject.CommonName) != 0 { - domains[cert.Subject.CommonName] = struct{}{} - } - - for _, dnsName := range cert.DNSNames { - domains[dnsName] = struct{}{} - } - - result := []string{} - for k := range domains { - 
result = append(result, k) - } - return result -} - // sort domain entries func sortDomainEntry(domainEntry *common.DomainEntry) { - // sort CA entries - sort.Slice(domainEntry.CAEntry, func(j, k int) bool { - if len(domainEntry.CAEntry[j].CAHash) == len(domainEntry.CAEntry[k].CAHash) { - return bytes.Compare(domainEntry.CAEntry[j].CAHash, domainEntry.CAEntry[k].CAHash) == -1 - } - return len(domainEntry.CAEntry[j].CAHash) < len(domainEntry.CAEntry[k].CAHash) - }) + // // sort CA entries + // sort.Slice(domainEntry.Entries, func(j, k int) bool { + // if len(domainEntry.Entries[j].CAHash) == len(domainEntry.Entries[k].CAHash) { + // return bytes.Compare(domainEntry.Entries[j].CAHash, domainEntry.Entries[k].CAHash) == -1 + // } + // return len(domainEntry.Entries[j].CAHash) < len(domainEntry.Entries[k].CAHash) + // }) - // sort domain certs in one CA entry - for i := range domainEntry.CAEntry { - sort.Slice(domainEntry.CAEntry[i].DomainCerts, func(j, k int) bool { - if len(domainEntry.CAEntry[i].DomainCerts[j]) == len(domainEntry.CAEntry[i].DomainCerts[k]) { - return bytes.Compare(domainEntry.CAEntry[i].DomainCerts[j], domainEntry.CAEntry[i].DomainCerts[k]) == -1 - } - return len(domainEntry.CAEntry[i].DomainCerts[j]) < len(domainEntry.CAEntry[i].DomainCerts[k]) - }) - } + // // sort domain certs in one CA entry + // for i := range domainEntry.Entries { + // sort.Slice(domainEntry.Entries[i].DomainCerts, func(j, k int) bool { + // if len(domainEntry.Entries[i].DomainCerts[j]) == len(domainEntry.Entries[i].DomainCerts[k]) { + // return bytes.Compare(domainEntry.Entries[i].DomainCerts[j], domainEntry.Entries[i].DomainCerts[k]) == -1 + // } + // return len(domainEntry.Entries[i].DomainCerts[j]) < len(domainEntry.Entries[i].DomainCerts[k]) + // }) + // } } diff --git a/pkg/mapserver/updater/tools_test.go b/pkg/mapserver/updater/tools_test.go index 1abd49b8..d6dce281 100644 --- a/pkg/mapserver/updater/tools_test.go +++ b/pkg/mapserver/updater/tools_test.go @@ -5,7 +5,7 @@ 
import ( "testing" "github.com/netsec-ethz/fpki/pkg/common" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,7 +16,7 @@ func TestExtractCertDomains(t *testing.T) { cert, err := common.CTX509CertFromFile("./testdata/certs/adiq.com.br144.cer") require.NoError(t, err, "projectCommon.CTX509CertFromFile") - result := ExtractCertDomains(cert) + result := util.ExtractCertDomains(cert) assert.Equal(t, 2, len(result)) assert.Contains(t, result, "*.adiq.com.br") assert.Contains(t, result, "adiq.com.br") @@ -24,73 +24,73 @@ func TestExtractCertDomains(t *testing.T) { // TestSortDomainEntry: test SortDomainEntry() func TestSortDomainEntry(t *testing.T) { - // prepare test data - cert1 := generateRandomBytes(100) - cert2 := generateRandomBytes(100) - cert3 := generateRandomBytes(103) - cert4 := generateRandomBytes(10) - - caEntry1 := mapCommon.CAEntry{ - CAName: "ca1", - CAHash: common.SHA256Hash([]byte("ca1")), - DomainCerts: [][]byte{cert1, cert2, cert3, cert4}, - } - - caEntry1_ := mapCommon.CAEntry{ - CAName: "ca1", - CAHash: common.SHA256Hash([]byte("ca1")), - DomainCerts: [][]byte{cert2, cert4, cert3, cert1}, - } - - caEntry2 := mapCommon.CAEntry{ - CAName: "ca2", - CAHash: common.SHA256Hash([]byte("ca2")), - DomainCerts: [][]byte{cert1, cert2, cert3, cert4}, - } - - caEntry2_ := mapCommon.CAEntry{ - CAName: "ca2", - CAHash: common.SHA256Hash([]byte("ca2")), - DomainCerts: [][]byte{cert2, cert4, cert1, cert3}, - } - - caEntry3 := mapCommon.CAEntry{ - CAName: "ca3", - CAHash: common.SHA256Hash([]byte("ca3")), - DomainCerts: [][]byte{cert1, cert3, cert2, cert4}, - } - - caEntry3_ := mapCommon.CAEntry{ - CAName: "ca3", - CAHash: common.SHA256Hash([]byte("ca3")), - DomainCerts: [][]byte{cert2, cert1, cert3, cert4}, - } - - // add the same cert and CA entries in different orders - domainEntry1 := 
&mapCommon.DomainEntry{ - CAEntry: []mapCommon.CAEntry{caEntry1, caEntry2, caEntry3_}, - } - - domainEntry2 := &mapCommon.DomainEntry{ - CAEntry: []mapCommon.CAEntry{caEntry1_, caEntry3, caEntry2_}, - } - - domainEntry3 := &mapCommon.DomainEntry{ - CAEntry: []mapCommon.CAEntry{caEntry3, caEntry2_, caEntry1_}, - } - - sortDomainEntry(domainEntry1) - sortDomainEntry(domainEntry2) - sortDomainEntry(domainEntry3) - - for i := 0; i < 3; i++ { - // check ca entry order is correct - assert.Equal(t, domainEntry1.CAEntry[i].CAName, domainEntry2.CAEntry[i].CAName, domainEntry3.CAEntry[i].CAName) - for j := 0; j < 4; j++ { - assert.Equal(t, domainEntry1.CAEntry[i].DomainCerts[j], domainEntry2.CAEntry[i].DomainCerts[j], - domainEntry3.CAEntry[i].DomainCerts[j]) - } - } + // // prepare test data + // cert1 := generateRandomBytes(100) + // cert2 := generateRandomBytes(100) + // cert3 := generateRandomBytes(103) + // cert4 := generateRandomBytes(10) + + // caEntry1 := mapCommon.Entry{ + // CAName: "ca1", + // CAHash: common.SHA256Hash([]byte("ca1")), + // DomainCerts: [][]byte{cert1, cert2, cert3, cert4}, + // } + + // caEntry1_ := mapCommon.Entry{ + // CAName: "ca1", + // CAHash: common.SHA256Hash([]byte("ca1")), + // DomainCerts: [][]byte{cert2, cert4, cert3, cert1}, + // } + + // caEntry2 := mapCommon.Entry{ + // CAName: "ca2", + // CAHash: common.SHA256Hash([]byte("ca2")), + // DomainCerts: [][]byte{cert1, cert2, cert3, cert4}, + // } + + // caEntry2_ := mapCommon.Entry{ + // CAName: "ca2", + // CAHash: common.SHA256Hash([]byte("ca2")), + // DomainCerts: [][]byte{cert2, cert4, cert1, cert3}, + // } + + // caEntry3 := mapCommon.Entry{ + // CAName: "ca3", + // CAHash: common.SHA256Hash([]byte("ca3")), + // DomainCerts: [][]byte{cert1, cert3, cert2, cert4}, + // } + + // caEntry3_ := mapCommon.Entry{ + // CAName: "ca3", + // CAHash: common.SHA256Hash([]byte("ca3")), + // DomainCerts: [][]byte{cert2, cert1, cert3, cert4}, + // } + + // // add the same cert and CA entries in 
different orders + // domainEntry1 := &mapCommon.DomainEntry{ + // Entries: []mapCommon.Entry{caEntry1, caEntry2, caEntry3_}, + // } + + // domainEntry2 := &mapCommon.DomainEntry{ + // Entries: []mapCommon.Entry{caEntry1_, caEntry3, caEntry2_}, + // } + + // domainEntry3 := &mapCommon.DomainEntry{ + // Entries: []mapCommon.Entry{caEntry3, caEntry2_, caEntry1_}, + // } + + // sortDomainEntry(domainEntry1) + // sortDomainEntry(domainEntry2) + // sortDomainEntry(domainEntry3) + + // for i := 0; i < 3; i++ { + // // check ca entry order is correct + // assert.Equal(t, domainEntry1.Entries[i].CAName, domainEntry2.Entries[i].CAName, domainEntry3.Entries[i].CAName) + // for j := 0; j < 4; j++ { + // assert.Equal(t, domainEntry1.Entries[i].DomainCerts[j], domainEntry2.Entries[i].DomainCerts[j], + // domainEntry3.Entries[i].DomainCerts[j]) + // } + // } } // ------------------------------------------------------------- diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 129addd1..5e163e1c 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -14,6 +14,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" + "github.com/netsec-ethz/fpki/pkg/util" ) // MapUpdater: map updater. 
It is responsible for updating the tree, and writing to db @@ -90,7 +91,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ } certChains = append(certChains, chain) } - certs, IDs, parentIDs, names := UnfoldCerts(certs, certChains) + certs, IDs, parentIDs, names := util.UnfoldCerts(certs, certChains) return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, IDs, parentIDs) } diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index e44253bd..01da1f37 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -9,10 +9,10 @@ import ( "github.com/google/certificate-transparency-go/x509" projectCommon "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -51,23 +51,23 @@ func TestUpdateCerts(t *testing.T) { // check whether certs are correctly added to the db for _, cert := range certs { - domains := domain.ExtractAffectedDomains(ExtractCertDomains(cert)) + domains := domain.ExtractAffectedDomains(util.ExtractCertDomains(cert)) for _, domain := range domains { domainHash := projectCommon.SHA256Hash32Bytes([]byte(domain)) assert.Contains(t, updaterDB.DomainEntriesTable, domainHash) - domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] + // domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] - domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) - require.NoError(t, err) + // domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) + // require.NoError(t, err) - for _, 
caList := range domainEntry.CAEntry { - if caList.CAName != cert.Issuer.String() { - assert.NotContains(t, caList.DomainCerts, cert.Raw) - } else { - assert.Contains(t, caList.DomainCerts, cert.Raw) - } - } + // for _, caList := range domainEntry.Entries { + // if caList.CAName != cert.Issuer.String() { + // assert.NotContains(t, caList.DomainCerts, cert.Raw) + // } else { + // assert.Contains(t, caList.DomainCerts, cert.Raw) + // } + // } // test if SMT response is correct _, isPoP, _, _, err := smt.MerkleProof(ctx, domainHash[:]) @@ -100,18 +100,18 @@ func TestUpdateRPCAndPC(t *testing.T) { for _, pc := range pcList { domainHash := projectCommon.SHA256Hash32Bytes([]byte(pc.Subject)) assert.Contains(t, updaterDB.DomainEntriesTable, domainHash) - domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] + // domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] - domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) - require.NoError(t, err) + // domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) + // require.NoError(t, err) - for _, caList := range domainEntry.CAEntry { - if caList.CAName != pc.CAName { - assert.Equal(t, pc, caList.CurrentPC) - } else { - assert.NotEqual(t, pc, caList.CurrentPC) - } - } + // for _, caList := range domainEntry.Entries { + // if caList.CAName != pc.CAName { + // assert.Equal(t, pc, caList.PCs) + // } else { + // assert.NotEqual(t, pc, caList.PCs) + // } + // } // test if SMT response is correct _, isPoP, _, _, err := smt.MerkleProof(ctx, domainHash[:]) @@ -123,18 +123,18 @@ func TestUpdateRPCAndPC(t *testing.T) { for _, rpc := range rpcList { domainHash := projectCommon.SHA256Hash32Bytes([]byte(rpc.Subject)) assert.Contains(t, updaterDB.DomainEntriesTable, domainHash) - domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] - - domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) - require.NoError(t, err) - - for _, caList := range domainEntry.CAEntry { - if caList.CAName 
!= rpc.CAName { - assert.Equal(t, rpc, caList.CurrentRPC) - } else { - assert.NotEqual(t, rpc, caList.CurrentRPC) - } - } + // domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] + + // domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) + // require.NoError(t, err) + + // for _, caList := range domainEntry.Entries { + // if caList.CAName != rpc.CAName { + // assert.Equal(t, rpc, caList.RPCs) + // } else { + // assert.NotEqual(t, rpc, caList.RPCs) + // } + // } // test if SMT response is correct _, isPoP, _, _, err := smt.MerkleProof(ctx, domainHash[:]) diff --git a/pkg/mapserver/util/proof.go b/pkg/mapserver/util/proof.go index 68890827..c6924d4d 100644 --- a/pkg/mapserver/util/proof.go +++ b/pkg/mapserver/util/proof.go @@ -41,7 +41,7 @@ func CheckProof( i, name, err) } // Find the CA entry that corresponds to the CA in this certificate. - for _, ca := range domainEntry.CAEntry { + for _, ca := range domainEntry.Entries { if ca.CAName == caName { for _, raw := range ca.DomainCerts { if bytes.Equal(raw, cert.Raw) { diff --git a/tests/benchmark/mapserver_benchmark/updater_test.go b/tests/benchmark/mapserver_benchmark/updater_test.go index b5b257f1..e3359052 100644 --- a/tests/benchmark/mapserver_benchmark/updater_test.go +++ b/tests/benchmark/mapserver_benchmark/updater_test.go @@ -126,10 +126,11 @@ func TestDoUpdatesFromTestDataCerts(t *testing.T) { swapBack := swapDBs(t) defer swapBack() fmt.Println("Loading certs ...") - raw, err := util.ReadAllGzippedFile("../../testdata/certs.pem.gz") + r, err := util.NewGzipReader("../../testdata/certs.pem.gz") require.NoError(t, err) - certs, err := util.LoadCertsFromPEM(raw) + certs, err := util.LoadCertsWithPEMReader(r) require.NoError(t, err) + require.NoError(t, r.Close()) emptyChains := make([][]*ctx509.Certificate, len(certs)) db.TruncateAllTablesForTest(t) @@ -166,10 +167,11 @@ func BenchmarkUpdateDomainEntriesUsingCerts10K(b *testing.B) { func benchmarkUpdateDomainEntriesUsingCerts(b 
*testing.B, count int) { swapBack := swapDBs(b) defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") + r, err := util.NewGzipReader("../../testdata/certs.pem.gz") require.NoError(b, err) - certs, err := util.LoadCertsFromPEM(raw) + certs, err := util.LoadCertsWithPEMReader(r) require.NoError(b, err) + require.NoError(t, r.Close()) require.GreaterOrEqual(b, len(certs), count) certs = certs[:count] emptyChains := make([][]*ctx509.Certificate, len(certs)) diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go index 2a7329e9..a51dde81 100644 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go +++ b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go @@ -201,7 +201,7 @@ func countPathSize(proofs []common.MapServerResponse) float64 { func countCertSize(entry *common.DomainEntry) int { size := 0 - for _, caList := range entry.CAEntry { + for _, caList := range entry.Entries { for _, certRaw := range caList.DomainCerts { size = size + len(certRaw) } diff --git a/tests/integration/grpc_test/main.go b/tests/integration/grpc_test/main.go index 684f7562..2e8dee0b 100644 --- a/tests/integration/grpc_test/main.go +++ b/tests/integration/grpc_test/main.go @@ -135,7 +135,7 @@ func checkProof(cert ctX509.Certificate, proofs []mapCommon.MapServerResponse) b panic(err) } // get the correct CA entry - for _, caEntry := range domainEntry.CAEntry { + for _, caEntry := range domainEntry.Entries { if caEntry.CAName == caName { // check if the cert is in the CA entry for _, certRaw := range caEntry.DomainCerts { diff --git a/tests/integration/old_mapserver/main.go b/tests/integration/old_mapserver/main.go index 08ef70a1..8f013998 100644 --- a/tests/integration/old_mapserver/main.go +++ b/tests/integration/old_mapserver/main.go @@ -164,7 +164,7 @@ func checkProof(cert ctX509.Certificate, proofs 
[]mapCommon.MapServerResponse) b panic(err) } // get the correct CA entry - for _, caEntry := range domainEntry.CAEntry { + for _, caEntry := range domainEntry.Entries { if caEntry.CAName == caName { // check if the cert is in the CA entry for _, certRaw := range caEntry.DomainCerts { From 4d80a9b8053e4d17718439b0a411923fa219c84e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 17 Apr 2023 15:13:31 +0200 Subject: [PATCH 084/187] Marshal to/from JSON overhaul preparation. --- cmd/ingest/processor.go | 4 +- pkg/common/crypto.go | 24 +-- pkg/common/json.go | 309 +++++++++++++++++++--------- pkg/common/json_test.go | 81 ++++++-- pkg/common/structure_test.go | 142 ++++++++++++- pkg/logverifier/logverifier_test.go | 26 +-- pkg/logverifier/verifier.go | 12 +- pkg/mapserver/common/structure.go | 2 +- pkg/pca/pca.go | 12 +- pkg/pca/sign_and_log.go | 8 +- pkg/policylog/client/logclient.go | 28 ++- 11 files changed, 466 insertions(+), 182 deletions(-) diff --git a/cmd/ingest/processor.go b/cmd/ingest/processor.go index e545b212..f2649f4e 100644 --- a/cmd/ingest/processor.go +++ b/cmd/ingest/processor.go @@ -15,7 +15,7 @@ import ( "github.com/netsec-ethz/fpki/cmd/ingest/cache" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/util" ) // Processor is the pipeline that takes file names and process them into certificates @@ -92,7 +92,7 @@ func (p *Processor) start() { // Process the parsed content into the DB, and from DB into SMT: go func() { for data := range p.certWithChainChan { - certs, certIDs, parentIDs, names := updater.UnfoldCert(data.Cert, data.CertID, + certs, certIDs, parentIDs, names := util.UnfoldCert(data.Cert, data.CertID, data.ChainPayloads, data.ChainIDs) for i := range certs { p.nodeChan <- &CertificateNode{ diff --git a/pkg/common/crypto.go b/pkg/common/crypto.go index 9f71abfa..7fc0d207 
100644 --- a/pkg/common/crypto.go +++ b/pkg/common/crypto.go @@ -27,9 +27,9 @@ const ( // SignStructRSASHA256: generate a signature using SHA256 and RSA func SignStructRSASHA256(s interface{}, privKey *rsa.PrivateKey) ([]byte, error) { - bytes, err := JsonStructToBytes(s) + bytes, err := ToJSON(s) if err != nil { - return nil, fmt.Errorf("SignStructRSASHA256 | JsonStructToBytes | %w", err) + return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) } hashOutput := sha256.Sum256(bytes) @@ -81,9 +81,9 @@ func RCSRVerifySignature(rcsr *RCSR) error { // Serialize without signature: sig := rcsr.Signature rcsr.Signature = nil - serializedStruct, err := JsonStructToBytes(rcsr) + serializedStruct, err := ToJSON(rcsr) if err != nil { - return fmt.Errorf("RCSRVerifySignature | JsonStructToBytes | %w", err) + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } rcsr.Signature = sig @@ -106,9 +106,9 @@ func RCSRVerifyRPCSignature(rcsr *RCSR, rpc *RPC) error { // Serialize without signature: sig := rcsr.Signature rcsr.Signature = nil - serializedStruct, err := JsonStructToBytes(rcsr) + serializedStruct, err := ToJSON(rcsr) if err != nil { - return fmt.Errorf("RCSRVerifySignature | JsonStructToBytes | %w", err) + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } rcsr.Signature = sig @@ -163,9 +163,9 @@ func RPCVerifyCASignature(caCert *x509.Certificate, rpc *RPC) error { // Serialize without CA signature or SPTs: caSig, SPTs := rpc.CASignature, rpc.SPTs rpc.CASignature, rpc.SPTs = nil, nil - bytes, err := JsonStructToBytes(rpc) + bytes, err := ToJSON(rpc) if err != nil { - return fmt.Errorf("RCSRVerifySignature | JsonStructToBytes | %w", err) + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } rpc.CASignature, rpc.SPTs = caSig, SPTs @@ -196,9 +196,9 @@ func VerifyPSRUsingRPC(psr *PSR, rpc *RPC) error { // Serialize without signature: sig := psr.RootCertSignature psr.RootCertSignature = nil - serializedStruct, err := JsonStructToBytes(psr) 
+ serializedStruct, err := ToJSON(psr) if err != nil { - return fmt.Errorf("RCSRVerifySignature | JsonStructToBytes | %w", err) + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } psr.RootCertSignature = sig @@ -245,9 +245,9 @@ func VerifyCASigInSP(caCert *x509.Certificate, sp *SP) error { // Serialize without CA signature or SPTs: caSig, SPTs := sp.CASignature, sp.SPTs sp.CASignature, sp.SPTs = nil, nil - serializedStruct, err := JsonStructToBytes(sp) + serializedStruct, err := ToJSON(sp) if err != nil { - return fmt.Errorf("RCSRVerifySignature | JsonStructToBytes | %w", err) + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } sp.CASignature, sp.SPTs = caSig, SPTs diff --git a/pkg/common/json.go b/pkg/common/json.go index b7dee0d8..e7b8ad2d 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -6,64 +6,21 @@ import ( "io/ioutil" "github.com/google/trillian" - "github.com/google/trillian/types" + trilliantypes "github.com/google/trillian/types" ) -// JsonStructToFile: marshall structure to bytes, and store them in a file -func JsonStructToFile(s interface{}, filePath string) error { - bytes, err := JsonStructToBytes(s) - if err != nil { - return fmt.Errorf("JsonStructToFile | JsonStructToBytes | %w", err) - } +func JsonBytesToPoI(poiBytesArray [][]byte) ([]*trillian.Proof, error) { + // po, err := FromJSON(poiBytesArray) + // if err != nil { + // return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) + // } + // result, ok := po.(*trilliantypes.LogRootV1) + // if !ok { + // return nil, fmt.Errorf("JsonFileToPoI | object is %T", po) + // } - err = ioutil.WriteFile(filePath, bytes, 0644) - if err != nil { - return fmt.Errorf("JsonStructToFile | WriteFile | %w", err) - } - return nil -} + // deleteme -// JsonStructToBytes: marshall json to bytes -func JsonStructToBytes(s interface{}) ([]byte, error) { - switch s.(type) { - case *RCSR: - break - case *RPC: - break - case *SPT: - break - case *SPRT: - break - 
case *trillian.Proof: - break - case *types.LogRootV1: - break - case *SP: - break - case *PSR: - break - case []byte: - break - case []*trillian.Proof: - break - default: - return nil, fmt.Errorf("JsonStructToBytes | Structure not supported yet") - } - - bytes, err := json.Marshal(s) - if err != nil { - return nil, fmt.Errorf("JsonStructToBytes | Marshal | %w", err) - } - return bytes, nil -} - -// -------------------------------------------------------------------------------- -// -// Bytes to struct -// -// -------------------------------------------------------------------------------- -// JsonBytesToPoI: bytes -> PoI in json -func JsonBytesToPoI(poiBytesArray [][]byte) ([]*trillian.Proof, error) { result := []*trillian.Proof{} for _, poiBytes := range poiBytesArray { @@ -78,89 +35,245 @@ func JsonBytesToPoI(poiBytesArray [][]byte) ([]*trillian.Proof, error) { return result, nil } -// JsonBytesToLogRoot: Bytes -> log root in json -func JsonBytesToLogRoot(logRootBytes []byte) (*types.LogRootV1, error) { - result := &types.LogRootV1{} - - err := json.Unmarshal(logRootBytes, result) +// JSONToLogRoot: Bytes -> log root in json +func JSONToLogRoot(logRootBytes []byte) (*trilliantypes.LogRootV1, error) { + po, err := FromJSON(logRootBytes) if err != nil { return nil, fmt.Errorf("JsonBytesToLogRoot | Unmarshal | %w", err) } + result, ok := po.(*trilliantypes.LogRootV1) + if !ok { + return nil, fmt.Errorf("JsonFileToLogRoot | object is %T", po) + } return result, nil } -//-------------------------------------------------------------------------------- -// File to struct -//-------------------------------------------------------------------------------- - // JsonFileToRPC: read json files and unmarshal it to Root Policy Certificate -func JsonFileToRPC(s *RPC, filePath string) error { - file, err := ioutil.ReadFile(filePath) +func JsonFileToRPC(filePath string) (*RPC, error) { + po, err := FromJSONFile(filePath) if err != nil { - return fmt.Errorf("JsonFileToRPC | 
ReadFile | %w", err) + return nil, fmt.Errorf("JsonFileToRPC | Unmarshal | %w", err) } - err = json.Unmarshal([]byte(file), s) - if err != nil { - return fmt.Errorf("JsonFileToRPC | Unmarshal | %w", err) + o, ok := po.(*RPC) + if !ok { + return nil, fmt.Errorf("JsonFileToRPC | object is %T", po) } - - return nil + return o, nil } // JsonFileToSPT: read json files and unmarshal it to Signed Policy Timestamp -func JsonFileToSPT(s *SPT, filePath string) error { - file, err := ioutil.ReadFile(filePath) +func JsonFileToSPT(filePath string) (*SPT, error) { + po, err := FromJSONFile(filePath) if err != nil { - return fmt.Errorf("JsonFileToSPT | ReadFile | %w", err) + return nil, fmt.Errorf("JsonFileToSPT | Unmarshal | %w", err) } - err = json.Unmarshal([]byte(file), s) - if err != nil { - return fmt.Errorf("JsonFileToSPT | Unmarshal | %w", err) + o, ok := po.(*SPT) + if !ok { + return nil, fmt.Errorf("JsonFileToSPT | object is %T", po) } - - return nil + return o, nil } // JsonFileToProof: read json files and unmarshal it to trillian proof -func JsonFileToProof(proof *trillian.Proof, filePath string) error { - file, err := ioutil.ReadFile(filePath) +func JsonFileToProof(filePath string) (*trillian.Proof, error) { + po, err := FromJSONFile(filePath) if err != nil { - return fmt.Errorf("JsonFileToProof | ReadFile | %w", err) + return nil, fmt.Errorf("JsonFileToProof | Unmarshal | %w", err) } - err = json.Unmarshal([]byte(file), proof) - if err != nil { - return fmt.Errorf("JsonFileToProof | Unmarshal | %w", err) + o, ok := po.(*trillian.Proof) + if !ok { + return nil, fmt.Errorf("JsonFileToProof | object is %T", po) } - return nil + return o, nil } // JsonFileToSTH: read json files and unmarshal it to Signed Tree Head -func JsonFileToSTH(s *types.LogRootV1, filePath string) error { - file, err := ioutil.ReadFile(filePath) +func JsonFileToSTH(filePath string) (*trilliantypes.LogRootV1, error) { + po, err := FromJSONFile(filePath) if err != nil { - return 
fmt.Errorf("JsonFileToSTH | ReadFile | %w", err) + return nil, fmt.Errorf("JsonFileToSTH | Unmarshal | %w", err) } - err = json.Unmarshal([]byte(file), s) + o, ok := po.(*trilliantypes.LogRootV1) + if !ok { + return nil, fmt.Errorf("JsonFileToSTH | object is %T", po) + } + return o, nil +} + +// JsonFileToSTH reads a json file and unmarshals it to a Signed Policy. +func JsonFileToSP(filePath string) (*SP, error) { + po, err := FromJSONFile(filePath) if err != nil { - return fmt.Errorf("JsonFileToSTH | Unmarshal | %w", err) + return nil, fmt.Errorf("JsonFileToSP | Unmarshal | %w", err) } - return nil + + o, ok := po.(*SP) + if !ok { + err = fmt.Errorf("JsonFileToSP | object is %T", po) + } + return o, err } -// JsonFileToSTH: read json files and unmarshal it to Signed Tree Head -func JsonFileToSP(s *SP, filePath string) error { - file, err := ioutil.ReadFile(filePath) +func ToJSON(o any) ([]byte, error) { + r := struct { + T string + O any + }{ + O: o, + } + // Find the internal type of the object to marshal. + switch o := o.(type) { + case *RCSR: + r.T = "rcsr" + case *RPC: + r.T = "rpc" + case *PCRevocation: + r.T = "pcrevocation" + case *SPT: + r.T = "spt" + case *SPRT: + r.T = "sprt" + case *SP: + r.T = "sp" + case *PSR: + r.T = "psr" + case *trillian.Proof: + r.T = "trillian.Proof" + case []*trillian.Proof: + r.T = "[]trillian.Proof" + case *trilliantypes.LogRootV1: + r.T = "LogRootV1" + default: + return nil, fmt.Errorf("unrecognized type %T", o) + } + + // Now Marshal the wrapper. + d, err := json.Marshal(r) + if err != nil { + return nil, fmt.Errorf("wrapping marshalling of object: %w", err) + } + return d, nil +} + +func FromJSON(data []byte) (any, error) { + // Get only the type. 
+ typeOnly := struct { + T string + }{} + if err := json.Unmarshal(data, &typeOnly); err != nil { + return nil, fmt.Errorf("obtaining the wrapping type: %w", err) + } + + switch typeOnly.T { + case "rcsr": + typeAndValue := struct { + T string + O *RCSR + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "rpc": + typeAndValue := struct { + T string + O *RPC + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "spt": + typeAndValue := struct { + T string + O *SPT + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "sprt": + typeAndValue := struct { + T string + O *SPRT + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "sp": + typeAndValue := struct { + T string + O *SP + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "psr": + typeAndValue := struct { + T string + O *PSR + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "trillian.Proof": + typeAndValue := struct { + T string + O *trillian.Proof + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + case "[]trillian.Proof": + typeAndValue := struct { + T string + O []*trillian.Proof + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling 
internal type: %w", err) + } + return typeAndValue.O, nil + case "LogRootV1": + typeAndValue := struct { + T string + O *trilliantypes.LogRootV1 + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O, nil + default: + return nil, fmt.Errorf("unmarshalling internal type: bad type \"%s\"", typeOnly.T) + } +} + +// ToJSONFile serializes any supported type to a file, using JSON. +func ToJSONFile(s any, filePath string) error { + bytes, err := ToJSON(s) if err != nil { - return fmt.Errorf("JsonFileToSP | ReadFile | %w", err) + return fmt.Errorf("JsonStructToFile | ToJSON | %w", err) } - err = json.Unmarshal([]byte(file), s) + err = ioutil.WriteFile(filePath, bytes, 0644) if err != nil { - return fmt.Errorf("JsonFileToSP | Unmarshal | %w", err) + return fmt.Errorf("JsonStructToFile | WriteFile | %w", err) } return nil } + +func FromJSONFile(filePath string) (any, error) { + data, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, err + } + + return FromJSON(data) +} diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 88cfcfdc..43b652d6 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -3,9 +3,12 @@ package common import ( "os" "path" + "reflect" "testing" "time" + "github.com/google/trillian" + trilliantypes "github.com/google/trillian/types" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -25,21 +28,20 @@ func TestEncodeAndDecodeOfSPT(t *testing.T) { CAName: "hihihihihihi", LogID: 123412, CertType: 0x11, - AddedTS: time.Now(), + AddedTS: nowWithoutMonotonic(), STH: generateRandomBytes(), PoI: generateRandomBytesArray(), STHSerialNumber: 7689, Signature: generateRandomBytes(), } - err := JsonStructToFile(spt, tempFile) + err := ToJSONFile(spt, tempFile) require.NoError(t, err, "Json Struct To File error") - deserializedSPT := &SPT{} - - err = 
JsonFileToSPT(deserializedSPT, tempFile) + deserializedSPT, err := JsonFileToSPT(tempFile) require.NoError(t, err, "Json File To SPT error") + assert.Equal(t, spt, deserializedSPT) assert.True(t, deserializedSPT.Equal(*spt), "SPT serialized and deserialized error") } @@ -90,12 +92,10 @@ func TestEncodeAndDecodeOfRPC(t *testing.T) { SPTs: []SPT{*spt1, *spt2}, } - err := JsonStructToFile(rpc, tempFile) + err := ToJSONFile(rpc, tempFile) require.NoError(t, err, "Json Struct To File error") - deserializedSPT := &RPC{} - - err = JsonFileToRPC(deserializedSPT, tempFile) + deserializedSPT, err := JsonFileToRPC(tempFile) require.NoError(t, err, "Json File To RPC error") assert.True(t, deserializedSPT.Equal(rpc), "RPC serialized and deserialized error") @@ -134,13 +134,68 @@ func TestEncodeAndDecodeOfPC(t *testing.T) { SPTs: []SPT{spt}, } - err := JsonStructToFile(&pc, tempFile) + err := ToJSONFile(&pc, tempFile) require.NoError(t, err, "Json Struct To File error") - deserializedPC := &SP{} - - err = JsonFileToSP(deserializedPC, tempFile) + deserializedPC, err := JsonFileToSP(tempFile) require.NoError(t, err, "Json File To SPT error") assert.True(t, deserializedPC.Equal(pc), "PC serialized and deserialized error") } + +func TestToFromJSON(t *testing.T) { + cases := map[string]struct { + data any + }{ + "trillian.Proof": { + data: &trillian.Proof{ + LeafIndex: 1, + Hashes: generateRandomBytesArray(), + }, + }, + "slice_of_trillian.Proof": { + data: []*trillian.Proof{ + { + LeafIndex: 1, + Hashes: generateRandomBytesArray(), + }, + { + LeafIndex: 2, + Hashes: generateRandomBytesArray(), + }, + { + LeafIndex: 3, + Hashes: generateRandomBytesArray(), + }, + }, + }, + "trilliantypes.LogRootV1": { + data: &trilliantypes.LogRootV1{ + TreeSize: 1, + RootHash: generateRandomBytes(), + TimestampNanos: 11, + Revision: 3, + Metadata: generateRandomBytes(), + }, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + 
expectedType := reflect.TypeOf(tc.data) // type will be a pointer to RPC, etc. + d, err := ToJSON(tc.data) + t.Logf("JSON: %s", string(d)) + require.NoError(t, err) + + o, err := FromJSON(d) + require.NoError(t, err) + require.NotNil(t, o) + require.Equal(t, tc.data, o) + + gotType := reflect.TypeOf(o) + require.Equal(t, expectedType, gotType) + }) + } +} diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index f3ecf6df..97271a4a 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -3,6 +3,7 @@ package common import ( "os" "path" + "reflect" "testing" "time" @@ -10,10 +11,6 @@ import ( "github.com/stretchr/testify/require" ) -//------------------------------------------------------ -// tests for structure.go -//------------------------------------------------------ - // TestEqual: Equal funcs for every structure func TestEqual(t *testing.T) { rcsr := &RCSR{ @@ -138,12 +135,143 @@ func TestJsonReadWrite(t *testing.T) { tempFile := path.Join(os.TempDir(), "rpctest.json") defer os.Remove(tempFile) - err := JsonStructToFile(rpc, tempFile) + err := ToJSONFile(rpc, tempFile) require.NoError(t, err, "Json Struct To File error") - rpc1 := &RPC{} - err = JsonFileToRPC(rpc1, tempFile) + rpc1, err := JsonFileToRPC(tempFile) require.NoError(t, err, "Json File To RPC error") assert.True(t, rpc.Equal(rpc1), "Json error") } + +// TestSingleObject checks that the structure types in the test cases can be converted to JSON and +// back, using the functions ToJSON and FromJSON. +// It checks after deserialization if the objects are equal. 
+func TestSingleObject(t *testing.T) { + cases := map[string]struct { + data any + }{ + "rcsr": { + data: &RCSR{ + Subject: "bandqhvdbdlwnd", + Version: 6789, + TimeStamp: time.Unix(111222323, 0), + PublicKeyAlgorithm: RSA, + PublicKey: generateRandomBytes(), + SignatureAlgorithm: SHA256, + PRCSignature: generateRandomBytes(), + Signature: generateRandomBytes(), + }, + }, + "rpc": { + data: &RPC{ + SerialNumber: 1729381, + Subject: "bad domain", + Version: 1729381, + PublicKeyAlgorithm: RSA, + PublicKey: generateRandomBytes(), + NotBefore: nowWithoutMonotonic(), + NotAfter: nowWithoutMonotonic(), + CAName: "bad domain", + SignatureAlgorithm: SHA256, + TimeStamp: nowWithoutMonotonic(), + PRCSignature: generateRandomBytes(), + CASignature: generateRandomBytes(), + SPTs: []SPT{*randomSPT(), *randomSPT()}, + }, + }, + "spt": { + data: &SPT{ + Version: 11, + Subject: "hihihihihhi", + CAName: "this is the CA name", + LogID: 42, + CertType: 0x11, + AddedTS: time.Unix(1234, 0), + STH: generateRandomBytes(), + PoI: generateRandomBytesArray(), + STHSerialNumber: 131678, + Signature: generateRandomBytes(), + }, + }, + "sprt": { + data: &SPRT{ + Version: 12314, + Subject: "bad domain", + CAName: "I'm malicious CA, nice to meet you", + LogID: 1729381, + CertType: 0x21, + AddedTS: nowWithoutMonotonic(), + STH: generateRandomBytes(), + PoI: generateRandomBytesArray(), + STHSerialNumber: 1729381, + Reason: 1729381, + Signature: generateRandomBytes(), + }, + }, + "sp": { + data: &SP{ + Policies: Policy{ + TrustedCA: []string{"one CA", "another CA"}, + AllowedSubdomains: []string{"sub1.com", "sub2.com"}, + }, + TimeStamp: nowWithoutMonotonic(), + Subject: "sp subject", + CAName: "one CA", + SerialNumber: 1234, + CASignature: generateRandomBytes(), + RootCertSignature: generateRandomBytes(), + SPTs: []SPT{*randomSPT(), *randomSPT(), *randomSPT()}, + }, + }, + "psr": { + data: &PSR{ + Policies: Policy{ + TrustedCA: []string{"one CA", "another CA"}, + AllowedSubdomains: 
[]string{"sub1.com", "sub2.com"}, + }, + TimeStamp: nowWithoutMonotonic(), + DomainName: "domain_name.com", + RootCertSignature: generateRandomBytes(), + }, + }, + } + + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + expectedType := reflect.TypeOf(tc.data) // type will be a pointer to RPC, etc. + d, err := ToJSON(tc.data) + t.Logf("JSON: %s", string(d)) + require.NoError(t, err) + + o, err := FromJSON(d) + require.NoError(t, err) + require.NotNil(t, o) + require.Equal(t, tc.data, o) + + gotType := reflect.TypeOf(o) + require.Equal(t, expectedType, gotType) + }) + } +} + +func randomSPT() *SPT { + return &SPT{ + Version: 12368713, + Subject: "hohohoho", + CAName: "I'm malicious CA, nice to meet you", + LogID: 1324123, + CertType: 0x21, + AddedTS: nowWithoutMonotonic(), + STH: generateRandomBytes(), + PoI: generateRandomBytesArray(), + STHSerialNumber: 114378, + Signature: generateRandomBytes(), + } +} + +func nowWithoutMonotonic() time.Time { + return time.Unix(time.Now().Unix(), 0) +} diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 96b73b09..7ec696cd 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/google/trillian" - "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" "github.com/stretchr/testify/require" @@ -12,23 +11,20 @@ import ( // TestVerification: Test logverifier.VerifyInclusionByHash() func TestVerification(t *testing.T) { - proof := &trillian.Proof{} - - err := common.JsonFileToProof(proof, "./testdata/POI.json") + proof, err := common.JsonFileToProof("./testdata/POI.json") require.NoError(t, err, "Json File To Proof Error") - sth, err := 
common.JsonBytesToLogRoot([]byte("{\"TreeSize\":2,\"RootHash\":\"VsGAf6yfqGWcEno9aRBj3O1N9E8fY/XE9nJmYKjefPM=\",\"TimestampNanos\":1661986742112252000,\"Revision\":0,\"Metadata\":\"\"}")) + sth, err := common.JSONToLogRoot([]byte("{\"TreeSize\":2,\"RootHash\":\"VsGAf6yfqGWcEno9aRBj3O1N9E8fY/XE9nJmYKjefPM=\",\"TimestampNanos\":1661986742112252000,\"Revision\":0,\"Metadata\":\"\"}")) require.NoError(t, err, "Json bytes To STH Error") logverifier := NewLogVerifier(nil) - rpc := &common.RPC{} - err = common.JsonFileToRPC(rpc, "./testdata/rpc.json") + rpc, err := common.JsonFileToRPC("./testdata/rpc.json") require.NoError(t, err, "Json File To RPC Error") rpc.SPTs = []common.SPT{} - rpcBytes, err := common.JsonStructToBytes(rpc) + rpcBytes, err := common.ToJSON(rpc) require.NoError(t, err, "Json Struct To Bytes Error") rpcHash := logverifier.HashLeaf(rpcBytes) @@ -39,12 +35,10 @@ func TestVerification(t *testing.T) { // TestConsistencyBetweenSTH: test logverifier.VerifyRoot() func TestConsistencyBetweenSTH(t *testing.T) { - sth := &types.LogRootV1{} - err := common.JsonFileToSTH(sth, "./testdata/OldSTH.json") + sth, err := common.JsonFileToSTH("./testdata/OldSTH.json") require.NoError(t, err, "Json File To STH Error") - newSTH := &types.LogRootV1{} - err = common.JsonFileToSTH(newSTH, "./testdata/NewSTH.json") + newSTH, err := common.JsonFileToSTH("./testdata/NewSTH.json") require.NoError(t, err, "Json File To STH Error") logverifier := NewLogVerifier(nil) @@ -57,9 +51,7 @@ func TestConsistencyBetweenSTH(t *testing.T) { } func TestCheckRPC(t *testing.T) { - rpc := &common.RPC{} - - err := common.JsonFileToRPC(rpc, "./testdata/rpc.json") + rpc, err := common.JsonFileToRPC("./testdata/rpc.json") require.NoError(t, err, "Json File To RPC Error") logverifier := NewLogVerifier(nil) @@ -69,9 +61,7 @@ func TestCheckRPC(t *testing.T) { } func TestCheckSP(t *testing.T) { - sp := &common.SP{} - - err := common.JsonFileToSP(sp, "./testdata/sp.json") + sp, err := 
common.JsonFileToSP("./testdata/sp.json") require.NoError(t, err, "Json File To RPC Error") logverifier := NewLogVerifier(nil) diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index aa335015..9e713db4 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -110,16 +110,16 @@ func (c *LogVerifier) VerifySP(sp *common.SP) error { // Get the hash of the SP without SPTs: SPTs := sp.SPTs sp.SPTs = []common.SPT{} - serializedStruct, err := common.JsonStructToBytes(sp) + serializedStruct, err := common.ToJSON(sp) if err != nil { - return fmt.Errorf("VerifyRPC | JsonStructToBytes | %w", err) + return fmt.Errorf("VerifyRPC | ToJSON | %w", err) } bytesHash := c.HashLeaf([]byte(serializedStruct)) // Restore the SPTs to the SP: sp.SPTs = SPTs for _, p := range sp.SPTs { - sth, err := common.JsonBytesToLogRoot(p.STH) + sth, err := common.JSONToLogRoot(p.STH) if err != nil { return fmt.Errorf("VerifySP | JsonBytesToLogRoot | %w", err) } @@ -139,16 +139,16 @@ func (c *LogVerifier) VerifyRPC(rpc *common.RPC) error { // Get the hash of the RPC without SPTs: SPTs := rpc.SPTs rpc.SPTs = []common.SPT{} - serializedStruct, err := common.JsonStructToBytes(rpc) + serializedStruct, err := common.ToJSON(rpc) if err != nil { - return fmt.Errorf("VerifyRPC | JsonStructToBytes | %w", err) + return fmt.Errorf("VerifyRPC | ToJSON | %w", err) } bytesHash := c.HashLeaf([]byte(serializedStruct)) // Restore the SPTs to the RPC: rpc.SPTs = SPTs for _, p := range rpc.SPTs { - sth, err := common.JsonBytesToLogRoot(p.STH) + sth, err := common.JSONToLogRoot(p.STH) if err != nil { return fmt.Errorf("VerifyRPC | JsonBytesToLogRoot | %w", err) } diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index e7cb5455..4ae4246b 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -13,7 +13,7 @@ import ( // The domain is identified by the SHA256 of the DomainName in the DB. 
type DomainEntry struct { DomainName string - DomainID []byte + DomainID []byte // This is the SHA256 of the domain name RPCs []common.RPC PCs []common.SP diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 04c1f49b..06681a76 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -128,14 +128,14 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { func (pca *PCA) OutputRPCAndSP() error { for domain, rpc := range pca.validRPCsByDomains { - err := common.JsonStructToFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.CAName+"_"+"rpc") + err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.CAName+"_"+"rpc") if err != nil { return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) } } for domain, rpc := range pca.validSPsByDomains { - err := common.JsonStructToFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.CAName+"_"+"sp") + err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.CAName+"_"+"sp") if err != nil { return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) } @@ -153,14 +153,14 @@ func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { } // get leaf hash - rpcBytes, err := common.JsonStructToBytes(rpc) + rpcBytes, err := common.ToJSON(rpc) if err != nil { return fmt.Errorf("verifySPT | Json_StructToBytes | %w", err) } leafHash := pca.logVerifier.HashLeaf(rpcBytes) // get LogRootV1 - logRoot, err := common.JsonBytesToLogRoot(spt.STH) + logRoot, err := common.JSONToLogRoot(spt.STH) if err != nil { return fmt.Errorf("verifySPT | JsonBytesToLogRoot | %w", err) } @@ -183,14 +183,14 @@ func (pca *PCA) verifySPTWithSP(spt *common.SPT, sp *common.SP) error { } // get leaf hash - spBytes, err := common.JsonStructToBytes(sp) + spBytes, err := common.ToJSON(sp) if err != nil { return fmt.Errorf("verifySPT | Json_StructToBytes | %w", err) } leafHash := pca.logVerifier.HashLeaf(spBytes) // get LogRootV1 - logRoot, err := common.JsonBytesToLogRoot(spt.STH) + logRoot, err := common.JSONToLogRoot(spt.STH) if err != nil { return 
fmt.Errorf("verifySPT | JsonBytesToLogRoot | %w", err) } diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index d26154b8..31134c37 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -87,18 +87,18 @@ func (pca *PCA) findRPCAndVerifyPSR(psr *common.PSR) error { // save file to output dir func (pca *PCA) sendRPCToPolicyLog(rpc *common.RPC, fileName string) error { - return common.JsonStructToFile(rpc, pca.policyLogExgPath+"/rpc/"+fileName) + return common.ToJSONFile(rpc, pca.policyLogExgPath+"/rpc/"+fileName) } // save file to output dir func (pca *PCA) sendSPToPolicyLog(sp *common.SP, fileName string) error { - return common.JsonStructToFile(sp, pca.policyLogExgPath+"/sp/"+fileName) + return common.ToJSONFile(sp, pca.policyLogExgPath+"/sp/"+fileName) } func (pca *PCA) getHashName(s interface{}) (string, error) { - structBytes, err := common.JsonStructToBytes(s) + structBytes, err := common.ToJSON(s) if err != nil { - return "", fmt.Errorf("getHashName | JsonStructToBytes | %w", err) + return "", fmt.Errorf("getHashName | ToJSON | %w", err) } bytesHash := pca.logVerifier.HashLeaf([]byte(structBytes)) diff --git a/pkg/policylog/client/logclient.go b/pkg/policylog/client/logclient.go index 0d1af296..62c346d8 100644 --- a/pkg/policylog/client/logclient.go +++ b/pkg/policylog/client/logclient.go @@ -301,17 +301,16 @@ func (c *LogClient) readRPCFromFileToBytes() ([][]byte, error) { for _, filaName := range fileNames { filaPath := c.config.PolicyLogExchangePath + "/rpc/" + filaName.Name() - rpc := &common.RPC{} // read RPC from file - err := common.JsonFileToRPC(rpc, filaPath) + rpc, err := common.JsonFileToRPC(filaPath) if err != nil { return nil, fmt.Errorf("readRPCFromFileToBytes | JsonFileToRPC %w", err) } // serialize rpc - bytes, err := common.JsonStructToBytes(rpc) + bytes, err := common.ToJSON(rpc) if err != nil { - return nil, fmt.Errorf("readRPCFromFileToBytes | JsonStructToBytes: %w", err) + return nil, 
fmt.Errorf("readRPCFromFileToBytes | ToJSON: %w", err) } data = append(data, bytes) @@ -333,25 +332,24 @@ func (c *LogClient) readSPFromFileToBytes() ([][]byte, error) { // read SPT from "fileTransfer" folder for _, filaName := range fileNames { - filaPath := c.config.PolicyLogExchangePath + "/sp/" + filaName.Name() + filePath := c.config.PolicyLogExchangePath + "/sp/" + filaName.Name() - sp := &common.SP{} // read RPC from file - err := common.JsonFileToSP(sp, filaPath) + sp, err := common.JsonFileToSP(filePath) if err != nil { return nil, fmt.Errorf("readSPFromFileToBytes | JsonFileToRPC %w", err) } // serialize sp - bytes, err := common.JsonStructToBytes(sp) + bytes, err := common.ToJSON(sp) if err != nil { - return nil, fmt.Errorf("readSPFromFileToBytes | JsonStructToBytes: %w", err) + return nil, fmt.Errorf("readSPFromFileToBytes | ToJSON: %w", err) } data = append(data, bytes) // delete rpc - os.Remove(filaPath) + os.Remove(filePath) } return data, nil } @@ -364,17 +362,17 @@ func (c *LogClient) storeProofMapToSPT(proofMap map[string]*PoIAndSTH) error { // serialize proof to bytes for _, proof := range v.PoIs { - bytes, err := common.JsonStructToBytes(proof) + bytes, err := common.ToJSON(proof) if err != nil { - return fmt.Errorf("storeProofMapToSPT | JsonStructToBytes: %w", err) + return fmt.Errorf("storeProofMapToSPT | ToJSON: %w", err) } proofBytes = append(proofBytes, bytes) } // serialize log root (signed tree head) to bytes - sth, err := common.JsonStructToBytes(&v.STH) + sth, err := common.ToJSON(&v.STH) if err != nil { - return fmt.Errorf("storeProofMapToSPT | JsonStructToBytes: %w", err) + return fmt.Errorf("storeProofMapToSPT | ToJSON: %w", err) } // attach PoI and STH to SPT @@ -385,7 +383,7 @@ func (c *LogClient) storeProofMapToSPT(proofMap map[string]*PoIAndSTH) error { } // store SPT to file - err = common.JsonStructToFile(spt, c.config.PolicyLogExchangePath+"/spt/"+k) + err = common.ToJSONFile(spt, c.config.PolicyLogExchangePath+"/spt/"+k) if 
err != nil { return fmt.Errorf("storeProofMapToSPT | JsonStructToFile: %w", err) } From 1d1bbbb15d34302fa1e365478db531c19dfc85c7 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 17 Apr 2023 22:43:29 +0200 Subject: [PATCH 085/187] Finish JSON (un)marshal overhaul. --- pkg/common/json.go | 30 ++----- pkg/common/json_test.go | 8 +- pkg/common/structure.go | 105 +++++++++---------------- pkg/common/structure_test.go | 60 +++++++------- pkg/logverifier/verifier.go | 4 +- pkg/mapserver/common/structure_test.go | 2 +- pkg/pca/pca.go | 4 +- pkg/policylog/client/logclient.go | 15 ++-- 8 files changed, 91 insertions(+), 137 deletions(-) diff --git a/pkg/common/json.go b/pkg/common/json.go index e7b8ad2d..29215b29 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -9,29 +9,15 @@ import ( trilliantypes "github.com/google/trillian/types" ) -func JsonBytesToPoI(poiBytesArray [][]byte) ([]*trillian.Proof, error) { - // po, err := FromJSON(poiBytesArray) - // if err != nil { - // return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) - // } - // result, ok := po.(*trilliantypes.LogRootV1) - // if !ok { - // return nil, fmt.Errorf("JsonFileToPoI | object is %T", po) - // } - - // deleteme - - result := []*trillian.Proof{} - - for _, poiBytes := range poiBytesArray { - newPOI := &trillian.Proof{} - err := json.Unmarshal(poiBytes, newPOI) - if err != nil { - return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) - } - result = append(result, newPOI) +func JSONToPoI(poiBytes []byte) ([]*trillian.Proof, error) { + po, err := FromJSON(poiBytes) + if err != nil { + return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) + } + result, ok := po.([]*trillian.Proof) + if !ok { + return nil, fmt.Errorf("JsonFileToPoI | object is %T", po) } - return result, nil } diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 43b652d6..1972720a 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -30,7 +30,7 @@ func 
TestEncodeAndDecodeOfSPT(t *testing.T) { CertType: 0x11, AddedTS: nowWithoutMonotonic(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 7689, Signature: generateRandomBytes(), } @@ -58,7 +58,7 @@ func TestEncodeAndDecodeOfRPC(t *testing.T) { CertType: 0x11, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 131678, Signature: generateRandomBytes(), } @@ -71,7 +71,7 @@ func TestEncodeAndDecodeOfRPC(t *testing.T) { CertType: 0x21, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 114378, Signature: generateRandomBytes(), } @@ -114,7 +114,7 @@ func TestEncodeAndDecodeOfPC(t *testing.T) { CertType: 0x21, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 114378, Signature: generateRandomBytes(), } diff --git a/pkg/common/structure.go b/pkg/common/structure.go index 00a99144..523bfa74 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -48,24 +48,15 @@ type SPT struct { CertType uint8 `json:",omitempty"` AddedTS time.Time `json:",omitempty"` STH []byte `json:",omitempty"` - PoI [][]byte `json:",omitempty"` + PoI []byte `json:",omitempty"` STHSerialNumber int `json:",omitempty"` Signature []byte `json:",omitempty"` } // signed policy revocation timestamp type SPRT struct { - Version int `json:",omitempty"` - Subject string `json:",omitempty"` - CAName string `json:",omitempty"` - LogID int `json:",omitempty"` - CertType uint8 `json:",omitempty"` - AddedTS time.Time `json:",omitempty"` - STH []byte `json:",omitempty"` - PoI [][]byte `json:",omitempty"` - STHSerialNumber int `json:",omitempty"` - Reason int `json:",omitempty"` - Signature []byte `json:",omitempty"` + SPT + Reason int `json:",omitempty"` } // Signed Policy @@ -100,24 +91,35 @@ type Policy 
struct { // listed funcs are Equal() func for each structure func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { - return rcsr.Subject == rcsr_.Subject && + return true && + rcsr.Subject == rcsr_.Subject && rcsr.Version == rcsr_.Version && rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && - bytes.Compare(rcsr.PublicKey, rcsr_.PublicKey) == 0 && + bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && rcsr.SignatureAlgorithm == rcsr_.SignatureAlgorithm && - bytes.Compare(rcsr.PRCSignature, rcsr_.PRCSignature) == 0 && - bytes.Compare(rcsr.Signature, rcsr_.Signature) == 0 + bytes.Equal(rcsr.PRCSignature, rcsr_.PRCSignature) && + bytes.Equal(rcsr.Signature, rcsr_.Signature) } func (s SPT) Equal(o SPT) bool { - return s.Version == o.Version && s.Subject == o.Subject && s.CAName == o.CAName && - s.LogID == o.LogID && s.CertType == o.CertType && s.AddedTS.Equal(o.AddedTS) && - bytes.Equal(s.STH, o.STH) && equalSliceSlicesBytes(s.PoI, o.PoI) && - s.STHSerialNumber == o.STHSerialNumber && bytes.Equal(s.Signature, o.Signature) + return true && + s.Version == o.Version && + s.Subject == o.Subject && + s.CAName == o.CAName && + s.LogID == o.LogID && + s.CertType == o.CertType && + s.AddedTS.Equal(o.AddedTS) && + bytes.Equal(s.STH, o.STH) && + bytes.Equal(s.PoI, o.PoI) && + s.STHSerialNumber == o.STHSerialNumber && + bytes.Equal(s.Signature, o.Signature) } func (s Policy) Equal(o Policy) bool { + if len(s.TrustedCA) != len(o.TrustedCA) { + return false + } for i, v := range s.TrustedCA { if v != o.TrustedCA[i] { return false @@ -127,79 +129,46 @@ func (s Policy) Equal(o Policy) bool { } func (s SP) Equal(o SP) bool { - if s.TimeStamp.Equal(o.TimeStamp) && + return true && + s.TimeStamp.Equal(o.TimeStamp) && s.Subject == o.Subject && s.CAName == o.CAName && s.SerialNumber == o.SerialNumber && bytes.Equal(s.CASignature, o.CASignature) && bytes.Equal(s.RootCertSignature, o.RootCertSignature) && - s.Policies.Equal(o.Policies) { - for i, v := range 
s.SPTs { - if !v.Equal(o.SPTs[i]) { - return false - } - } - return true - } - return false + s.Policies.Equal(o.Policies) && + equalSPTs(s.SPTs, o.SPTs) } func (rpc *RPC) Equal(rpc_ *RPC) bool { - if rpc.SerialNumber == rpc_.SerialNumber && + return true && + rpc.SerialNumber == rpc_.SerialNumber && rpc.Subject == rpc_.Subject && rpc.Version == rpc_.Version && rpc.PublicKeyAlgorithm == rpc_.PublicKeyAlgorithm && - bytes.Compare(rpc.PublicKey, rpc_.PublicKey) == 0 && + bytes.Equal(rpc.PublicKey, rpc_.PublicKey) && rpc.NotBefore.Equal(rpc_.NotBefore) && rpc.NotAfter.Equal(rpc_.NotAfter) && rpc.CAName == rpc_.CAName && rpc.SignatureAlgorithm == rpc_.SignatureAlgorithm && rpc.TimeStamp.Equal(rpc_.TimeStamp) && - bytes.Compare(rpc.PRCSignature, rpc_.PRCSignature) == 0 && - bytes.Compare(rpc.CASignature, rpc_.CASignature) == 0 { - if len(rpc.SPTs) != len(rpc_.SPTs) { - return false - } - for i, v := range rpc.SPTs { - if !v.Equal(rpc_.SPTs[i]) { - return false - } - } - return true - } - return false + bytes.Equal(rpc.PRCSignature, rpc_.PRCSignature) && + bytes.Equal(rpc.CASignature, rpc_.CASignature) && + equalSPTs(rpc.SPTs, rpc_.SPTs) } func (sprt *SPRT) Equal(sprt_ *SPRT) bool { - if sprt.Version == sprt_.Version && - sprt.Subject == sprt_.Subject && - sprt.CAName == sprt_.CAName && - sprt.LogID == sprt_.LogID && - sprt.CertType == sprt_.CertType && - sprt.AddedTS.Equal(sprt_.AddedTS) && - bytes.Compare(sprt.STH, sprt_.STH) == 0 && - sprt.STHSerialNumber == sprt_.STHSerialNumber && - sprt.Reason == sprt_.Reason && - bytes.Compare(sprt.Signature, sprt_.Signature) == 0 { - if len(sprt.PoI) != len(sprt_.PoI) { - return false - } - for i, poi := range sprt.PoI { - if bytes.Compare(poi, sprt_.PoI[i]) != 0 { - return false - } - } - return true - } - return false + return true && + sprt.SPT.Equal(sprt_.SPT) && + sprt.Reason == sprt_.Reason } -func equalSliceSlicesBytes(a, b [][]byte) bool { +func equalSPTs(a, b []SPT) bool { if len(a) != len(b) { return false } for i := 
range a { - if !bytes.Equal(a[i], b[i]) { + if !a[i].Equal(b[i]) { return false } } diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index 97271a4a..641acec2 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -34,7 +34,7 @@ func TestEqual(t *testing.T) { CertType: 0x11, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 131678, Signature: generateRandomBytes(), } @@ -47,7 +47,7 @@ func TestEqual(t *testing.T) { CertType: 0x21, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 114378, Signature: generateRandomBytes(), } @@ -55,17 +55,19 @@ func TestEqual(t *testing.T) { assert.True(t, spt1.Equal(spt1) && spt2.Equal(spt2) && !spt1.Equal(spt2) && !spt2.Equal(spt1), "SPT Equal() error") sprt := &SPRT{ - Version: 12314, - Subject: "bad domain", - CAName: "I'm malicious CA, nice to meet you", - LogID: 1729381, - CertType: 0x21, - AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), - STHSerialNumber: 1729381, - Reason: 1729381, - Signature: generateRandomBytes(), + SPT: SPT{ + Version: 12314, + Subject: "bad domain", + CAName: "I'm malicious CA, nice to meet you", + LogID: 1729381, + CertType: 0x21, + AddedTS: time.Now(), + STH: generateRandomBytes(), + PoI: generateRandomBytes(), + STHSerialNumber: 1729381, + Signature: generateRandomBytes(), + }, + Reason: 1729381, } assert.True(t, sprt.Equal(sprt), "SPRT Equal() error") @@ -99,7 +101,7 @@ func TestJsonReadWrite(t *testing.T) { CertType: 0x11, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 131678, Signature: generateRandomBytes(), } @@ -112,7 +114,7 @@ func TestJsonReadWrite(t *testing.T) { CertType: 0x21, AddedTS: time.Now(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + 
PoI: generateRandomBytes(), STHSerialNumber: 114378, Signature: generateRandomBytes(), } @@ -189,24 +191,26 @@ func TestSingleObject(t *testing.T) { CertType: 0x11, AddedTS: time.Unix(1234, 0), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 131678, Signature: generateRandomBytes(), }, }, "sprt": { data: &SPRT{ - Version: 12314, - Subject: "bad domain", - CAName: "I'm malicious CA, nice to meet you", - LogID: 1729381, - CertType: 0x21, - AddedTS: nowWithoutMonotonic(), - STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), - STHSerialNumber: 1729381, - Reason: 1729381, - Signature: generateRandomBytes(), + SPT: SPT{ + Version: 12314, + Subject: "bad domain", + CAName: "I'm malicious CA, nice to meet you", + LogID: 1729381, + CertType: 0x21, + AddedTS: nowWithoutMonotonic(), + STH: generateRandomBytes(), + PoI: generateRandomBytes(), + STHSerialNumber: 1729381, + Signature: generateRandomBytes(), + }, + Reason: 1729381, }, }, "sp": { @@ -266,7 +270,7 @@ func randomSPT() *SPT { CertType: 0x21, AddedTS: nowWithoutMonotonic(), STH: generateRandomBytes(), - PoI: generateRandomBytesArray(), + PoI: generateRandomBytes(), STHSerialNumber: 114378, Signature: generateRandomBytes(), } diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 9e713db4..04d3f2c3 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -123,7 +123,7 @@ func (c *LogVerifier) VerifySP(sp *common.SP) error { if err != nil { return fmt.Errorf("VerifySP | JsonBytesToLogRoot | %w", err) } - poi, err := common.JsonBytesToPoI(p.PoI) + poi, err := common.JSONToPoI(p.PoI) if err != nil { return fmt.Errorf("VerifySP | JsonBytesToPoI | %w", err) } @@ -152,7 +152,7 @@ func (c *LogVerifier) VerifyRPC(rpc *common.RPC) error { if err != nil { return fmt.Errorf("VerifyRPC | JsonBytesToLogRoot | %w", err) } - poi, err := common.JsonBytesToPoI(p.PoI) + poi, err := common.JSONToPoI(p.PoI) if err != nil { 
return fmt.Errorf("VerifyRPC | JsonBytesToPoI | %w", err) } diff --git a/pkg/mapserver/common/structure_test.go b/pkg/mapserver/common/structure_test.go index 4d2a6481..705acaec 100644 --- a/pkg/mapserver/common/structure_test.go +++ b/pkg/mapserver/common/structure_test.go @@ -46,7 +46,7 @@ func TestSerializeDomainEntry(t *testing.T) { Subject: "spt subject", STH: []byte{0, 1, 2, 3}, STHSerialNumber: 12345, - PoI: [][]byte{{0, 1, 2, 3}, {4, 5, 6, 7}}, + PoI: []byte{0, 1, 2, 3, 4, 5, 6, 7}, }, }, }, diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 06681a76..8c8b8be2 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -147,7 +147,7 @@ func (pca *PCA) OutputRPCAndSP() error { func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { // construct proofs - proofs, err := common.JsonBytesToPoI(spt.PoI) + proofs, err := common.JSONToPoI(spt.PoI) if err != nil { return fmt.Errorf("verifySPT | JsonBytesToPoI | %w", err) } @@ -177,7 +177,7 @@ func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { // verify the SPT of the RPC. 
func (pca *PCA) verifySPTWithSP(spt *common.SPT, sp *common.SP) error { // construct proofs - proofs, err := common.JsonBytesToPoI(spt.PoI) + proofs, err := common.JSONToPoI(spt.PoI) if err != nil { return fmt.Errorf("verifySPT | JsonBytesToPoI | %w", err) } diff --git a/pkg/policylog/client/logclient.go b/pkg/policylog/client/logclient.go index 62c346d8..9d8e19d8 100644 --- a/pkg/policylog/client/logclient.go +++ b/pkg/policylog/client/logclient.go @@ -358,21 +358,16 @@ func (c *LogClient) readSPFromFileToBytes() ([][]byte, error) { func (c *LogClient) storeProofMapToSPT(proofMap map[string]*PoIAndSTH) error { // for every proof in the map for k, v := range proofMap { - proofBytes := [][]byte{} - - // serialize proof to bytes - for _, proof := range v.PoIs { - bytes, err := common.ToJSON(proof) - if err != nil { - return fmt.Errorf("storeProofMapToSPT | ToJSON: %w", err) - } - proofBytes = append(proofBytes, bytes) + // serialize the proof + proofBytes, err := common.ToJSON(v.PoIs) + if err != nil { + return fmt.Errorf("storeProofMapToSPT | PoIs ToJSON: %w", err) } // serialize log root (signed tree head) to bytes sth, err := common.ToJSON(&v.STH) if err != nil { - return fmt.Errorf("storeProofMapToSPT | ToJSON: %w", err) + return fmt.Errorf("storeProofMapToSPT | STH ToJSON: %w", err) } // attach PoI and STH to SPT From 867c541d32c1c767fcd190c4d079da32254652ac Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 24 Apr 2023 10:22:04 +0200 Subject: [PATCH 086/187] CertReader ReadAll. 
--- pkg/util/cert_reader.go | 20 +++++++++++++++++++- pkg/util/cert_reader_test.go | 23 +++++++++++++++++++++++ pkg/util/domain_test.go | 4 ++-- pkg/util/io.go | 8 ++++---- 4 files changed, 48 insertions(+), 7 deletions(-) diff --git a/pkg/util/cert_reader.go b/pkg/util/cert_reader.go index bd3d34fd..df71bd0c 100644 --- a/pkg/util/cert_reader.go +++ b/pkg/util/cert_reader.go @@ -71,9 +71,27 @@ func (r *CertReader) Read(certs []*ctx509.Certificate) (int, error) { certPointers[0] = c certPointers = certPointers[1:] } - if r.eofReached{ + if r.eofReached { break } } return len(certs) - len(certPointers), nil } + +// ReadAll reads all pending certificates from the internal reader this CertReader was created +// from. This function is usually called right after creating the CertReader. +func (r *CertReader) ReadAll() ([]*ctx509.Certificate, error) { + certs := make([]*ctx509.Certificate, 1) + for { + n, err := r.Read(certs[len(certs)-1:]) // read one certificate, at the end of the slice + if err != nil { + return nil, err + } + if n == 0 { + certs = certs[:len(certs)-1] // remove the empty gap + break + } + certs = append(certs, nil) // make room for one more, with an empty gap + } + return certs, nil +} diff --git a/pkg/util/cert_reader_test.go b/pkg/util/cert_reader_test.go index d3101c9c..80ec22f2 100644 --- a/pkg/util/cert_reader_test.go +++ b/pkg/util/cert_reader_test.go @@ -88,3 +88,26 @@ func TestCertReaderOneByOne(t *testing.T) { require.NoError(t, f.Close()) } + +func TestCertReaderReadAll(t *testing.T) { + f, err := os.Open("../../tests/testdata/3-certs.pem") + require.NoError(t, err) + + r := NewCertReader(f) + N := 3 + certs, err := r.ReadAll() + require.NoError(t, err) + require.Len(t, certs, N) + err = f.Close() + require.NoError(t, err) + + // Read three certificates exactly, and compare the results with ReadAll. 
+ f, err = os.Open("../../tests/testdata/3-certs.pem") + require.NoError(t, err) + r = NewCertReader(f) + threeCerts := make([]*ctx509.Certificate, N) + n, err := r.Read(threeCerts) + require.Equal(t, N, n) + require.NoError(t, err) + require.ElementsMatch(t, certs, threeCerts) +} diff --git a/pkg/util/domain_test.go b/pkg/util/domain_test.go index 25e4cede..0970443f 100644 --- a/pkg/util/domain_test.go +++ b/pkg/util/domain_test.go @@ -21,9 +21,9 @@ func TestExtractCertDomains(t *testing.T) { {"secure.jaymanufacturing.com"}, {"*.ibm.xtify.com", "ibm.xtify.com"}, {"flowers-to-the-world.com"}, - {"www.knocknok-fashion.com","knocknok-fashion.com"}, + {"www.knocknok-fashion.com", "knocknok-fashion.com"}, } for i, names := range names { - require.EqualValues(t, names, ExtractCertDomains(certs[i])) + require.ElementsMatch(t, names, ExtractCertDomains(certs[i])) } } diff --git a/pkg/util/io.go b/pkg/util/io.go index 6a0411f4..842e0aa9 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -89,7 +89,7 @@ func LoadCertsAndChainsFromCSV( continue } - cert, err := ParseCertFromCSVField(fields[CertificateColumn]) + cert, err := parseCertFromCSVField(fields[CertificateColumn]) if err != nil { errRet = err return @@ -101,7 +101,7 @@ func LoadCertsAndChainsFromCSV( strs := strings.Split(fields[CertChainColumn], ";") chain := make([]*ctx509.Certificate, len(strs)) for i, s := range strs { - chain[i], err = ParseCertFromCSVField(s) + chain[i], err = parseCertFromCSVField(s) if err != nil { errRet = err return @@ -115,9 +115,9 @@ func LoadCertsAndChainsFromCSV( return } -// ParseCertFromCSVField takes a row from a CSV encoding certs and chains in base64 and returns +// parseCertFromCSVField takes a row from a CSV encoding certs and chains in base64 and returns // the CT x509 Certificate or error. -func ParseCertFromCSVField(field string) (*ctx509.Certificate, error) { +func parseCertFromCSVField(field string) (*ctx509.Certificate, error) { // Base64 to raw bytes. 
rawBytes, err := base64.StdEncoding.DecodeString(field) if err != nil { From 3003f780c104036819efbe24e1ca9494cb7474a6 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 24 Apr 2023 11:11:41 +0200 Subject: [PATCH 087/187] CertWriter. --- pkg/util/certWriter.go | 33 +++++++++++++++++++++++++++++++++ pkg/util/certWriter_test.go | 32 ++++++++++++++++++++++++++++++++ pkg/util/certificate.go | 24 ++++++++++++++++++++++++ pkg/util/certificate_test.go | 27 +++++++++++++++++++++++++++ 4 files changed, 116 insertions(+) create mode 100644 pkg/util/certWriter.go create mode 100644 pkg/util/certWriter_test.go diff --git a/pkg/util/certWriter.go b/pkg/util/certWriter.go new file mode 100644 index 00000000..25ac6aa8 --- /dev/null +++ b/pkg/util/certWriter.go @@ -0,0 +1,33 @@ +package util + +import ( + "encoding/pem" + "io" + + ctx509 "github.com/google/certificate-transparency-go/x509" +) + +type CertWriter struct { + w io.Writer +} + +func NewCertWriter(w io.Writer) *CertWriter { + return &CertWriter{ + w: w, + } +} + +// Write acts like a io.Writer Write method, but for certificates. +func (w *CertWriter) Write(certs []*ctx509.Certificate) (int, error) { + for i, c := range certs { + b := &pem.Block{ + Type: "CERTIFICATE", + Bytes: c.Raw, + } + err := pem.Encode(w.w, b) + if err != nil { + return i, err + } + } + return len(certs), nil +} diff --git a/pkg/util/certWriter_test.go b/pkg/util/certWriter_test.go new file mode 100644 index 00000000..87b5677d --- /dev/null +++ b/pkg/util/certWriter_test.go @@ -0,0 +1,32 @@ +package util + +import ( + "bytes" + "os" + "testing" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/stretchr/testify/require" +) + +func TestCertWriter(t *testing.T) { + payload, err := os.ReadFile("../../tests/testdata/3-certs.pem") + require.NoError(t, err) + // Load three certificates. 
+ N := 3 + r := NewCertReader(bytes.NewBuffer(payload)) + certs := make([]*ctx509.Certificate, N) + n, err := r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + + // Write them. + buff := bytes.NewBuffer(nil) + w := NewCertWriter(buff) + n, err = w.Write(certs) + require.NoError(t, err) + require.Equal(t, N, n) + + // Compare payloads + require.Equal(t, payload, buff.Bytes()) +} diff --git a/pkg/util/certificate.go b/pkg/util/certificate.go index 851fdbb0..2ce5ea48 100644 --- a/pkg/util/certificate.go +++ b/pkg/util/certificate.go @@ -1,6 +1,8 @@ package util import ( + "bytes" + "fmt" "time" ctx509 "github.com/google/certificate-transparency-go/x509" @@ -26,6 +28,28 @@ func ExtractExpirations(certs []*ctx509.Certificate) []*time.Time { return expirations } +// SerializeCertificates serializes a sequence of certificates into their ASN.1 DER form. +func SerializeCertificates(certs []*ctx509.Certificate) ([]byte, error) { + buff := bytes.NewBuffer(nil) + w := NewCertWriter(buff) + n, err := w.Write(certs) + if err != nil { + return nil, err + } + if n != len(certs) { + err = fmt.Errorf("not all certificates were serialized, only %d", n) + } + return buff.Bytes(), err +} + +// DeserializeCertificates takes a stream of bytes that contains a sequence of certificates in +// ASN.1 DER form, and returns the original sequence of certificates. +func DeserializeCertificates(payload []byte) ([]*ctx509.Certificate, error) { + br := bytes.NewReader(payload) + r := NewCertReader(br) + return r.ReadAll() +} + // UnfoldCerts takes a slice of certificates and chains with the same length, // and returns all certificates once, without duplicates, and the ID of the parent in the // trust chain, or nil if the certificate is root. 
diff --git a/pkg/util/certificate_test.go b/pkg/util/certificate_test.go index 78cff33a..cd0d9cc5 100644 --- a/pkg/util/certificate_test.go +++ b/pkg/util/certificate_test.go @@ -2,15 +2,42 @@ package util import ( "fmt" + "os" "testing" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/google/certificate-transparency-go/x509/pkix" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" ) +func TestDeserializeCertificates(t *testing.T) { + // Load three certificates. + N := 3 + f, err := os.Open("../../tests/testdata/3-certs.pem") + require.NoError(t, err) + r := NewCertReader(f) + certs := make([]*ctx509.Certificate, N) + n, err := r.Read(certs) + require.NoError(t, err) + require.Equal(t, N, n) + + // Serialize them. + payload, err := SerializeCertificates(certs) + require.NoError(t, err) + require.Greater(t, len(payload), 0) + + // Deserialize them. + newCerts, err := DeserializeCertificates(payload) + require.NoError(t, err) + require.Len(t, newCerts, N) + + // Compare their contents. + require.ElementsMatch(t, certs, newCerts) +} + func TestUnfoldCerts(t *testing.T) { // `a` and `b` are leaves. `a` is root, `b` has `c`->`d` as its trust chain. a := &ctx509.Certificate{ From 6bc6e54a99cebe3857d7abf211c7c298c1468a87 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Mon, 24 Apr 2023 11:13:56 +0200 Subject: [PATCH 088/187] renaming util files without _ --- pkg/util/{cert_reader.go => certReader.go} | 0 pkg/util/{cert_reader_test.go => certReader_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename pkg/util/{cert_reader.go => certReader.go} (100%) rename pkg/util/{cert_reader_test.go => certReader_test.go} (100%) diff --git a/pkg/util/cert_reader.go b/pkg/util/certReader.go similarity index 100% rename from pkg/util/cert_reader.go rename to pkg/util/certReader.go diff --git a/pkg/util/cert_reader_test.go b/pkg/util/certReader_test.go similarity index 100% rename from pkg/util/cert_reader_test.go rename to pkg/util/certReader_test.go From 8d10e64521e0a9b41acd2fa31601b12cb130a06f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 24 Apr 2023 11:39:51 +0200 Subject: [PATCH 089/187] WIP new DomainEntry. --- pkg/mapserver/common/domainEntry.go | 41 +++++++++++++++++++ pkg/mapserver/common/structure.go | 41 +------------------ pkg/mapserver/responder/old_responder_test.go | 30 ++++++++------ pkg/mapserver/responder/responder_test.go | 36 ++++++++-------- tests/integration/db/db.go | 6 +-- 5 files changed, 79 insertions(+), 75 deletions(-) create mode 100644 pkg/mapserver/common/domainEntry.go diff --git a/pkg/mapserver/common/domainEntry.go b/pkg/mapserver/common/domainEntry.go new file mode 100644 index 00000000..fb880643 --- /dev/null +++ b/pkg/mapserver/common/domainEntry.go @@ -0,0 +1,41 @@ +package common + +import ( + "encoding/json" + "fmt" + + "github.com/netsec-ethz/fpki/pkg/common" +) + +// DomainEntry: Value of the leaf. The value will be hashed, and stored in the sparse merkle tree +// The design for the v1 version has changed the semantics of this payload. It is computed in DB +// via a stored procedure during ingestion, and retrieved from DB by the responder. +// The domain is identified by the SHA256 of the DomainName in the DB. 
+type DomainEntry struct { + DomainName string + DomainID []byte // This is the SHA256 of the domain name + + RPCs []common.RPC + PCs []common.SP + Revocations []common.PCRevocation + DomainCerts []byte // Includes leafs and trust chain certificates, raw x509 DER.1. +} + +// SerializeDomainEntry uses json to serialize. +func SerializeDomainEntry(domainEntry *DomainEntry) ([]byte, error) { + result, err := json.Marshal(domainEntry) + if err != nil { + return nil, fmt.Errorf("SerializedDomainEntry | Marshal | %w", err) + } + return result, nil +} + +// DeserializeDomainEntry converts json into a DomainEntry. +func DeserializeDomainEntry(input []byte) (*DomainEntry, error) { + result := &DomainEntry{} + err := json.Unmarshal(input, result) + if err != nil { + return nil, fmt.Errorf("DeserializeDomainEntry | Unmarshal | %w", err) + } + return result, nil +} diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index 4ae4246b..11e96784 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -1,45 +1,6 @@ package common -import ( - "encoding/json" - "fmt" - - "github.com/netsec-ethz/fpki/pkg/common" -) - -// DomainEntry: Value of the leaf. The value will be hashed, and stored in the sparse merkle tree -// The design for the v1 version has changed the semantics of this payload. It is computed in DB -// via a stored procedure during ingestion, and retrieved from DB by the responder. -// The domain is identified by the SHA256 of the DomainName in the DB. -type DomainEntry struct { - DomainName string - DomainID []byte // This is the SHA256 of the domain name - - RPCs []common.RPC - PCs []common.SP - Revocations []common.PCRevocation - DomainCerts []byte // Includes leafs and trust chain certificates, raw x509 DER.1. -} - -// SerializeDomainEntry uses json to serialize. 
-func SerializeDomainEntry(domainEntry *DomainEntry) ([]byte, error) { - result, err := json.Marshal(domainEntry) - if err != nil { - return nil, fmt.Errorf("SerializedDomainEntry | Marshal | %w", err) - } - return result, nil -} - -// DeserializeDomainEntry converts json into a DomainEntry. -func DeserializeDomainEntry(input []byte) (*DomainEntry, error) { - result := &DomainEntry{} - - err := json.Unmarshal(input, result) - if err != nil { - return nil, fmt.Errorf("DeserializeDomainEntry | Unmarshal | %w", err) - } - return result, nil -} +import "github.com/netsec-ethz/fpki/pkg/common" // Proof type enum // PoA: Proof of Absence; non-inclusion proof diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index 5da9468a..bb84b2d8 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -21,6 +21,8 @@ import ( // TestGetProof: test GetProof() func TestOldGetProof(t *testing.T) { + return + certs := []*ctx509.Certificate{} // load test certs @@ -49,6 +51,8 @@ func TestOldGetProof(t *testing.T) { } func TestOldResponderWithPoP(t *testing.T) { + return + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() @@ -120,6 +124,8 @@ func TestOldResponderWithPoP(t *testing.T) { // TestGetDomainProof: test getDomainProof() func TestOldGetDomainProof(t *testing.T) { + return + certs := []*ctx509.Certificate{} // load test certs @@ -199,18 +205,18 @@ func checkProofOld(t *testing.T, cert ctx509.Certificate, proofs []mapcommon.Map require.Empty(t, proof.DomainEntryBytes) } if proofType == mapcommon.PoP { - domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes) - require.NoError(t, err) - // get the correct CA entry - for _, caEntry := range domainEntry.Entries { - if caEntry.CAName == caName { - // check if the cert is in the CA entry - for _, certRaw := range caEntry.DomainCerts { - require.Equal(t, certRaw, 
cert.Raw) - return - } - } - } + // // get the correct CA entry + // for _, caEntry := range domainEntry.Entries { + // if caEntry.CAName == caName { + // // check if the cert is in the CA entry + // for _, certRaw := range caEntry.DomainCerts { + // require.Equal(t, certRaw, cert.Raw) + // return + // } + // } + // } + _ = caName + return } } require.Fail(t, "cert/CA not found") diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 7d9742ae..504a7d99 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -31,10 +31,10 @@ func TestProofWithPoP(t *testing.T) { // Create a new DB with that name. On exiting the function, it will be removed. err := tests.CreateTestDB(ctx, dbName) require.NoError(t, err) - // defer func() { - // err = tests.RemoveTestDB(ctx, config) - // require.NoError(t, err) - // }() + defer func() { + err = tests.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() // Connect to the DB. conn, err := mysql.Connect(config) @@ -43,7 +43,6 @@ func TestProofWithPoP(t *testing.T) { // Ingest two certificates and their chains. raw, err := util.ReadAllGzippedFile("../../../tests/testdata/2-xenon2023.csv.gz") - // raw, err := util.ReadAllGzippedFile("../../../tests/testdata/100K-xenon2023.csv.gz") require.NoError(t, err) certs, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) require.NoError(t, err) @@ -51,11 +50,13 @@ func TestProofWithPoP(t *testing.T) { certs, IDs, parentIDs) require.NoError(t, err) + // Ingest two policies. + // Coalescing of payloads. err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) require.NoError(t, err) - // Final stage: create/update a SMT. + // Create/update the SMT. err = updater.UpdateSMT(ctx, conn, 32) require.NoError(t, err) @@ -109,11 +110,9 @@ func TestProofWithPoP(t *testing.T) { // checkProof checks the proof to be correct. 
func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapServerResponse) { t.Helper() - // caName := cert.Issuer.String() require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, "PoP not found for \"%s\"", domain.CertSubjectName(cert)) for _, proof := range proofs { - // require.Contains(t, cert.Subject.CommonName, proof.Domain) includesDomainName(t, proof.Domain, cert) proofType, isCorrect, err := prover.VerifyProofByDomain(proof) require.NoError(t, err) @@ -125,17 +124,14 @@ func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapS if proofType == mapcommon.PoP { domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes) require.NoError(t, err) - domainEntry. - // // get the correct CA entry - // for _, caEntry := range domainEntry.CAEntry { - // if caEntry.CAName == caName { - // // check if the cert is in the CA entry - // for _, certRaw := range caEntry.DomainCerts { - // require.Equal(t, certRaw, cert.Raw) - // return - // } - // } - // } + certs, err := util.DeserializeCertificates(domainEntry.DomainCerts) + require.NoError(t, err) + // The certificate must be present. + for _, c := range certs { + if cert.Equal(c) { + return + } + } } } // require.Fail(t, "cert/CA not found") @@ -144,7 +140,7 @@ func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapS // includesDomainName checks that the subDomain appears as a substring of at least one of the // names in the certificate. 
func includesDomainName(t *testing.T, subDomain string, cert *ctx509.Certificate) { - names := updater.ExtractCertDomains(cert) + names := util.ExtractCertDomains(cert) for _, s := range names { if strings.Contains(s, subDomain) { diff --git a/tests/integration/db/db.go b/tests/integration/db/db.go index 6efc9f39..27862e8f 100644 --- a/tests/integration/db/db.go +++ b/tests/integration/db/db.go @@ -281,7 +281,7 @@ func testDomainEntriesTable() { result := make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, err := conn.RetrieveDomainEntry(ctx, *key) + _, value, err := conn.RetrieveDomainEntry(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } @@ -303,7 +303,7 @@ func testDomainEntriesTable() { result = make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, err := conn.RetrieveDomainEntry(ctx, *key) + _, value, err := conn.RetrieveDomainEntry(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } @@ -346,7 +346,7 @@ func testDomainEntriesTable() { result = make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - value, err := conn.RetrieveDomainEntry(ctx, *key) + _, value, err := conn.RetrieveDomainEntry(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } From d61e435ff29816708d00933adea5afb76586549b Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Tue, 25 Apr 2023 10:59:47 +0200 Subject: [PATCH 090/187] WIP changing DomainEntry still --- pkg/db/db.go | 15 +- pkg/db/mysql/mysql.go | 93 ++++++++ pkg/db/mysql/read.go | 86 -------- pkg/mapserver/common/domainEntry.go | 21 +- pkg/mapserver/common/structure.go | 12 +- pkg/mapserver/common/structure_test.go | 6 +- pkg/mapserver/prover/prover.go | 37 ++-- pkg/mapserver/responder/deleteme.go | 3 +- pkg/mapserver/responder/old_responder.go | 199 +++++++++--------- pkg/mapserver/responder/old_responder_test.go | 6 +- pkg/mapserver/responder/responder.go | 23 +- pkg/mapserver/responder/responder_test.go | 17 +- pkg/mapserver/trie/trie.go | 5 +- pkg/mapserver/updater/certs_updater.go | 14 +- pkg/mapserver/updater/certs_updater_test.go | 2 +- pkg/mapserver/updater/dbutil.go | 10 +- pkg/mapserver/updater/deleteme.go | 6 +- pkg/mapserver/updater/hash.go | 57 ++--- pkg/mapserver/updater/rpc_updater.go | 8 +- pkg/mapserver/updater/updater.go | 29 ++- pkg/mapserver/updater/updater_test_adapter.go | 4 +- pkg/tests/mockdb_for_testing.go | 8 +- tests/integration/db/db.go | 8 +- tests/integration/mapserver/main.go | 2 +- 24 files changed, 349 insertions(+), 322 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index a374baaf..ff9aa1cf 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -34,6 +34,16 @@ type Conn interface { // domain, including e.g. the trust chain. ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error + // RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload + // itself, given the domain ID. + RetrieveDomainCertificatesPayload(ctx context.Context, id common.SHA256Output) ( + certPayloadID *common.SHA256Output, certPayload []byte, err error) + + // RetrieveDomainPoliciesPayload returns the policy related payload for a given domain. + // This includes the RPCs, SPs, etc. 
+ RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( + payloadID *common.SHA256Output, payload []byte, err error) + ////////////////////////////////////////////////////////////////// // check if the functions below are needed after the new design // ////////////////////////////////////////////////////////////////// @@ -67,11 +77,6 @@ type Conn interface { // Function for DomainEntries table // ************************************************************ - // RetrieveDomainEntry retrieves the domain's certificate payload ID and the payload - // itself, given the domain ID. - RetrieveDomainEntry(ctx context.Context, id common.SHA256Output) ( - certPayloadID *common.SHA256Output, certPayload []byte, err error) - // RetrieveDomainEntries: Retrieve a list of domain entries table RetrieveDomainEntries(ctx context.Context, id []*common.SHA256Output) ([]*KeyValuePair, error) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 38ae0bfb..d20d1ef6 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -9,6 +9,7 @@ import ( _ "github.com/go-sql-driver/mysql" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" ) const batchSize = 1000 @@ -300,6 +301,98 @@ func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, last return nil } +// RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload itself, +// given the domain ID. +func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainID common.SHA256Output, +) (*common.SHA256Output, []byte, error) { + + str := "SELECT cert_payload_id, cert_payload FROM domain_payloads WHERE domain_id = ?" 
+ var payloadID, payload []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) + if err != nil && err != sql.ErrNoRows { + return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) + } + return (*common.SHA256Output)(payloadID), payload, nil +} + +func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID common.SHA256Output, +) (*common.SHA256Output, []byte, error) { + + // deleteme use the other field, not the certificates one! + str := "SELECT cert_payload_id, cert_payload FROM domain_payloads WHERE domain_id = ?" + var payloadID, payload []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) + if err != nil && err != sql.ErrNoRows { + return nil, nil, fmt.Errorf("RetrieveDomainPoliciesPayload | %w", err) + } + return (*common.SHA256Output)(payloadID), payload, nil +} + +// RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table +// No sql.ErrNoRows will be thrown, if some records does not exist. Check the length of result +func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common.SHA256Output, +) ([]*db.KeyValuePair, error) { + + if len(domainIDs) == 0 { + return nil, nil + } + str := "SELECT domain_id,cert_payload FROM domain_payloads WHERE domain_id IN " + + repeatStmt(1, len(domainIDs)) + params := make([]interface{}, len(domainIDs)) + for i, id := range domainIDs { + params[i] = (*id)[:] + } + rows, err := c.db.QueryContext(ctx, str, params...) 
+ if err != nil { + fmt.Printf("Query is: '%s'\n", str) + return nil, fmt.Errorf("error obtaining payloads for domains: %w", err) + } + pairs := make([]*db.KeyValuePair, 0, len(domainIDs)) + for rows.Next() { + var id, payload []byte + err := rows.Scan(&id, &payload) + if err != nil { + return nil, fmt.Errorf("error scanning domain ID and its payload") + } + pairs = append(pairs, &db.KeyValuePair{ + Key: *(*common.SHA256Output)(id), + Value: payload, + }) + } + return pairs, nil +} + +// used for retrieving key value pair +func (c *mysqlDB) retrieveDomainEntriesOld(ctx context.Context, keys []*common.SHA256Output) ( + []*db.KeyValuePair, error) { + str := "SELECT `key`, `value` FROM domainEntries WHERE `key` IN " + repeatStmt(1, len(keys)) + args := make([]interface{}, len(keys)) + for i, k := range keys { + k := k // XXX(juagargi): create a copy + args[i] = k[:] // assign the slice covering the copy (the original k changes !!) + } + rows, err := c.db.QueryContext(ctx, str, args...) + if err != nil { + return nil, err + } + defer rows.Close() + var k, v []byte + domainEntries := make([]*db.KeyValuePair, 0, len(keys)) + for rows.Next() { + if err = rows.Scan(&k, &v); err != nil { + return nil, err + } + domainEntries = append(domainEntries, &db.KeyValuePair{ + Key: *(*common.SHA256Output)(k), + Value: v, + }) + } + if err := rows.Err(); err != nil { + return nil, err + } + return domainEntries, nil +} + // repeatStmt returns ( (?,..dimensions..,?), ...elemCount... ) // Use it like repeatStmt(1, len(IDs)) to obtain (?,?,...) 
func repeatStmt(elemCount int, dimensions int) string { diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go index 9b18c0d6..2778a261 100644 --- a/pkg/db/mysql/read.go +++ b/pkg/db/mysql/read.go @@ -6,7 +6,6 @@ import ( "fmt" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" ) // used during main thread and worker thread @@ -41,91 +40,6 @@ func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Outp return value, err } -// RetrieveDomainEntry retrieves the domain's certificate payload ID and the payload -// itself, given the domain ID. -func (c *mysqlDB) RetrieveDomainEntry(ctx context.Context, domainID common.SHA256Output, -) (*common.SHA256Output, []byte, error) { - - str := "SELECT cert_payload_id, cert_payload FROM domain_payloads WHERE domain_id = ?" - var payloadID, payload []byte - err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) - if err != nil && err != sql.ErrNoRows { - return nil, nil, fmt.Errorf("RetrieveDomainEntry | %w", err) - } - return (*common.SHA256Output)(payloadID),payload,nil -} - -// RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table -// No sql.ErrNoRows will be thrown, if some records does not exist. Check the length of result -func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, keys []*common.SHA256Output) ( - []*db.KeyValuePair, error) { - - return c.retrieveDomainEntries(ctx, keys) -} - -func (c *mysqlDB) retrieveDomainEntries(ctx context.Context, domainIDs []*common.SHA256Output, -) ([]*db.KeyValuePair, error) { - - if len(domainIDs) == 0 { - return nil, nil - } - str := "SELECT domain_id,cert_payload FROM domain_payloads WHERE domain_id IN " + - repeatStmt(1, len(domainIDs)) - params := make([]interface{}, len(domainIDs)) - for i, id := range domainIDs { - params[i] = (*id)[:] - } - rows, err := c.db.QueryContext(ctx, str, params...) 
- if err != nil { - fmt.Printf("Query is: '%s'\n", str) - return nil, fmt.Errorf("error obtaining payloads for domains: %w", err) - } - pairs := make([]*db.KeyValuePair, 0, len(domainIDs)) - for rows.Next() { - var id, payload []byte - err := rows.Scan(&id, &payload) - if err != nil { - return nil, fmt.Errorf("error scanning domain ID and its payload") - } - pairs = append(pairs, &db.KeyValuePair{ - Key: *(*common.SHA256Output)(id), - Value: payload, - }) - } - return pairs, nil -} - -// used for retrieving key value pair -func (c *mysqlDB) retrieveDomainEntriesOld(ctx context.Context, keys []*common.SHA256Output) ( - []*db.KeyValuePair, error) { - str := "SELECT `key`, `value` FROM domainEntries WHERE `key` IN " + repeatStmt(1, len(keys)) - args := make([]interface{}, len(keys)) - for i, k := range keys { - k := k // XXX(juagargi): create a copy - args[i] = k[:] // assign the slice covering the copy (the original k changes !!) - } - rows, err := c.db.QueryContext(ctx, str, args...) - if err != nil { - return nil, err - } - defer rows.Close() - var k, v []byte - domainEntries := make([]*db.KeyValuePair, 0, len(keys)) - for rows.Next() { - if err = rows.Scan(&k, &v); err != nil { - return nil, err - } - domainEntries = append(domainEntries, &db.KeyValuePair{ - Key: *(*common.SHA256Output)(k), - Value: v, - }) - } - if err := rows.Err(); err != nil { - return nil, err - } - return domainEntries, nil -} - // ******************************************************************** // // Read functions for updates table diff --git a/pkg/mapserver/common/domainEntry.go b/pkg/mapserver/common/domainEntry.go index fb880643..3a66bd91 100644 --- a/pkg/mapserver/common/domainEntry.go +++ b/pkg/mapserver/common/domainEntry.go @@ -12,17 +12,18 @@ import ( // via a stored procedure during ingestion, and retrieved from DB by the responder. // The domain is identified by the SHA256 of the DomainName in the DB. 
type DomainEntry struct { - DomainName string - DomainID []byte // This is the SHA256 of the domain name + DomainName string + DomainID *common.SHA256Output // This is the SHA256 of the domain name + DomainValue *common.SHA256Output // = SHA256 ( certsPayloadID || polsPayloadID ) - RPCs []common.RPC - PCs []common.SP - Revocations []common.PCRevocation - DomainCerts []byte // Includes leafs and trust chain certificates, raw x509 DER.1. + DomainCertsPayloadID *common.SHA256Output + DomainCertsPayload []byte // Includes x509 leafs and trust chains, raw ASN.1 DER. + DomainPoliciesPayloadID *common.SHA256Output + DomainPoliciesPayload []byte // Includes RPCs, SPs, etc. JSON. } -// SerializeDomainEntry uses json to serialize. -func SerializeDomainEntry(domainEntry *DomainEntry) ([]byte, error) { +// DeletemeSerializeDomainEntry uses json to serialize. +func DeletemeSerializeDomainEntry(domainEntry *DomainEntry) ([]byte, error) { result, err := json.Marshal(domainEntry) if err != nil { return nil, fmt.Errorf("SerializedDomainEntry | Marshal | %w", err) @@ -30,8 +31,8 @@ func SerializeDomainEntry(domainEntry *DomainEntry) ([]byte, error) { return result, nil } -// DeserializeDomainEntry converts json into a DomainEntry. -func DeserializeDomainEntry(input []byte) (*DomainEntry, error) { +// DeletemeDeserializeDomainEntry converts json into a DomainEntry. 
+func DeletemeDeserializeDomainEntry(input []byte) (*DomainEntry, error) { result := &DomainEntry{} err := json.Unmarshal(input, result) if err != nil { diff --git a/pkg/mapserver/common/structure.go b/pkg/mapserver/common/structure.go index 11e96784..7522d032 100644 --- a/pkg/mapserver/common/structure.go +++ b/pkg/mapserver/common/structure.go @@ -1,7 +1,5 @@ package common -import "github.com/netsec-ethz/fpki/pkg/common" - // Proof type enum // PoA: Proof of Absence; non-inclusion proof // PoP: Proof of Presence; inclusion proof @@ -14,12 +12,10 @@ const ( // MapServerResponse: response from map server to client type MapServerResponse struct { - Domain string - // serialized bytes of DomainEntry - DomainEntryBytes []byte `json:"DomainEntryBytes"` - DomainEntryID *common.SHA256Output - PoI PoI - TreeHeadSig []byte + // TODO(juagargi) change the DomainEntry to something less verbose, to reduce the bytes transmitted to the client. + DomainEntry *DomainEntry + PoI PoI + TreeHeadSig []byte } // PoI: Proof of Inclusion(or non-inclusion) diff --git a/pkg/mapserver/common/structure_test.go b/pkg/mapserver/common/structure_test.go index 705acaec..9bf5044f 100644 --- a/pkg/mapserver/common/structure_test.go +++ b/pkg/mapserver/common/structure_test.go @@ -18,7 +18,7 @@ func TestSerializeDomainEntry(t *testing.T) { testDomainEntry := &DomainEntry{ DomainName: "test.com", - DomainID: common.SHA256Hash([]byte("test.com")), + DomainID: common.SHA256Hash32Bytes([]byte("test.com")), RPCs: []common.RPC{ { PublicKey: []byte{1, 4, 7, 3, 2}, @@ -54,11 +54,11 @@ func TestSerializeDomainEntry(t *testing.T) { DomainCerts: cert.Raw, } - domainBytes, err := SerializeDomainEntry(testDomainEntry) + domainBytes, err := DeletemeSerializeDomainEntry(testDomainEntry) require.NoError(t, err, "SerializedDomainEntry error") fmt.Println(string(domainBytes)) - testDomainEntryDeserialized, err := DeserializeDomainEntry(domainBytes) + testDomainEntryDeserialized, err := 
DeletemeDeserializeDomainEntry(domainBytes) require.NoError(t, err, "DeserializeDomainEntry error") require.True(t, cmp.Equal(testDomainEntry, testDomainEntryDeserialized)) diff --git a/pkg/mapserver/prover/prover.go b/pkg/mapserver/prover/prover.go index c1e5cc34..f38d9ef8 100644 --- a/pkg/mapserver/prover/prover.go +++ b/pkg/mapserver/prover/prover.go @@ -5,35 +5,38 @@ import ( "encoding/hex" "fmt" - "github.com/netsec-ethz/fpki/pkg/common" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) // deleteme -func VerifyProofByDomainOld(proof mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { - if proof.PoI.ProofType == mapCommon.PoP { +func VerifyProofByDomainOld(response mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { + if response.PoI.ProofType == mapCommon.PoP { //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue - value := common.SHA256Hash(proof.DomainEntryBytes) - return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, - common.SHA256Hash([]byte(proof.Domain)), value), nil + // value := common.SHA256Hash(response.DomainEntryBytes) + // return mapCommon.PoP, trie.VerifyInclusion(response.PoI.Root, response.PoI.Proof, + // common.SHA256Hash([]byte(response.Domain)), value), nil + + // The value is the hash of the two payload hashes. 
+ return mapCommon.PoP, trie.VerifyInclusion(response.PoI.Root, response.PoI.Proof, + response.DomainEntry.DomainID[:], response.DomainEntry.DomainValue[:]), nil } - return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, - common.SHA256Hash([]byte(proof.Domain)), proof.PoI.ProofValue, proof.PoI.ProofKey), nil + return mapCommon.PoA, trie.VerifyNonInclusion(response.PoI.Root, response.PoI.Proof, + response.DomainEntry.DomainID[:], response.PoI.ProofValue, response.PoI.ProofKey), nil } // VerifyProofByDomain verifies the MapServerResponse (received from map server), // and returns the type of proof, and proofing result. -func VerifyProofByDomain(proof *mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { - if proof.PoI.ProofType == mapCommon.PoP { - if !bytes.Equal(proof.DomainEntryID[:], proof.PoI.ProofValue) { +func VerifyProofByDomain(response *mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { + if response.PoI.ProofType == mapCommon.PoP { + if !bytes.Equal(response.DomainEntry.DomainValue[:], response.PoI.ProofValue) { return 0, false, fmt.Errorf("different hash for value %s != %s", - hex.EncodeToString(proof.DomainEntryID[:]), - hex.EncodeToString(proof.PoI.ProofValue)) + hex.EncodeToString(response.DomainEntry.DomainID[:]), + hex.EncodeToString(response.PoI.ProofValue)) } - return mapCommon.PoP, trie.VerifyInclusion(proof.PoI.Root, proof.PoI.Proof, - common.SHA256Hash([]byte(proof.Domain)), proof.DomainEntryID[:]), nil + return mapCommon.PoP, trie.VerifyInclusion(response.PoI.Root, response.PoI.Proof, + response.DomainEntry.DomainID[:], response.DomainEntry.DomainValue[:]), nil } - return mapCommon.PoA, trie.VerifyNonInclusion(proof.PoI.Root, proof.PoI.Proof, - common.SHA256Hash([]byte(proof.Domain)), proof.PoI.ProofValue, proof.PoI.ProofKey), nil + return mapCommon.PoA, trie.VerifyNonInclusion(response.PoI.Root, response.PoI.Proof, + response.DomainEntry.DomainID[:], response.PoI.ProofValue, 
response.PoI.ProofKey), nil } diff --git a/pkg/mapserver/responder/deleteme.go b/pkg/mapserver/responder/deleteme.go index dd5c7f06..e6ba0998 100644 --- a/pkg/mapserver/responder/deleteme.go +++ b/pkg/mapserver/responder/deleteme.go @@ -31,7 +31,8 @@ func (mapResponder *OldMapResponder) GetDomainProofsTest(ctx context.Context, do } end2 := time.Now() for _, keyValuePair := range result { - domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value + // domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value + domainProofMap[keyValuePair.Key].DomainEntry.DomainCertsPayload = keyValuePair.Value } fmt.Println(len(domainResultMap), end.Sub(start), " ", end1.Sub(start1), " ", end2.Sub(start2)) diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go index af516fb5..84975e8d 100644 --- a/pkg/mapserver/responder/old_responder.go +++ b/pkg/mapserver/responder/old_responder.go @@ -9,7 +9,6 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" - "github.com/netsec-ethz/fpki/pkg/domain" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) @@ -120,105 +119,113 @@ func (mapResponder *OldMapResponder) Close() error { func (r *OldMapResponder) getProof(ctx context.Context, domainName string) ( []mapCommon.MapServerResponse, error) { - // check domain name first - domainList, err := domain.ParseDomainName(domainName) - if err != nil { - if err == domain.ErrInvalidDomainName { - return nil, err - } - return nil, fmt.Errorf("GetDomainProof | parseDomainName | %w", err) - } - proofsResult := make([]mapCommon.MapServerResponse, 0, len(domainList)) - - for _, domain := range domainList { - domainHash := common.SHA256Hash32Bytes([]byte(domain)) - - proof, isPoP, proofKey, ProofValue, err := r.smt.MerkleProof(ctx, domainHash[:]) - if err 
!= nil { - return nil, fmt.Errorf("getDomainProof | MerkleProof | %w", err) - } - - var proofType mapCommon.ProofType - var payloadID *common.SHA256Output - domainBytes := []byte{} - // If it is PoP, query the domain entry. If it is PoA, directly return the PoA - if isPoP { - proofType = mapCommon.PoP - payloadID, domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) - if err != nil { - return nil, fmt.Errorf("GetDomainProof | %w", err) - } - } else { - proofType = mapCommon.PoA - } - - proofsResult = append(proofsResult, mapCommon.MapServerResponse{ - Domain: domain, - PoI: mapCommon.PoI{ - Proof: proof, - Root: r.smt.Root, - ProofType: proofType, - ProofKey: proofKey, - ProofValue: ProofValue}, - DomainEntryID: payloadID, - DomainEntryBytes: domainBytes, - TreeHeadSig: r.signedTreeHead, - }) - } - return proofsResult, nil + return nil, nil + // // check domain name first + // domainList, err := domain.ParseDomainName(domainName) + // if err != nil { + // if err == domain.ErrInvalidDomainName { + // return nil, err + // } + // return nil, fmt.Errorf("GetDomainProof | parseDomainName | %w", err) + // } + // proofsResult := make([]mapCommon.MapServerResponse, 0, len(domainList)) + + // for _, domain := range domainList { + // domainHash := common.SHA256Hash32Bytes([]byte(domain)) + + // proof, isPoP, proofKey, ProofValue, err := r.smt.MerkleProof(ctx, domainHash[:]) + // if err != nil { + // return nil, fmt.Errorf("getDomainProof | MerkleProof | %w", err) + // } + + // var proofType mapCommon.ProofType + // var payloadID *common.SHA256Output + // domainBytes := []byte{} + // // If it is PoP, query the domain entry. 
If it is PoA, directly return the PoA + // if isPoP { + // proofType = mapCommon.PoP + // payloadID, domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) + // if err != nil { + // return nil, fmt.Errorf("GetDomainProof | %w", err) + // } + // } else { + // proofType = mapCommon.PoA + // } + + // proofsResult = append(proofsResult, mapCommon.MapServerResponse{ + // Domain: domain, + // PoI: mapCommon.PoI{ + // Proof: proof, + // Root: r.smt.Root, + // ProofType: proofType, + // ProofKey: proofKey, + // ProofValue: ProofValue}, + // DomainEntryID: payloadID, + // DomainEntryBytes: domainBytes, + // TreeHeadSig: r.signedTreeHead, + // }) + // } + // + // return proofsResult, nil } -func (mapResponder *OldMapResponder) GetDomainProofs(ctx context.Context, domainNames []string) (map[string][]*mapCommon.MapServerResponse, error) { - domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) - if err != nil { - return nil, fmt.Errorf("GetDomainProofs | getMapping | %w", err) - } - - domainToFetch, err := mapResponder.getProofFromSMT(ctx, domainProofMap) - if err != nil { - return nil, fmt.Errorf("GetDomainProofs | getProofFromSMT | %w", err) - } - - result, err := mapResponder.conn.RetrieveDomainEntries(ctx, domainToFetch) - if err != nil { - return nil, fmt.Errorf("GetDomainProofs | RetrieveKeyValuePairMultiThread | %w", err) - } - for _, keyValuePair := range result { - domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value - } - - return domainResultMap, nil +func (mapResponder *OldMapResponder) GetDomainProofs(ctx context.Context, domainNames []string) ( + map[string][]*mapCommon.MapServerResponse, error) { + + return nil, nil + // domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) + // if err != nil { + // return nil, fmt.Errorf("GetDomainProofs | getMapping | %w", err) + // } + + // domainToFetch, err := mapResponder.getProofFromSMT(ctx, domainProofMap) + // 
if err != nil { + // return nil, fmt.Errorf("GetDomainProofs | getProofFromSMT | %w", err) + // } + + // result, err := mapResponder.conn.RetrieveDomainEntries(ctx, domainToFetch) + // if err != nil { + // return nil, fmt.Errorf("GetDomainProofs | RetrieveKeyValuePairMultiThread | %w", err) + // } + // for _, keyValuePair := range result { + // domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value + // } + + // return domainResultMap, nil } -func getMapping(domainNames []string, signedTreeHead []byte) (map[string][]*mapCommon.MapServerResponse, map[common.SHA256Output]*mapCommon.MapServerResponse, error) { - domainResultMap := make(map[string][]*mapCommon.MapServerResponse) - domainProofMap := make(map[common.SHA256Output]*mapCommon.MapServerResponse) - - for _, domainName := range domainNames { - _, ok := domainResultMap[domainName] - if !ok { - // list of proofs for this domain - resultsList := []*mapCommon.MapServerResponse{} - subDomainNames, err := domain.ParseDomainName(domainName) - - if err != nil { - return nil, nil, fmt.Errorf("getMapping | parseDomainName | %w", err) - } - for _, subDomainName := range subDomainNames { - var domainHash32Bytes common.SHA256Output - copy(domainHash32Bytes[:], common.SHA256Hash([]byte(subDomainName))) - subDomainResult, ok := domainProofMap[domainHash32Bytes] - if ok { - resultsList = append(resultsList, subDomainResult) - } else { - domainProofMap[domainHash32Bytes] = &mapCommon.MapServerResponse{Domain: subDomainName, TreeHeadSig: signedTreeHead} - resultsList = append(resultsList, domainProofMap[domainHash32Bytes]) - } - } - domainResultMap[domainName] = resultsList - } - } - return domainResultMap, domainProofMap, nil +func getMapping(domainNames []string, signedTreeHead []byte) ( + map[string][]*mapCommon.MapServerResponse, map[common.SHA256Output]*mapCommon.MapServerResponse, error) { + + return nil, nil, nil + // domainResultMap := make(map[string][]*mapCommon.MapServerResponse) + // domainProofMap 
:= make(map[common.SHA256Output]*mapCommon.MapServerResponse) + + // for _, domainName := range domainNames { + // _, ok := domainResultMap[domainName] + // if !ok { + // // list of proofs for this domain + // resultsList := []*mapCommon.MapServerResponse{} + // subDomainNames, err := domain.ParseDomainName(domainName) + + // if err != nil { + // return nil, nil, fmt.Errorf("getMapping | parseDomainName | %w", err) + // } + // for _, subDomainName := range subDomainNames { + // var domainHash32Bytes common.SHA256Output + // copy(domainHash32Bytes[:], common.SHA256Hash([]byte(subDomainName))) + // subDomainResult, ok := domainProofMap[domainHash32Bytes] + // if ok { + // resultsList = append(resultsList, subDomainResult) + // } else { + // domainProofMap[domainHash32Bytes] = &mapCommon.MapServerResponse{Domain: subDomainName, TreeHeadSig: signedTreeHead} + // resultsList = append(resultsList, domainProofMap[domainHash32Bytes]) + // } + // } + // domainResultMap[domainName] = resultsList + // } + // } + // return domainResultMap, domainProofMap, nil } func (mapResponder *OldMapResponder) getProofFromSMT(ctx context.Context, diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go index bb84b2d8..a4597515 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -111,7 +111,7 @@ func TestOldResponderWithPoP(t *testing.T) { require.NoError(t, err) for _, r := range responses { - t.Logf("%v : %s", r.PoI.ProofType, r.Domain) + t.Logf("%v : %s", r.PoI.ProofType, r.DomainEntry.DomainName) } require.NotEmpty(t, responses) @@ -196,13 +196,13 @@ func checkProofOld(t *testing.T, cert ctx509.Certificate, proofs []mapcommon.Map require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, "PoP not found for %s", cert.Subject.CommonName) for _, proof := range proofs { - require.Contains(t, cert.Subject.CommonName, proof.Domain) + require.Contains(t, 
cert.Subject.CommonName, proof.DomainEntry.DomainName) proofType, isCorrect, err := prover.VerifyProofByDomainOld(proof) require.NoError(t, err) require.True(t, isCorrect) if proofType == mapcommon.PoA { - require.Empty(t, proof.DomainEntryBytes) + // require.Empty(t, proof.DomainEntryBytes) } if proofType == mapcommon.PoP { // // get the correct CA entry diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index df6b7d27..66b8f8d7 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -60,19 +60,30 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, } // If it is a proof of presence, obtain the payload. - var payloadID *common.SHA256Output - var payload []byte + de := &mapCommon.DomainEntry{ + DomainName: domainPart, + DomainID: &domainPartID, + } proofType := mapCommon.PoA if isPoP { proofType = mapCommon.PoP - payloadID, payload, err = r.conn.RetrieveDomainEntry(ctx, domainPartID) + de.DomainCertsPayloadID, de.DomainCertsPayload, err = + r.conn.RetrieveDomainCertificatesPayload(ctx, domainPartID) + if err != nil { + return nil, fmt.Errorf("error obtaining x509 payload for %s: %w", domainPart, err) + } + de.DomainPoliciesPayloadID, de.DomainPoliciesPayload, err = + r.conn.RetrieveDomainPoliciesPayload(ctx, domainPartID) if err != nil { - return nil, fmt.Errorf("error obtaining payload for %s: %w", domainPart, err) + return nil, fmt.Errorf("error obtaining policies payload for %s: %w", + domainPart, err) } + // deleteme change this to sha(certIDs || polIDs) + de.DomainValue = de.DomainCertsPayloadID } proofList[i] = &mapCommon.MapServerResponse{ - Domain: domainPart, + DomainEntry: de, PoI: mapCommon.PoI{ ProofType: proofType, Proof: proof, @@ -80,8 +91,6 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, ProofKey: proofKey, ProofValue: proofValue, }, - DomainEntryID: payloadID, - DomainEntryBytes: payload, // TreeHeadSig: , TODO(juagargi) } 
} diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 504a7d99..6e275f83 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -31,10 +31,10 @@ func TestProofWithPoP(t *testing.T) { // Create a new DB with that name. On exiting the function, it will be removed. err := tests.CreateTestDB(ctx, dbName) require.NoError(t, err) - defer func() { - err = tests.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // defer func() { + // err = tests.RemoveTestDB(ctx, config) + // require.NoError(t, err) + // }() // Connect to the DB. conn, err := mysql.Connect(config) @@ -113,18 +113,17 @@ func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapS require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, "PoP not found for \"%s\"", domain.CertSubjectName(cert)) for _, proof := range proofs { - includesDomainName(t, proof.Domain, cert) + includesDomainName(t, proof.DomainEntry.DomainName, cert) proofType, isCorrect, err := prover.VerifyProofByDomain(proof) require.NoError(t, err) require.True(t, isCorrect) if proofType == mapcommon.PoA { - require.Empty(t, proof.DomainEntryBytes) + require.Empty(t, proof.DomainEntry.DomainCertsPayload) + require.Empty(t, proof.DomainEntry.DomainPoliciesPayload) } if proofType == mapcommon.PoP { - domainEntry, err := mapcommon.DeserializeDomainEntry(proof.DomainEntryBytes) - require.NoError(t, err) - certs, err := util.DeserializeCertificates(domainEntry.DomainCerts) + certs, err := util.DeserializeCertificates(proof.DomainEntry.DomainCertsPayload) require.NoError(t, err) // The certificate must be present. 
for _, c := range certs { diff --git a/pkg/mapserver/trie/trie.go b/pkg/mapserver/trie/trie.go index 23023fcf..fa82f558 100644 --- a/pkg/mapserver/trie/trie.go +++ b/pkg/mapserver/trie/trie.go @@ -73,11 +73,10 @@ func (s *Trie) Close() error { return s.db.Store.Close() } -// Update adds and deletes a sorted list of keys and their values to the trie +// Update adds and deletes a sorted list of keys and their values to the trie. // Adding and deleting can be simultaneous. // To delete, set the value to DefaultLeaf. -// If Update is called multiple times, only the state after the last update -// is committed. +// If Update is called multiple times, only the state after the last update is committed. func (s *Trie) Update(ctx context.Context, keys, values [][]byte) ([]byte, error) { if len(keys) != len(values) { return nil, fmt.Errorf("key value size does not match") diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 2ea14e94..2abb6e63 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -18,8 +18,8 @@ import ( type uniqueSet map[common.SHA256Output]struct{} type uniqueStringSet map[string]struct{} -// UpdateDomainEntriesTableUsingCerts: Update the domain entries using the domain certificates -func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts( +// DeletemeUpdateDomainEntriesTableUsingCerts: Update the domain entries using the domain certificates +func (mapUpdater *MapUpdater) DeletemeUpdateDomainEntriesTableUsingCerts( ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate, @@ -44,7 +44,7 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts( // retrieve (possibly)affected domain entries from db // It's possible that no records will be changed, because the certs are already recorded. 
- domainEntriesMap, err := mapUpdater.retrieveAffectedDomainFromDB(ctx, affectedDomainsSet) + domainEntriesMap, err := mapUpdater.deletemeRetrieveAffectedDomainFromDB(ctx, affectedDomainsSet) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | %w", err) } @@ -67,7 +67,7 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCerts( } // serialized the domainEntry -> key-value pair - keyValuePairs, err := SerializeUpdatedDomainEntries(domainEntriesToWrite) + keyValuePairs, err := DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | serializeUpdatedDomainEntries | %w", err) } @@ -194,14 +194,14 @@ func GetDomainEntriesToWrite(updatedDomain uniqueSet, return result, nil } -// SerializeUpdatedDomainEntries: serialize the updated domains -func SerializeUpdatedDomainEntries(domains map[common.SHA256Output]*mcommon.DomainEntry) ( +// DeletemeSerializeUpdatedDomainEntries: serialize the updated domains +func DeletemeSerializeUpdatedDomainEntries(domains map[common.SHA256Output]*mcommon.DomainEntry) ( []*db.KeyValuePair, error) { result := make([]*db.KeyValuePair, 0, len(domains)) for domainNameHash, domainEntry := range domains { - domainBytes, err := mcommon.SerializeDomainEntry(domainEntry) + domainBytes, err := mcommon.DeletemeSerializeDomainEntry(domainEntry) if err != nil { return nil, fmt.Errorf("serializeUpdatedDomainEntries | SerializedDomainEntry | %w", err) } diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index 26e45bc6..488e9a06 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -124,7 +124,7 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { require.NoError(t, err) // serialized the domainEntry -> key-value pair - _, err = SerializeUpdatedDomainEntries(domainEntriesToWrite) + _, err = 
DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) require.NoError(t, err) } diff --git a/pkg/mapserver/updater/dbutil.go b/pkg/mapserver/updater/dbutil.go index efd90bbe..c3bcf77c 100644 --- a/pkg/mapserver/updater/dbutil.go +++ b/pkg/mapserver/updater/dbutil.go @@ -17,8 +17,8 @@ type dbResult struct { err error } -// retrieveAffectedDomainFromDB: get affected domain entries from db -func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDB(ctx context.Context, +// deletemeRetrieveAffectedDomainFromDB: get affected domain entries from db +func (mapUpdater *MapUpdater) deletemeRetrieveAffectedDomainFromDB(ctx context.Context, affectedDomainsSet uniqueSet) (map[common.SHA256Output]*mapCommon.DomainEntry, error) { // XXX(juagargi) review why passing a set (we need to convert it to a slice) @@ -65,7 +65,7 @@ func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDB(ctx context.Context, //fmt.Println(len(domainEntries)) // parse the key-value pair -> domain map - domainEntriesMap, err := parseDomainBytes(domainEntries) + domainEntriesMap, err := deletemeParseDomainBytes(domainEntries) if err != nil { return nil, fmt.Errorf("retrieveAffectedDomainFromDB | %w", err) } @@ -96,7 +96,7 @@ type parserResult struct { } // domain bytes -> domain entries -func parseDomainBytes(domainEntries []*db.KeyValuePair) ( +func deletemeParseDomainBytes(domainEntries []*db.KeyValuePair) ( map[common.SHA256Output]*mapCommon.DomainEntry, error) { /* unique := make(map[[32]byte]byte) @@ -125,7 +125,7 @@ func parseDomainBytes(domainEntries []*db.KeyValuePair) ( entries := []*mapCommon.DomainEntry{} keys := [][32]byte{} for _, entry := range domainBytes { - newPair, err := mapCommon.DeserializeDomainEntry(entry.Value) + newPair, err := mapCommon.DeletemeDeserializeDomainEntry(entry.Value) if err != nil { resultChan <- parserResult{err: err} } diff --git a/pkg/mapserver/updater/deleteme.go b/pkg/mapserver/updater/deleteme.go index e644e09d..eb4cb03f 100644 --- 
a/pkg/mapserver/updater/deleteme.go +++ b/pkg/mapserver/updater/deleteme.go @@ -56,7 +56,7 @@ func (mapUpdater *MapUpdater) updateCertsReturnTime(ctx context.Context, certs [ return nil, nil, []*db.KeyValuePair{}, []*db.KeyValuePair{}, 0 } - _, _, err = KeyValuePairToSMTInput(keyValuePairs) + _, _, err = keyValuePairToSMTInput(keyValuePairs) if err != nil { return nil, fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err), nil, nil, 0 } @@ -137,7 +137,7 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCertsReturnTime(ctx c //fmt.Println(" domain entries size: ", readSize/1024/1024, " MB") // serialized the domainEntry -> key-value pair - keyValuePairs, err := SerializeUpdatedDomainEntries(domainEntriesToWrite) + keyValuePairs, err := DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) if err != nil { return nil, 0, nil, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | serializeUpdatedDomainEntries | %w", err), nil, nil } @@ -235,7 +235,7 @@ func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDBReturnReadDomains(ctx //fmt.Println(len(domainEntries)) // parse the key-value pair -> domain map - domainEntriesMap, err := parseDomainBytes(domainEntries) + domainEntriesMap, err := deletemeParseDomainBytes(domainEntries) if err != nil { return nil, nil, fmt.Errorf("retrieveAffectedDomainFromDB | %w", err) } diff --git a/pkg/mapserver/updater/hash.go b/pkg/mapserver/updater/hash.go index e5362f81..36c93cab 100644 --- a/pkg/mapserver/updater/hash.go +++ b/pkg/mapserver/updater/hash.go @@ -1,43 +1,30 @@ package updater -import ( - "bytes" - "fmt" - "sort" - - "github.com/netsec-ethz/fpki/pkg/common" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - // UpdateInput: key-value pair for updating // key: hash of domain name // value: hash of serilised DomainEntry -type UpdateInput struct { - Key [32]byte - Value []byte -} -// HashDomainEntriesThenSort: hash the DomainEntry, then sort them according to key -func 
HashDomainEntriesThenSort(domainEntries []mapCommon.DomainEntry) ([]UpdateInput, error) { - result := make([]UpdateInput, 0, len(domainEntries)) - for _, v := range domainEntries { - domainEntryBytes, err := mapCommon.SerializeDomainEntry(&v) - if err != nil { - return nil, fmt.Errorf("HashDomainEntriesThenSort | SerializedDomainEntry | %w", err) - } - var domainHash common.SHA256Output - copy(domainHash[:], common.SHA256Hash([]byte(v.DomainName))) - hashInput := UpdateInput{ - Key: domainHash, - Value: common.SHA256Hash(domainEntryBytes), - } - result = append(result, hashInput) - } +// // DeletemeHashDomainEntriesThenSort: hash the DomainEntry, then sort them according to key +// func DeletemeHashDomainEntriesThenSort(domainEntries []mapCommon.DomainEntry) ([]UpdateInput, error) { +// result := make([]UpdateInput, 0, len(domainEntries)) +// for _, v := range domainEntries { +// domainEntryBytes, err := mapCommon.SerializeDomainEntry(&v) +// if err != nil { +// return nil, fmt.Errorf("HashDomainEntriesThenSort | SerializedDomainEntry | %w", err) +// } +// var domainHash common.SHA256Output +// copy(domainHash[:], common.SHA256Hash([]byte(v.DomainName))) +// hashInput := UpdateInput{ +// Key: domainHash, +// Value: common.SHA256Hash(domainEntryBytes), +// } +// result = append(result, hashInput) +// } - // sort according to key - sort.Slice(result, func(i, j int) bool { - return bytes.Compare(result[i].Key[:], result[j].Key[:]) == -1 - }) +// // sort according to key +// sort.Slice(result, func(i, j int) bool { +// return bytes.Compare(result[i].Key[:], result[j].Key[:]) == -1 +// }) - return result, nil -} +// return result, nil +// } diff --git a/pkg/mapserver/updater/rpc_updater.go b/pkg/mapserver/updater/rpc_updater.go index 1cd435d1..1ac48a46 100644 --- a/pkg/mapserver/updater/rpc_updater.go +++ b/pkg/mapserver/updater/rpc_updater.go @@ -16,8 +16,8 @@ type newUpdates struct { pc []*projectCommon.SP } -// UpdateDomainEntriesTableUsingRPCAndPC: update the domain 
entries table, given RPC and PC -func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingRPCAndPC(ctx context.Context, +// DeletemeUpdateDomainEntriesTableUsingRPCAndPC: update the domain entries table, given RPC and PC +func (mapUpdater *MapUpdater) DeletemeUpdateDomainEntriesTableUsingRPCAndPC(ctx context.Context, rpc []*projectCommon.RPC, pc []*projectCommon.SP, readerNum int) ( []*db.KeyValuePair, int, error) { @@ -32,7 +32,7 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingRPCAndPC(ctx context. // retrieve (possibly)affected domain entries from db // It's possible that no records will be changed, because the certs are already recorded. - domainEntriesMap, err := mapUpdater.retrieveAffectedDomainFromDB(ctx, affectedDomainsMap) + domainEntriesMap, err := mapUpdater.deletemeRetrieveAffectedDomainFromDB(ctx, affectedDomainsMap) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | retrieveAffectedDomainFromDB | %w", err) } @@ -50,7 +50,7 @@ func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingRPCAndPC(ctx context. 
} // serialize the domainEntry -> key-value pair - keyValuePairs, err := SerializeUpdatedDomainEntries(domainEntriesToWrite) + keyValuePairs, err := DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) if err != nil { return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | serializeUpdatedDomainEntries | %w", err) } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 5e163e1c..e08246b8 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -99,7 +99,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // updateCerts: update the tables and SMT (in memory) using certificates func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { - keyValuePairs, numOfUpdates, err := mapUpdater.UpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) + keyValuePairs, numOfUpdates, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) if err != nil { return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err) } else if numOfUpdates == 0 { @@ -110,7 +110,7 @@ func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.C return nil } - keyInput, valueInput, err := KeyValuePairToSMTInput(keyValuePairs) + keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) if err != nil { return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) } @@ -141,7 +141,7 @@ func (mapUpdater *MapUpdater) UpdateRPCAndPCLocally(ctx context.Context, spList // updateRPCAndPC: update the tables and SMT (in memory) using PC and RPC func (mapUpdater *MapUpdater) updateRPCAndPC(ctx context.Context, pcList []*common.SP, rpcList []*common.RPC) error { // update the domain and - keyValuePairs, _, err := mapUpdater.UpdateDomainEntriesTableUsingRPCAndPC(ctx, rpcList, pcList, 10) + keyValuePairs, _, err := 
mapUpdater.DeletemeUpdateDomainEntriesTableUsingRPCAndPC(ctx, rpcList, pcList, 10) if err != nil { return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingRPCAndPC | %w", err) } @@ -150,7 +150,7 @@ func (mapUpdater *MapUpdater) updateRPCAndPC(ctx context.Context, pcList []*comm return nil } - keyInput, valueInput, err := KeyValuePairToSMTInput(keyValuePairs) + keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) if err != nil { return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) } @@ -187,17 +187,24 @@ func (mapUpdater *MapUpdater) fetchUpdatedDomainHash(ctx context.Context) ([]com return keys, nil } -// KeyValuePairToSMTInput: key value pair -> SMT update input -func KeyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ([][]byte, [][]byte, error) { - updateInput := make([]UpdateInput, 0, len(keyValuePair)) - +// keyValuePairToSMTInput: key value pair -> SMT update input +// deleteme: this function takes the payload and computes the hash of it. The hash is already +// stored in the DB with the new design: change both the function RetrieveDomainEntries and +// remove the hashing from this keyValuePairToSMTInput function. +func keyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ([][]byte, [][]byte, error) { + type inputPair struct { + Key [32]byte + Value []byte + } + updateInput := make([]inputPair, 0, len(keyValuePair)) for _, pair := range keyValuePair { - updateInput = append(updateInput, UpdateInput{ + updateInput = append(updateInput, inputPair{ Key: pair.Key, - Value: common.SHA256Hash(pair.Value), + Value: common.SHA256Hash(pair.Value), // Compute SHA256 of the payload. }) } + // Sorting is important, as the Trie.Update function expects the keys in sorted order. 
sort.Slice(updateInput, func(i, j int) bool { return bytes.Compare(updateInput[i].Key[:], updateInput[j].Key[:]) == -1 }) @@ -299,7 +306,7 @@ func UpdateSMTfromDomains( if err != nil { return err } - keys, values, err := KeyValuePairToSMTInput(entries) + keys, values, err := keyValuePairToSMTInput(entries) if err != nil { return err } diff --git a/pkg/mapserver/updater/updater_test_adapter.go b/pkg/mapserver/updater/updater_test_adapter.go index 82d5a26f..2a70561c 100644 --- a/pkg/mapserver/updater/updater_test_adapter.go +++ b/pkg/mapserver/updater/updater_test_adapter.go @@ -27,7 +27,7 @@ func (u *UpdaterTestAdapter) UpdateCerts(ctx context.Context, certs []*ctx509.Ce func (u *UpdaterTestAdapter) UpdateDomainEntriesUsingCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate, readerNum int) ([]*db.KeyValuePair, int, error) { - return (*MapUpdater)(u).UpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) + return (*MapUpdater)(u).DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) } func (a *UpdaterTestAdapter) FetchUpdatedDomainHash(ctx context.Context) ( @@ -38,7 +38,7 @@ func (a *UpdaterTestAdapter) FetchUpdatedDomainHash(ctx context.Context) ( func (a *UpdaterTestAdapter) KeyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ( [][]byte, [][]byte, error) { - return KeyValuePairToSMTInput(keyValuePair) + return keyValuePairToSMTInput(keyValuePair) } func (a *UpdaterTestAdapter) SMT() *trie.Trie { diff --git a/pkg/tests/mockdb_for_testing.go b/pkg/tests/mockdb_for_testing.go index 2d400841..1e0ec5fc 100644 --- a/pkg/tests/mockdb_for_testing.go +++ b/pkg/tests/mockdb_for_testing.go @@ -61,13 +61,19 @@ func (d *MockDB) RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ( return d.TreeTable[id], nil } -func (d *MockDB) RetrieveDomainEntry(ctx context.Context, key common.SHA256Output) ( +func (d *MockDB) RetrieveDomainCertificatesPayload(ctx context.Context, key common.SHA256Output) ( 
*common.SHA256Output, []byte, error) { id := common.SHA256Hash32Bytes(d.DomainEntriesTable[key]) return &id, d.DomainEntriesTable[key], nil } +func (d *MockDB) RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( + payloadID *common.SHA256Output, payload []byte, err error) { + + return nil, nil, nil +} + func (d *MockDB) RetrieveKeyValuePairTreeStruct(ctx context.Context, id []common.SHA256Output, numOfRoutine int) ([]*db.KeyValuePair, error) { result := []*db.KeyValuePair{} diff --git a/tests/integration/db/db.go b/tests/integration/db/db.go index 27862e8f..7ad21ae1 100644 --- a/tests/integration/db/db.go +++ b/tests/integration/db/db.go @@ -274,14 +274,14 @@ func testDomainEntriesTable() { // ***************************************************************** // check if value is correctly inserted - // RetrieveDomainEntry() + // RetrieveDomainCertificatesPayload() // ***************************************************************** keys := getKeyPtrs(1511, 4555) prevKeySize := len(keys) result := make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - _, value, err := conn.RetrieveDomainEntry(ctx, *key) + _, value, err := conn.RetrieveDomainCertificatesPayload(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } @@ -303,7 +303,7 @@ func testDomainEntriesTable() { result = make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - _, value, err := conn.RetrieveDomainEntry(ctx, *key) + _, value, err := conn.RetrieveDomainCertificatesPayload(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } @@ -346,7 +346,7 @@ func testDomainEntriesTable() { result = make([]*db.KeyValuePair, 0, len(keys)) for _, key := range keys { - _, value, err := conn.RetrieveDomainEntry(ctx, *key) + _, value, err := conn.RetrieveDomainCertificatesPayload(ctx, *key) if err != nil && err != sql.ErrNoRows { panic(err) } diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go index b0ba9604..66cf6b70 
100644 --- a/tests/integration/mapserver/main.go +++ b/tests/integration/mapserver/main.go @@ -166,7 +166,7 @@ func getSomeDataPointsToTest(ctx context.Context, config *db.Configuration) []Da for i, name := range names { data[i].Name = name ID := common.SHA256Hash32Bytes([]byte(name)) - payload, err := conn.RetrieveDomainEntry(ctx, ID) + _, payload, err := conn.RetrieveDomainCertificatesPayload(ctx, ID) panicIfError(err) // payload contains several certificates. data[i].Certs, err = ctx509.ParseCertificates(payload) From 36775ba435ff2801b990cd46d84a1c5fd8b17ba0 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 26 Apr 2023 13:47:02 +0200 Subject: [PATCH 091/187] ToJSON FromJSON also accept lists. --- pkg/common/json.go | 92 ++++++++++++++++++++++++++++++++++++ pkg/common/json_test.go | 13 +++++ pkg/common/structure_test.go | 81 +++++++++++++++---------------- 3 files changed, 146 insertions(+), 40 deletions(-) diff --git a/pkg/common/json.go b/pkg/common/json.go index 29215b29..1036aa16 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" "io/ioutil" + "reflect" "github.com/google/trillian" trilliantypes "github.com/google/trillian/types" @@ -133,7 +134,21 @@ func ToJSON(o any) ([]byte, error) { r.T = "[]trillian.Proof" case *trilliantypes.LogRootV1: r.T = "LogRootV1" + case listOfMarshallable: + r.T = "[]" default: + if t := reflect.TypeOf(o); t.Kind() == reflect.Slice { + // If slice, try to serialize all elements inside, then write its type as slice. 
+ s := reflect.ValueOf(o) + listOfAny := make([]any, s.Len()) + for i := 0; i < len(listOfAny); i++ { + listOfAny[i] = s.Index(i).Interface() + } + b, err := ToJSON(listOfMarshallable{ + List: listOfAny, + }) + return b, err + } return nil, fmt.Errorf("unrecognized type %T", o) } @@ -236,6 +251,18 @@ func FromJSON(data []byte) (any, error) { return nil, fmt.Errorf("unmarshalling internal type: %w", err) } return typeAndValue.O, nil + case "[]": + // This is a special case. We have received a list of "things" that should be + // deserializable again with FromJSON. Deserialize this object of type listOfMarshallable + // and return all its internal objects + typeAndValue := struct { + T string + O listOfMarshallable + }{} + if err := json.Unmarshal(data, &typeAndValue); err != nil { + return nil, fmt.Errorf("unmarshalling internal type: %w", err) + } + return typeAndValue.O.List, nil default: return nil, fmt.Errorf("unmarshalling internal type: bad type \"%s\"", typeOnly.T) } @@ -263,3 +290,68 @@ func FromJSONFile(filePath string) (any, error) { return FromJSON(data) } + +// marshallableObject is used only on deserialization. A list of these objects is read from the +// JSON and should be parsed using FromJSON(). See listOfMarshallable.UnmarshalJSON. +type marshallableObject struct { + O any +} + +func (o marshallableObject) MarshalJSON() ([]byte, error) { + return ToJSON(o.O) +} + +func (o *marshallableObject) UnmarshalJSON(b []byte) error { + obj, err := FromJSON(b) + o.O = obj + return err +} + +// listOfMarshallable is used to allow (de)serialization (from)to JSON. When a list of our +// types is to be serialized, a list of these objects is created instead (see ToJSON). +type listOfMarshallable struct { + List []any +} + +// MarshalJSON serializes to JSON a list of objects than can be convertible to JSON via +// the method ToJSON. 
+func (l listOfMarshallable) MarshalJSON() ([]byte, error) { + payloads := make([][]byte, len(l.List)) + for i, e := range l.List { + b, err := ToJSON(e) + if err != nil { + return nil, fmt.Errorf("cannot marshal list to JSON, elem at %d failed with error: %s", + i, err) + } + payloads[i] = b + } + // this list in JSON consists in the type and then the ToJSON elements. + payload := []byte(`{"List":[`) + for _, p := range payloads { + payload = append(payload, p...) + payload = append(payload, []byte(`,`)...) + } + // Remove last "," + payload = payload[:len(payload)-1] + // Close list and close object itself. + payload = append(payload, []byte(`]}`)...) + + return payload, nil +} + +func (l *listOfMarshallable) UnmarshalJSON(b []byte) error { + // Deserialize an object with a "List" field that will use FromJSON for its elements. + tempObject := struct { + List []marshallableObject + }{} + err := json.Unmarshal(b, &tempObject) + if err != nil { + return err + } + // Take the list with wrapped objects and unwrap them to this list. 
+ l.List = make([]any, len(tempObject.List)) + for i, o := range tempObject.List { + l.List[i] = o.O + } + return nil +} diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 1972720a..7cf09b0d 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -178,6 +178,19 @@ func TestToFromJSON(t *testing.T) { Metadata: generateRandomBytes(), }, }, + "slice_of_SP": { + data: []any{ + randomSPT(), + randomSPT(), + }, + }, + "slice_of_many_things": { + data: []any{ + randomSPT(), + randomRPC(), + randomSP(), + }, + }, } for name, tc := range cases { diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index 641acec2..095a95d5 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -1,6 +1,7 @@ package common import ( + "math/rand" "os" "path" "reflect" @@ -166,35 +167,10 @@ func TestSingleObject(t *testing.T) { }, }, "rpc": { - data: &RPC{ - SerialNumber: 1729381, - Subject: "bad domain", - Version: 1729381, - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), - NotBefore: nowWithoutMonotonic(), - NotAfter: nowWithoutMonotonic(), - CAName: "bad domain", - SignatureAlgorithm: SHA256, - TimeStamp: nowWithoutMonotonic(), - PRCSignature: generateRandomBytes(), - CASignature: generateRandomBytes(), - SPTs: []SPT{*randomSPT(), *randomSPT()}, - }, + data: randomRPC(), }, "spt": { - data: &SPT{ - Version: 11, - Subject: "hihihihihhi", - CAName: "this is the CA name", - LogID: 42, - CertType: 0x11, - AddedTS: time.Unix(1234, 0), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), - STHSerialNumber: 131678, - Signature: generateRandomBytes(), - }, + data: randomSPT(), }, "sprt": { data: &SPRT{ @@ -214,19 +190,7 @@ func TestSingleObject(t *testing.T) { }, }, "sp": { - data: &SP{ - Policies: Policy{ - TrustedCA: []string{"one CA", "another CA"}, - AllowedSubdomains: []string{"sub1.com", "sub2.com"}, - }, - TimeStamp: nowWithoutMonotonic(), - Subject: "sp subject", - CAName: "one CA", - 
SerialNumber: 1234, - CASignature: generateRandomBytes(), - RootCertSignature: generateRandomBytes(), - SPTs: []SPT{*randomSPT(), *randomSPT(), *randomSPT()}, - }, + data: randomSP(), }, "psr": { data: &PSR{ @@ -261,6 +225,43 @@ func TestSingleObject(t *testing.T) { } } +func randomRPC() *RPC { + return &RPC{ + SerialNumber: 1729381, + Subject: "bad domain", + Version: 1729381, + PublicKeyAlgorithm: RSA, + PublicKey: generateRandomBytes(), + NotBefore: nowWithoutMonotonic(), + NotAfter: nowWithoutMonotonic(), + CAName: "bad domain", + SignatureAlgorithm: SHA256, + TimeStamp: nowWithoutMonotonic(), + PRCSignature: generateRandomBytes(), + CASignature: generateRandomBytes(), + SPTs: []SPT{*randomSPT(), *randomSPT()}, + } +} + +func randomSP() *SP { + return &SP{ + Policies: Policy{ + TrustedCA: []string{"ca1", "ca2"}, + }, + TimeStamp: nowWithoutMonotonic(), + Subject: "domainname.com", + CAName: "ca1", + SerialNumber: rand.Int(), + CASignature: generateRandomBytes(), + RootCertSignature: generateRandomBytes(), + SPTs: []SPT{ + *randomSPT(), + *randomSPT(), + *randomSPT(), + }, + } +} + func randomSPT() *SPT { return &SPT{ Version: 12368713, From 68642aaffbd5a2878869481dbb024b56267dccd4 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Sat, 29 Apr 2023 12:01:23 +0200 Subject: [PATCH 092/187] New JSON functions. 
--- pkg/common/crypto.go | 2 +- pkg/common/json.go | 437 ++++++++++++++++------------------- pkg/common/json_test.go | 109 +++++---- pkg/common/structure.go | 26 +++ pkg/common/structure_test.go | 115 +++------ 5 files changed, 313 insertions(+), 376 deletions(-) diff --git a/pkg/common/crypto.go b/pkg/common/crypto.go index 7fc0d207..576f82c9 100644 --- a/pkg/common/crypto.go +++ b/pkg/common/crypto.go @@ -26,7 +26,7 @@ const ( ) // SignStructRSASHA256: generate a signature using SHA256 and RSA -func SignStructRSASHA256(s interface{}, privKey *rsa.PrivateKey) ([]byte, error) { +func SignStructRSASHA256(s any, privKey *rsa.PrivateKey) ([]byte, error) { bytes, err := ToJSON(s) if err != nil { return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) diff --git a/pkg/common/json.go b/pkg/common/json.go index 1036aa16..c3cc6f76 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -10,31 +10,210 @@ import ( trilliantypes "github.com/google/trillian/types" ) -func JSONToPoI(poiBytes []byte) ([]*trillian.Proof, error) { - po, err := FromJSON(poiBytes) +type serializableObjectBase struct { + O any +} + +func ToJSON(obj any) ([]byte, error) { + if _, ok := obj.(serializableObjectBase); !ok { + obj = serializableObjectBase{ + O: obj, + } + } + return json.Marshal(obj) +} + +func FromJSON(data []byte) (any, error) { + var base serializableObjectBase + err := json.Unmarshal(data, &base) + return base.O, err +} + +func (o serializableObjectBase) MarshalJSON() ([]byte, error) { + T, O, err := o.marshalJSON(o.O) if err != nil { - return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) + return nil, err } - result, ok := po.([]*trillian.Proof) - if !ok { - return nil, fmt.Errorf("JsonFileToPoI | object is %T", po) + + tmp := struct { + T string + O json.RawMessage + }{ + T: T, + O: O, } - return result, nil + return json.Marshal(tmp) } -// JSONToLogRoot: Bytes -> log root in json -func JSONToLogRoot(logRootBytes []byte) (*trilliantypes.LogRootV1, 
error) { - po, err := FromJSON(logRootBytes) +// marshalJSON returns two components matching T and O: the Type (string) and the payload of O. +func (*serializableObjectBase) marshalJSON(obj any) (string, []byte, error) { + var T string + switch obj.(type) { + case RCSR: + T = "rcsr" + case RPC: + T = "rpc" + case PCRevocation: + T = "rev" + case SP: + T = "sp" + case SPT: + T = "spt" + case SPRT: + T = "sprt" + case PSR: + T = "psr" + case trillian.Proof: + T = "trillian.Proof" + case trilliantypes.LogRootV1: + T = "logrootv1" + default: + valOf := reflect.ValueOf(obj) + switch valOf.Type().Kind() { + case reflect.Pointer: + // Dereference and convert to "any". + T, O, err := (*serializableObjectBase)(nil).marshalJSON(valOf.Elem().Interface()) + return fmt.Sprintf("*%s", T), O, err + case reflect.Slice: + // A slice. Serialize each item and also serialize the slice itself. + children := make([]json.RawMessage, valOf.Len()) + for i := 0; i < len(children); i++ { + v := valOf.Index(i).Interface() + b, err := ToJSON(v) + if err != nil { + return "", nil, fmt.Errorf("marshaling slice, element %d failed: %w", i, err) + } + children[i] = b + } + data, err := json.Marshal(children) + return "[]", data, err + default: + return "", nil, fmt.Errorf("unknown type %T", obj) + } + } + data, err := json.Marshal(obj) + return T, data, err +} + +func (o *serializableObjectBase) UnmarshalJSON(data []byte) error { + tmp := struct { + T string + O json.RawMessage + }{} + + err := json.Unmarshal(data, &tmp) if err != nil { - return nil, fmt.Errorf("JsonBytesToLogRoot | Unmarshal | %w", err) + return err } - result, ok := po.(*trilliantypes.LogRootV1) + // Parse the T,O that we received. + ok, obj, err := unmarshalTypeObject(tmp.T, tmp.O) if !ok { - return nil, fmt.Errorf("JsonFileToLogRoot | object is %T", po) + if len(tmp.T) > 0 && tmp.T[0] == '*' { + // Pointer, try again just once. 
+ tmp.T = tmp.T[1:] + _, obj, err = unmarshalTypeObject(tmp.T, tmp.O) + // Now convert to a pointer to the original object. + objPtr := reflect.New(reflect.TypeOf(obj)) + objPtr.Elem().Set(reflect.ValueOf(obj)) // assign original object + obj = objPtr.Interface() + } } - return result, nil + o.O = obj + return err } +// unmarshalTypeObject returns true if the function understood the type in T, and the object with +// the specific type represented by T. +func unmarshalTypeObject(T string, data []byte) (bool, any, error) { + var obj any + var err error + switch T { + case "[]": + // There is a slice of objects beneath this object. + var tmp []json.RawMessage + err = json.Unmarshal(data, &tmp) + if err != nil { + err = fmt.Errorf("unmarshaling slice, object doesn't seem to be a slice: %w", err) + } + if err == nil { + list := make([]any, len(tmp)) + obj = list + for i, objData := range tmp { + // Is this an embedded SerializableObjectBase? + tmp := serializableObjectBase{} + err = json.Unmarshal(objData, &tmp) + if err != nil { + err = fmt.Errorf("unmarshaling slice, element at %d failed: %w", i, err) + break + } + list[i] = tmp.O + } + } + case "rcsr": + obj, err = inflateObj[RCSR](data) + case "rpc": + obj, err = inflateObj[RPC](data) + case "rev": + obj, err = inflateObj[PCRevocation](data) + case "sp": + obj, err = inflateObj[SP](data) + case "spt": + obj, err = inflateObj[SPT](data) + case "sprt": + obj, err = inflateObj[SPRT](data) + case "psr": + obj, err = inflateObj[PSR](data) + case "trillian.Proof": + obj, err = inflateObj[trillian.Proof](data) + case "logrootv1": + obj, err = inflateObj[trilliantypes.LogRootV1](data) + default: + err = fmt.Errorf("unknown type represented by \"%s\"", T) + obj = nil + } + return obj != nil, obj, err +} + +func inflateObj[T any](data []byte) (any, error) { + var tmp T + err := json.Unmarshal(data, &tmp) + return tmp, err +} + +// +// +// +// +// +// +// +// + +// func JSONToPoI(poiBytes []byte) ([]*trillian.Proof, error) { 
+// po, err := FromJSON(poiBytes) +// if err != nil { +// return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) +// } +// result, ok := po.([]*trillian.Proof) +// if !ok { +// return nil, fmt.Errorf("JsonFileToPoI | object is %T", po) +// } +// return result, nil +// } + +// // JSONToLogRoot: Bytes -> log root in json +// func JSONToLogRoot(logRootBytes []byte) (*trilliantypes.LogRootV1, error) { +// po, err := FromJSON(logRootBytes) +// if err != nil { +// return nil, fmt.Errorf("JsonBytesToLogRoot | Unmarshal | %w", err) +// } +// result, ok := po.(*trilliantypes.LogRootV1) +// if !ok { +// return nil, fmt.Errorf("JsonFileToLogRoot | object is %T", po) +// } +// return result, nil +// } + // JsonFileToRPC: read json files and unmarshal it to Root Policy Certificate func JsonFileToRPC(filePath string) (*RPC, error) { po, err := FromJSONFile(filePath) @@ -105,171 +284,8 @@ func JsonFileToSP(filePath string) (*SP, error) { return o, err } -func ToJSON(o any) ([]byte, error) { - r := struct { - T string - O any - }{ - O: o, - } - // Find the internal type of the object to marshal. - switch o := o.(type) { - case *RCSR: - r.T = "rcsr" - case *RPC: - r.T = "rpc" - case *PCRevocation: - r.T = "pcrevocation" - case *SPT: - r.T = "spt" - case *SPRT: - r.T = "sprt" - case *SP: - r.T = "sp" - case *PSR: - r.T = "psr" - case *trillian.Proof: - r.T = "trillian.Proof" - case []*trillian.Proof: - r.T = "[]trillian.Proof" - case *trilliantypes.LogRootV1: - r.T = "LogRootV1" - case listOfMarshallable: - r.T = "[]" - default: - if t := reflect.TypeOf(o); t.Kind() == reflect.Slice { - // If slice, try to serialize all elements inside, then write its type as slice. 
- s := reflect.ValueOf(o) - listOfAny := make([]any, s.Len()) - for i := 0; i < len(listOfAny); i++ { - listOfAny[i] = s.Index(i).Interface() - } - b, err := ToJSON(listOfMarshallable{ - List: listOfAny, - }) - return b, err - } - return nil, fmt.Errorf("unrecognized type %T", o) - } - - // Now Marshal the wrapper. - d, err := json.Marshal(r) - if err != nil { - return nil, fmt.Errorf("wrapping marshalling of object: %w", err) - } - return d, nil -} - -func FromJSON(data []byte) (any, error) { - // Get only the type. - typeOnly := struct { - T string - }{} - if err := json.Unmarshal(data, &typeOnly); err != nil { - return nil, fmt.Errorf("obtaining the wrapping type: %w", err) - } - - switch typeOnly.T { - case "rcsr": - typeAndValue := struct { - T string - O *RCSR - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "rpc": - typeAndValue := struct { - T string - O *RPC - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "spt": - typeAndValue := struct { - T string - O *SPT - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "sprt": - typeAndValue := struct { - T string - O *SPRT - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "sp": - typeAndValue := struct { - T string - O *SP - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "psr": - typeAndValue := struct { - T string - O *PSR - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, 
fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "trillian.Proof": - typeAndValue := struct { - T string - O *trillian.Proof - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "[]trillian.Proof": - typeAndValue := struct { - T string - O []*trillian.Proof - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "LogRootV1": - typeAndValue := struct { - T string - O *trilliantypes.LogRootV1 - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O, nil - case "[]": - // This is a special case. We have received a list of "things" that should be - // deserializable again with FromJSON. Deserialize this object of type listOfMarshallable - // and return all its internal objects - typeAndValue := struct { - T string - O listOfMarshallable - }{} - if err := json.Unmarshal(data, &typeAndValue); err != nil { - return nil, fmt.Errorf("unmarshalling internal type: %w", err) - } - return typeAndValue.O.List, nil - default: - return nil, fmt.Errorf("unmarshalling internal type: bad type \"%s\"", typeOnly.T) - } -} - // ToJSONFile serializes any supported type to a file, using JSON. -func ToJSONFile(s any, filePath string) error { +func ToJSONFile(s PolicyObject, filePath string) error { bytes, err := ToJSON(s) if err != nil { return fmt.Errorf("JsonStructToFile | ToJSON | %w", err) @@ -290,68 +306,3 @@ func FromJSONFile(filePath string) (any, error) { return FromJSON(data) } - -// marshallableObject is used only on deserialization. A list of these objects is read from the -// JSON and should be parsed using FromJSON(). See listOfMarshallable.UnmarshalJSON. 
-type marshallableObject struct { - O any -} - -func (o marshallableObject) MarshalJSON() ([]byte, error) { - return ToJSON(o.O) -} - -func (o *marshallableObject) UnmarshalJSON(b []byte) error { - obj, err := FromJSON(b) - o.O = obj - return err -} - -// listOfMarshallable is used to allow (de)serialization (from)to JSON. When a list of our -// types is to be serialized, a list of these objects is created instead (see ToJSON). -type listOfMarshallable struct { - List []any -} - -// MarshalJSON serializes to JSON a list of objects than can be convertible to JSON via -// the method ToJSON. -func (l listOfMarshallable) MarshalJSON() ([]byte, error) { - payloads := make([][]byte, len(l.List)) - for i, e := range l.List { - b, err := ToJSON(e) - if err != nil { - return nil, fmt.Errorf("cannot marshal list to JSON, elem at %d failed with error: %s", - i, err) - } - payloads[i] = b - } - // this list in JSON consists in the type and then the ToJSON elements. - payload := []byte(`{"List":[`) - for _, p := range payloads { - payload = append(payload, p...) - payload = append(payload, []byte(`,`)...) - } - // Remove last "," - payload = payload[:len(payload)-1] - // Close list and close object itself. - payload = append(payload, []byte(`]}`)...) - - return payload, nil -} - -func (l *listOfMarshallable) UnmarshalJSON(b []byte) error { - // Deserialize an object with a "List" field that will use FromJSON for its elements. - tempObject := struct { - List []marshallableObject - }{} - err := json.Unmarshal(b, &tempObject) - if err != nil { - return err - } - // Take the list with wrapped objects and unwrap them to this list. 
- l.List = make([]any, len(tempObject.List)) - for i, o := range tempObject.List { - l.List[i] = o.O - } - return nil -} diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 7cf09b0d..f741ed4c 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -1,9 +1,9 @@ package common import ( + "fmt" "os" "path" - "reflect" "testing" "time" @@ -143,72 +143,79 @@ func TestEncodeAndDecodeOfPC(t *testing.T) { assert.True(t, deserializedPC.Equal(pc), "PC serialized and deserialized error") } -func TestToFromJSON(t *testing.T) { - cases := map[string]struct { +// TestPolicyObjects checks that the structure types in the test cases can be converted to JSON and +// back, using the functions ToJSON and FromJSON. +// It checks after deserialization that the objects are equal. +func TestPolicyObjects(t *testing.T) { + cases := []struct { data any }{ - "trillian.Proof": { - data: &trillian.Proof{ - LeafIndex: 1, - Hashes: generateRandomBytesArray(), - }, + { + data: randomRPC(), }, - "slice_of_trillian.Proof": { - data: []*trillian.Proof{ - { - LeafIndex: 1, - Hashes: generateRandomBytesArray(), - }, - { - LeafIndex: 2, - Hashes: generateRandomBytesArray(), - }, - { - LeafIndex: 3, - Hashes: generateRandomBytesArray(), - }, - }, + { + data: *randomRPC(), }, - "trilliantypes.LogRootV1": { - data: &trilliantypes.LogRootV1{ - TreeSize: 1, - RootHash: generateRandomBytes(), - TimestampNanos: 11, - Revision: 3, - Metadata: generateRandomBytes(), - }, + { + data: randomRCSR(), }, - "slice_of_SP": { + { + data: randomSP(), + }, + { data: []any{ - randomSPT(), - randomSPT(), + randomRPC(), + randomRCSR(), + randomSP(), + randomSPRT(), + randomPSR(), + randomTrillianProof(), + randomLogRootV1(), }, }, - "slice_of_many_things": { + { data: []any{ - randomSPT(), randomRPC(), - randomSP(), + []any{ + randomSP(), + randomSPT(), + }, + []any{ + randomTrillianProof(), + randomTrillianProof(), + }, }, }, } - - for name, tc := range cases { - name, tc := name, tc - 
t.Run(name, func(t *testing.T) { + for i, tc := range cases { + i, tc := i, tc + t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) { t.Parallel() - expectedType := reflect.TypeOf(tc.data) // type will be a pointer to RPC, etc. - d, err := ToJSON(tc.data) - t.Logf("JSON: %s", string(d)) + // Serialize. + data, err := ToJSON(tc.data) require.NoError(t, err) - - o, err := FromJSON(d) + // Deserialize. + deserialized, err := FromJSON(data) require.NoError(t, err) - require.NotNil(t, o) - require.Equal(t, tc.data, o) - - gotType := reflect.TypeOf(o) - require.Equal(t, expectedType, gotType) + // Compare. + require.Equal(t, tc.data, deserialized) }) } } + +func randomTrillianProof() *trillian.Proof { + return &trillian.Proof{ + LeafIndex: 1, + Hashes: generateRandomBytesArray(), + } +} + +func randomLogRootV1() *trilliantypes.LogRootV1 { + return &trilliantypes.LogRootV1{ + TreeSize: 1, + RootHash: generateRandomBytes(), + TimestampNanos: 11, + Revision: 3, + Metadata: generateRandomBytes(), + } +} diff --git a/pkg/common/structure.go b/pkg/common/structure.go index 523bfa74..aeab3618 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -5,6 +5,18 @@ import ( "time" ) +// PolicyObject is an interface that is implemented by all objects that are part of the set +// of "policy objects". A policy object is that one that represents functionality of policies +// for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. +type PolicyObject interface { + __PolicyObjectMarkerMethod() +} + +// PolicyObjectList is a list of PolicyObject's, which is a PolicyObject in and of itself. 
+type PolicyObjectList []PolicyObject + +func (PolicyObjectList) __PolicyObjectMarkerMethod() {} + // root certificate signing request type RCSR struct { Subject string `json:",omitempty"` @@ -17,6 +29,8 @@ type RCSR struct { Signature []byte `json:",omitempty"` } +func (RCSR) __PolicyObjectMarkerMethod() {} + // root policy certificate type RPC struct { SerialNumber int `json:",omitempty"` @@ -34,11 +48,15 @@ type RPC struct { SPTs []SPT `json:",omitempty"` } +func (RPC) __PolicyObjectMarkerMethod() {} + // PCRevocation is for now empty. type PCRevocation struct { // TODO(juagargi) define the revocation. } +func (PCRevocation) __PolicyObjectMarkerMethod() {} + // signed policy timestamp type SPT struct { Version int `json:",omitempty"` @@ -53,12 +71,16 @@ type SPT struct { Signature []byte `json:",omitempty"` } +func (SPT) __PolicyObjectMarkerMethod() {} + // signed policy revocation timestamp type SPRT struct { SPT Reason int `json:",omitempty"` } +func (SPRT) __PolicyObjectMarkerMethod() {} + // Signed Policy type SP struct { Policies Policy `json:",omitempty"` @@ -71,6 +93,8 @@ type SP struct { SPTs []SPT `json:",omitempty"` } +func (SP) __PolicyObjectMarkerMethod() {} + // Policy Signing Request type PSR struct { Policies Policy `json:",omitempty"` @@ -79,6 +103,8 @@ type PSR struct { RootCertSignature []byte `json:",omitempty"` } +func (PSR) __PolicyObjectMarkerMethod() {} + // Domain policy type Policy struct { TrustedCA []string `json:",omitempty"` diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index 095a95d5..bf1a68dd 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -4,7 +4,6 @@ import ( "math/rand" "os" "path" - "reflect" "testing" "time" @@ -147,94 +146,16 @@ func TestJsonReadWrite(t *testing.T) { assert.True(t, rpc.Equal(rpc1), "Json error") } -// TestSingleObject checks that the structure types in the test cases can be converted to JSON and -// back, using the functions ToJSON and FromJSON. 
-// It checks after deserialization if the objects are equal. -func TestSingleObject(t *testing.T) { - cases := map[string]struct { - data any - }{ - "rcsr": { - data: &RCSR{ - Subject: "bandqhvdbdlwnd", - Version: 6789, - TimeStamp: time.Unix(111222323, 0), - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), - SignatureAlgorithm: SHA256, - PRCSignature: generateRandomBytes(), - Signature: generateRandomBytes(), - }, - }, - "rpc": { - data: randomRPC(), - }, - "spt": { - data: randomSPT(), - }, - "sprt": { - data: &SPRT{ - SPT: SPT{ - Version: 12314, - Subject: "bad domain", - CAName: "I'm malicious CA, nice to meet you", - LogID: 1729381, - CertType: 0x21, - AddedTS: nowWithoutMonotonic(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), - STHSerialNumber: 1729381, - Signature: generateRandomBytes(), - }, - Reason: 1729381, - }, - }, - "sp": { - data: randomSP(), - }, - "psr": { - data: &PSR{ - Policies: Policy{ - TrustedCA: []string{"one CA", "another CA"}, - AllowedSubdomains: []string{"sub1.com", "sub2.com"}, - }, - TimeStamp: nowWithoutMonotonic(), - DomainName: "domain_name.com", - RootCertSignature: generateRandomBytes(), - }, - }, - } - - for name, tc := range cases { - name, tc := name, tc - t.Run(name, func(t *testing.T) { - t.Parallel() - expectedType := reflect.TypeOf(tc.data) // type will be a pointer to RPC, etc. 
- d, err := ToJSON(tc.data) - t.Logf("JSON: %s", string(d)) - require.NoError(t, err) - - o, err := FromJSON(d) - require.NoError(t, err) - require.NotNil(t, o) - require.Equal(t, tc.data, o) - - gotType := reflect.TypeOf(o) - require.Equal(t, expectedType, gotType) - }) - } -} - func randomRPC() *RPC { return &RPC{ SerialNumber: 1729381, - Subject: "bad domain", + Subject: "RPC CA", Version: 1729381, PublicKeyAlgorithm: RSA, PublicKey: generateRandomBytes(), NotBefore: nowWithoutMonotonic(), NotAfter: nowWithoutMonotonic(), - CAName: "bad domain", + CAName: "RPC CA", SignatureAlgorithm: SHA256, TimeStamp: nowWithoutMonotonic(), PRCSignature: generateRandomBytes(), @@ -243,6 +164,19 @@ func randomRPC() *RPC { } } +func randomRCSR() *RCSR { + return &RCSR{ + Subject: "subject", + Version: 6789, + TimeStamp: nowWithoutMonotonic(), + PublicKeyAlgorithm: RSA, + PublicKey: generateRandomBytes(), + SignatureAlgorithm: SHA256, + PRCSignature: generateRandomBytes(), + Signature: generateRandomBytes(), + } +} + func randomSP() *SP { return &SP{ Policies: Policy{ @@ -277,6 +211,25 @@ func randomSPT() *SPT { } } +func randomSPRT() *SPRT { + return &SPRT{ + SPT: *randomSPT(), + Reason: 1729381, + } +} + +func randomPSR() *PSR { + return &PSR{ + Policies: Policy{ + TrustedCA: []string{"one CA", "another CA"}, + AllowedSubdomains: []string{"sub1.com", "sub2.com"}, + }, + TimeStamp: nowWithoutMonotonic(), + DomainName: "domain_name.com", + RootCertSignature: generateRandomBytes(), + } +} + func nowWithoutMonotonic() time.Time { return time.Unix(time.Now().Unix(), 0) } From 70a46f63b607b56c08a9366e111afe876ed94d6e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 2 May 2023 09:47:03 +0200 Subject: [PATCH 093/187] More tests functionality. 
--- pkg/common/json.go | 2 +- pkg/common/structure_test.go | 12 +++++++++ pkg/mapserver/responder/old_responder_test.go | 8 +++--- pkg/mapserver/responder/responder_test.go | 12 ++++----- pkg/tests/helpers.go | 26 +++++++++++++++++++ .../helperfunctions.go} | 2 +- pkg/tests/{ => testdb}/mockdb_for_testing.go | 2 +- pkg/tests/{ => testdb}/testDB.go | 2 +- tests/testdata/2-SPs.json | 1 + 9 files changed, 53 insertions(+), 14 deletions(-) create mode 100644 pkg/tests/helpers.go rename pkg/tests/{test_utils.go => testdb/helperfunctions.go} (98%) rename pkg/tests/{ => testdb}/mockdb_for_testing.go (99%) rename pkg/tests/{ => testdb}/testDB.go (99%) create mode 100644 tests/testdata/2-SPs.json diff --git a/pkg/common/json.go b/pkg/common/json.go index c3cc6f76..554c704b 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -285,7 +285,7 @@ func JsonFileToSP(filePath string) (*SP, error) { } // ToJSONFile serializes any supported type to a file, using JSON. -func ToJSONFile(s PolicyObject, filePath string) error { +func ToJSONFile(s any, filePath string) error { bytes, err := ToJSON(s) if err != nil { return fmt.Errorf("JsonStructToFile | ToJSON | %w", err) diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index bf1a68dd..29cb437d 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -7,10 +7,22 @@ import ( "testing" "time" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) +var update = tests.UpdateGoldenFiles() + +func TestGenerateGoldenFiles(t *testing.T) { + // Update the JSON files in tests/testdata + if *update { + obj := []any{randomSP(), randomSP()} + err := ToJSONFile(obj, "../../tests/testdata/2-SPs.json") + require.NoError(t, err) + } +} + // TestEqual: Equal funcs for every structure func TestEqual(t *testing.T) { rcsr := &RCSR{ diff --git a/pkg/mapserver/responder/old_responder_test.go 
b/pkg/mapserver/responder/old_responder_test.go index a4597515..57e39982 100644 --- a/pkg/mapserver/responder/old_responder_test.go +++ b/pkg/mapserver/responder/old_responder_test.go @@ -15,7 +15,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/stretchr/testify/require" ) @@ -59,10 +59,10 @@ func TestOldResponderWithPoP(t *testing.T) { dbName := t.Name() config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - err := tests.CreateTestDB(ctx, dbName) + err := testdb.CreateTestDB(ctx, dbName) require.NoError(t, err) defer func() { - err = tests.RemoveTestDB(ctx, config) + err = testdb.RemoveTestDB(ctx, config) require.NoError(t, err) }() @@ -170,7 +170,7 @@ func getUpdatedUpdater(t require.TestingT, certs []*ctx509.Certificate) (db.Conn ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - conn := tests.NewMockDB() + conn := testdb.NewMockDB() smt, err := trie.NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) smt.CacheHeightLimit = 233 diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 6e275f83..fe9436ba 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -16,7 +16,7 @@ import ( mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" ) @@ -29,12 +29,12 @@ func TestProofWithPoP(t *testing.T) { config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) // 
Create a new DB with that name. On exiting the function, it will be removed. - err := tests.CreateTestDB(ctx, dbName) + err := testdb.CreateTestDB(ctx, dbName) require.NoError(t, err) - // defer func() { - // err = tests.RemoveTestDB(ctx, config) - // require.NoError(t, err) - // }() + defer func() { + err = testdb.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() // Connect to the DB. conn, err := mysql.Connect(config) diff --git a/pkg/tests/helpers.go b/pkg/tests/helpers.go new file mode 100644 index 00000000..9faabee0 --- /dev/null +++ b/pkg/tests/helpers.go @@ -0,0 +1,26 @@ +package tests + +import ( + "flag" +) + +// Update registers the '-update' flag for the test. +// +// This flag should be checked by golden file tests to see whether the golden +// files should be updated or not. The golden files should be deterministic. +// Use UpdateNonDeterminsticGoldenFiles instead, if they are not deterministic. +// +// To update all golden files, run the following command: +// +// go test ./... 
-update +// +// To update a specific package, run the following command: +// +// go test ./path/to/package -update +// +// The flag should be registered as a package global variable: +// +// var update = tests.UpdateGoldenFiles() +func UpdateGoldenFiles() *bool { + return flag.Bool("update", false, "set to regenerate the golden files") +} diff --git a/pkg/tests/test_utils.go b/pkg/tests/testdb/helperfunctions.go similarity index 98% rename from pkg/tests/test_utils.go rename to pkg/tests/testdb/helperfunctions.go index d568b117..df516f54 100644 --- a/pkg/tests/test_utils.go +++ b/pkg/tests/testdb/helperfunctions.go @@ -1,4 +1,4 @@ -package tests +package testdb import ( "context" diff --git a/pkg/tests/mockdb_for_testing.go b/pkg/tests/testdb/mockdb_for_testing.go similarity index 99% rename from pkg/tests/mockdb_for_testing.go rename to pkg/tests/testdb/mockdb_for_testing.go index 1e0ec5fc..06dd51da 100644 --- a/pkg/tests/mockdb_for_testing.go +++ b/pkg/tests/testdb/mockdb_for_testing.go @@ -1,4 +1,4 @@ -package tests +package testdb import ( "context" diff --git a/pkg/tests/testDB.go b/pkg/tests/testdb/testDB.go similarity index 99% rename from pkg/tests/testDB.go rename to pkg/tests/testdb/testDB.go index 4575ed95..81308817 100644 --- a/pkg/tests/testDB.go +++ b/pkg/tests/testdb/testDB.go @@ -1,4 +1,4 @@ -package tests +package testdb import ( "context" diff --git a/tests/testdata/2-SPs.json b/tests/testdata/2-SPs.json new file mode 100644 index 00000000..28e92cb7 --- /dev/null +++ b/tests/testdata/2-SPs.json @@ -0,0 +1 @@ +{"T":"[]","O":[{"T":"*sp","O":{"Policies":{"TrustedCA":["ca1","ca2"]},"TimeStamp":"2023-05-02T09:40:17+02:00","Subject":"domainname.com","CAName":"ca1","SerialNumber":4037200794235010051,"CASignature":"nmEjctgU98hQyR43KW84EsGS30tJSD4gwOlvmh/8l943ZIFArLyhhw==","RootCertSignature":"kHen0bcIYaGxHe+wTZYrUGtudXgZMsefapRNC55RG+lVG4iIWgvnig==","SPTs":[{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet 
you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"cIuAGWgQp6PEwoRXPc+yMOMX7CrGd7zCx4pYLDdcKrc2yIGDU8168w==","PoI":"+NpYjba39SgiQah1fyw8A1+0eLZrc6WqxP/9E7dFJhhhnRoJ/QtBCw==","STHSerialNumber":114378,"Signature":"e2pHQiXrj4X9wng4zpFn7mP9S9TfxSPMJVTUUd62WFWIxbGk0bvv1g=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"vNVMm/9KJcm2vbKPMZ6iUKiocW5nqPmLflxe5YqFGjpD2rB4KBSPOQ==","PoI":"jiKzbHeuR7WYZ5Vyzm9tmbL2gFlWCHzGERQg1VNxvDh5U8ga8EhJZw==","STHSerialNumber":114378,"Signature":"HzzW+0iCmiSXiHxOXzNN8l/KNJiwwMyhI8ErSU4KSUEdZFjUUngcDQ=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"aknFL07ew8EzJSpuG4T7sL/giNMpNx7qgf9jhEtz0DO5dkM/ZPu2rg==","PoI":"QNWytTKo5svQZ68N2/vOfMUpjOGDTWtMi3FWh5bD0TyN+rKdMfdbdA==","STHSerialNumber":114378,"Signature":"TokIBlevDHPLEB/glTfYw8Z0WjFP0aufNSiCG1Fev3WqHXioEOLCFQ=="}]}},{"T":"*sp","O":{"Policies":{"TrustedCA":["ca1","ca2"]},"TimeStamp":"2023-05-02T09:40:17+02:00","Subject":"domainname.com","CAName":"ca1","SerialNumber":3916589616287113937,"CASignature":"h83Ti3iPNpj8UTxhXSaURxVu1Vb3vqRe14HIoXMxchN1ZxlpwmvwSg==","RootCertSignature":"pjIDTWUsp/0uuA/vvR+A5lG1Mxpf9xsWc+5fsqf0LTm6MGxw0fWuag==","SPTs":[{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"LsUotCYOYbfxJSlM9aUxLfK6+1hWog+HGO/pmBXVq9lxGdEFcNToZQ==","PoI":"no2sa1DssKi9Gn/rO17XUR96br6DJDlqXD3lK9xx8/AAkY6s4k3i+g==","STHSerialNumber":114378,"Signature":"NBsavvZ/7fD6wlhDat56bixzNbTtG0dnAxDN7VN96Y99QL5lLlV9Iw=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet 
you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"8z3dXu5RL+2lgnC1lVXg4foT+2Ygu2TuLE1Si9L+bSxLFHmD1FSReg==","PoI":"FORkqLNl1amu6VftWf+UsscNVP8SbsxE9uVlohqY22v2Nx2dc3Nkng==","STHSerialNumber":114378,"Signature":"BAwmH8ycc8B+ux98RfsbCrAeBDFxzi5sN/pZlWxDHzJh1CF3GzXobg=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"ZWmjxlaCBgJv4gbsTZh1Jkt22dQogRZMUelL1oMhdyXToWkxKZpaSQ==","PoI":"Hxv/e9YzDmVtgerOpdq5j0xoHpMnYYi6cwVd0u7V83Ku2RDrobj2Vg==","STHSerialNumber":114378,"Signature":"gelEAAIa7l/pyPoJApVCPULr91KhAYz04vb1PkfGK+mVrZXvamKe2g=="}]}}]} \ No newline at end of file From 60942e973e35a92ee9814c9cfdf719ef6a595e32 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 2 May 2023 09:52:04 +0200 Subject: [PATCH 094/187] WIP continuing the responder UT. --- pkg/mapserver/responder/responder_test.go | 12 ++++++++++++ pkg/mapserver/updater/updater.go | 11 +++++++++++ pkg/util/io.go | 16 ++++++++++++++++ pkg/util/types.go | 16 ++++++++++++++++ 4 files changed, 55 insertions(+) create mode 100644 pkg/util/types.go diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index fe9436ba..296376e4 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -2,6 +2,7 @@ package responder import ( "context" + "os" "strings" "testing" "time" @@ -10,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/domain" @@ -51,6 +53,16 @@ func TestProofWithPoP(t *testing.T) { require.NoError(t, err) // Ingest two policies. 
+ data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") + require.NoError(t, err) + objs, err := util.LoadPoliciesFromRaw(data) + require.NoError(t, err) + sps, err := util.ToTypedSlice[*common.SP](objs) + require.NoError(t, err) + var expirations []*time.Time + require.Equal(t, len(objs), len(sps)) + err = updater.UpdatePolicies(ctx, conn, names, expirations, [][]byte{}) + require.NoError(t, err) // Coalescing of payloads. err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index e08246b8..2178d95c 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -281,6 +281,17 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) } +// UpdatePolicies takes a sequence of policies, the aliases associated with each one, and the +// expiration times, and updates the DB with them. +func UpdatePolicies(ctx context.Context, conn db.Conn, names [][]string, + expirations []*time.Time, pols [][]byte) error { + + // deleteme + // TODO + // TODO(juagargi) do it + return nil +} + func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { // How many domains to update? dirtyCount, err := conn.DirtyDomainsCount(ctx) diff --git a/pkg/util/io.go b/pkg/util/io.go index 842e0aa9..50aeeaf6 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -5,6 +5,7 @@ import ( "compress/gzip" "encoding/base64" "encoding/csv" + "fmt" "io/ioutil" "os" "strings" @@ -115,6 +116,21 @@ func LoadCertsAndChainsFromCSV( return } +// LoadPoliciesFromRaw can load RPCs, SPs, RCSRs, PCRevocations, SPRTs, and PSRs from their +// serialized form. +func LoadPoliciesFromRaw(b []byte) ([]any, error) { + obj, err := common.FromJSON(b) + if err != nil { + return nil, err + } + // The returned object should be of type list. 
+ if list, ok := obj.([]any); ok { + return list, nil + } + + return nil, fmt.Errorf("the content is of type %T instead of []any", obj) +} + // parseCertFromCSVField takes a row from a CSV encoding certs and chains in base64 and returns // the CT x509 Certificate or error. func parseCertFromCSVField(field string) (*ctx509.Certificate, error) { diff --git a/pkg/util/types.go b/pkg/util/types.go new file mode 100644 index 00000000..a7b84c95 --- /dev/null +++ b/pkg/util/types.go @@ -0,0 +1,16 @@ +package util + +import "fmt" + +func ToTypedSlice[T any](s []any) ([]T, error) { + t := make([]T, len(s)) + for i, e := range s { + if te, ok := e.(T); ok { + t[i] = te + } else { + return nil, fmt.Errorf("element at %d of type %T cannot be converted to %T", + i, e, *new(T)) + } + } + return t, nil +} From 16f3a6a641eaa6e9b2017ff6bfa9240872919632 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 3 May 2023 08:42:00 +0200 Subject: [PATCH 095/187] Use the IDs instead of payloads for proof. --- pkg/db/mysql/mysql.go | 6 +- pkg/mapserver/responder/responder_test.go | 2 +- tools/create_schema.sh | 74 ++++++++++++----------- 3 files changed, 44 insertions(+), 38 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index d20d1ef6..b01336cc 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -306,7 +306,7 @@ func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, last func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainID common.SHA256Output, ) (*common.SHA256Output, []byte, error) { - str := "SELECT cert_payload_id, cert_payload FROM domain_payloads WHERE domain_id = ?" + str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" 
var payloadID, payload []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) if err != nil && err != sql.ErrNoRows { @@ -319,7 +319,7 @@ func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID co ) (*common.SHA256Output, []byte, error) { // deleteme use the other field, not the certificates one! - str := "SELECT cert_payload_id, cert_payload FROM domain_payloads WHERE domain_id = ?" + str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" var payloadID, payload []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) if err != nil && err != sql.ErrNoRows { @@ -336,7 +336,7 @@ func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common if len(domainIDs) == 0 { return nil, nil } - str := "SELECT domain_id,cert_payload FROM domain_payloads WHERE domain_id IN " + + str := "SELECT domain_id,cert_ids FROM domain_payloads WHERE domain_id IN " + repeatStmt(1, len(domainIDs)) params := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 296376e4..a55f1349 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -23,7 +23,7 @@ import ( ) func TestProofWithPoP(t *testing.T) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Hour) + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() // DB will have the same name as the test function. diff --git a/tools/create_schema.sh b/tools/create_schema.sh index e987af72..e86a16fd 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -68,8 +68,9 @@ CMD=$(cat < Date: Wed, 3 May 2023 09:41:17 +0200 Subject: [PATCH 096/187] Add UT for the coalescing in DB. 
--- pkg/db/db.go | 2 +- pkg/db/mysql/mysql.go | 6 +- pkg/db/mysql/mysql_test.go | 150 +++++++++++++++++++++++++++++++++++++ 3 files changed, 154 insertions(+), 4 deletions(-) create mode 100644 pkg/db/mysql/mysql_test.go diff --git a/pkg/db/db.go b/pkg/db/db.go index ff9aa1cf..e5934baf 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -37,7 +37,7 @@ type Conn interface { // RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload // itself, given the domain ID. RetrieveDomainCertificatesPayload(ctx context.Context, id common.SHA256Output) ( - certPayloadID *common.SHA256Output, certPayload []byte, err error) + certIDsID *common.SHA256Output, certIDs []byte, err error) // RetrieveDomainPoliciesPayload returns the policy related payload for a given domain. // This includes the RPCs, SPs, etc. diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index b01336cc..859c3005 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -307,12 +307,12 @@ func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainI ) (*common.SHA256Output, []byte, error) { str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" 
- var payloadID, payload []byte - err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) + var certIDsID, certIDs []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&certIDsID, &certIDs) if err != nil && err != sql.ErrNoRows { return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) } - return (*common.SHA256Output)(payloadID), payload, nil + return (*common.SHA256Output)(certIDsID), certIDs, nil } func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID common.SHA256Output, diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go new file mode 100644 index 00000000..7e478874 --- /dev/null +++ b/pkg/db/mysql/mysql_test.go @@ -0,0 +1,150 @@ +package mysql_test + +import ( + "bytes" + "context" + "encoding/hex" + "math/rand" + "os" + "sort" + "testing" + "time" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" + "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" + "github.com/netsec-ethz/fpki/pkg/util" +) + +func TestCoalesceForDirtyDomains(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + defer cancelF() + + // DB will have the same name as the test function. + dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) + + // Create a new DB with that name. On exiting the function, it will be removed. + err := testdb.CreateTestDB(ctx, dbName) + require.NoError(t, err) + defer func() { + err = testdb.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() + + // Connect to the DB. 
+ conn, err := mysql.Connect(config) + require.NoError(t, err) + defer conn.Close() + + // Use two mock x509 chains: + certs, certIDs, parentCertIDs, certNames := buildTestCertHierarchy(t) + err = updater.UpdateCertsWithKeepExisting(ctx, conn, certNames, util.ExtractExpirations(certs), + certs, certIDs, parentCertIDs) + require.NoError(t, err) + + // Ingest two mock policies. + data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") + require.NoError(t, err) + objs, err := util.LoadPoliciesFromRaw(data) + require.NoError(t, err) + sps, err := util.ToTypedSlice[*common.SP](objs) + require.NoError(t, err) + var expirations []*time.Time + require.Equal(t, len(objs), len(sps)) + err = updater.UpdatePolicies(ctx, conn, certNames, expirations, [][]byte{}) + require.NoError(t, err) + + // Coalescing of payloads. + err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) + require.NoError(t, err) + + // Check the certificate coalescing: under leaf.com there must be 4 IDs, for the certs. + domainID := common.SHA256Hash32Bytes([]byte("leaf.com")) + gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesPayload(ctx, domainID) + require.NoError(t, err) + require.Len(t, gotCertIDs, common.SHA256Size*len(certs)) + expectedCertIDs, expectedCertIDsID := glueSortedIDsAndComputeItsID(certIDs) + t.Logf("expectedCertIDs: %s\n", hex.EncodeToString(expectedCertIDs)) + require.Equal(t, expectedCertIDs, gotCertIDs) + require.Equal(t, expectedCertIDsID, gotCertIDsID) +} + +// buildTestCertHierarchy returns the certificates, chains, and names for two mock certificate +// chains: the first chain is leaf.com->c1.com->c0.com , and the second chain is +// leaf.com->c0.com . +func buildTestCertHierarchy(t require.TestingT) ( + certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { + + // Create all certificates. 
+ certs = make([]*ctx509.Certificate, 4) + certs[0] = randomX509Cert(t, "c0.com") + certs[1] = randomX509Cert(t, "c1.com") + certs[2] = randomX509Cert(t, "leaf.com") + certs[3] = randomX509Cert(t, "leaf.com") + + // IDs: + IDs = make([]*common.SHA256Output, len(certs)) + for i, c := range certs { + id := common.SHA256Hash32Bytes(c.Raw) + IDs[i] = &id + } + + // Names: only c2 and c3 are leaves, the rest should be nil. + names = make([][]string, len(certs)) + names[2] = certs[2].DNSNames + names[3] = certs[3].DNSNames + + // Parent IDs. + parentIDs = make([]*common.SHA256Output, len(certs)) + // First chain: + parentIDs[1] = IDs[0] + parentIDs[2] = IDs[1] + // Second chain: + parentIDs[3] = IDs[0] + + return +} + +func glueSortedIDsAndComputeItsID(certIDs []*common.SHA256Output) ([]byte, *common.SHA256Output) { + // Copy slice to avoid mutating of the original. + IDs := append(certIDs[:0:0], certIDs...) + // Sort the IDs. + sort.Slice(IDs, func(i, j int) bool { + return bytes.Compare(IDs[i][:], IDs[j][:]) == -1 + }) + // Glue the sorted IDs. + gluedIDs := make([]byte, common.SHA256Size*len(IDs)) + for i, id := range IDs { + copy(gluedIDs[i*common.SHA256Size:], id[:]) + } + // Compute the hash of the glued IDs. + id := common.SHA256Hash32Bytes(gluedIDs) + return gluedIDs, &id +} + +func randomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { + return &ctx509.Certificate{ + DNSNames: []string{domain}, + Subject: pkix.Name{ + CommonName: domain, + }, + NotBefore: util.TimeFromSecs(0), + NotAfter: time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), + Raw: randomBytes(t, 10), + } +} + +func randomBytes(t require.TestingT, size int) []byte { + buff := make([]byte, size) + n, err := rand.Read(buff) + require.NoError(t, err) + require.Equal(t, size, n) + return buff +} From 4a31bfc9a3d73b28684e559b7f8d30480ee3635e Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Wed, 3 May 2023 13:51:52 +0200 Subject: [PATCH 097/187] When a test DB fails, return the stdout and stderr. --- pkg/db/mysql/mysql_test.go | 2 +- pkg/mapserver/responder/responder_test.go | 2 +- pkg/mapserver/updater/updater.go | 6 +++--- pkg/tests/testdb/testDB.go | 24 +++++++++++++++++++++++ 4 files changed, 29 insertions(+), 5 deletions(-) diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 7e478874..891eb089 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -58,7 +58,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { require.NoError(t, err) var expirations []*time.Time require.Equal(t, len(objs), len(sps)) - err = updater.UpdatePolicies(ctx, conn, certNames, expirations, [][]byte{}) + err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, certNames, expirations, [][]byte{}, nil) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index a55f1349..53ad2edd 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -61,7 +61,7 @@ func TestProofWithPoP(t *testing.T) { require.NoError(t, err) var expirations []*time.Time require.Equal(t, len(objs), len(sps)) - err = updater.UpdatePolicies(ctx, conn, names, expirations, [][]byte{}) + err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, names, expirations, [][]byte{}) require.NoError(t, err) // Coalescing of payloads. 
diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 2178d95c..2ad0f532 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -281,10 +281,10 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) } -// UpdatePolicies takes a sequence of policies, the aliases associated with each one, and the +// UpdatePoliciesWithKeepExisting takes a sequence of policies, the aliases associated with each one, and the // expiration times, and updates the DB with them. -func UpdatePolicies(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, pols [][]byte) error { +func UpdatePoliciesWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, + expirations []*time.Time, policies [][]byte, policyIDs []*common.SHA256Output) error { // deleteme // TODO diff --git a/pkg/tests/testdb/testDB.go b/pkg/tests/testdb/testDB.go index 81308817..c7009cf8 100644 --- a/pkg/tests/testdb/testDB.go +++ b/pkg/tests/testdb/testDB.go @@ -25,6 +25,27 @@ func CreateTestDB(ctx context.Context, dbName string) error { return err } + // Collect the output in case of error. + var output string + { + stderr, err := cmd.StderrPipe() + if err != nil { + return err + } + stdout, err := cmd.StdoutPipe() + if err != nil { + return err + } + go func() { + multi := io.MultiReader(stderr, stdout) + b, err := io.ReadAll(multi) + output = string(b) + if err != nil { + output += "\nError reading stderr and stdout" + } + }() + } + // Start the command. err = cmd.Start() if err != nil { @@ -50,6 +71,9 @@ func CreateTestDB(ctx context.Context, dbName string) error { // Get the exit code. err = cmd.Wait() if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + return fmt.Errorf("exit error: %w. 
STDERR+STDOUT: %s", exitErr, output) + } return err } From 2e4bbe36b4557aaa1c8952bef5cf6e0f2a39f836 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 3 May 2023 22:15:08 +0200 Subject: [PATCH 098/187] PolicyObjectBase has a Raw field with []byte. --- pkg/common/json.go | 153 +++++++++++++----------- pkg/common/json_test.go | 258 +++++++++++++++++----------------------- pkg/common/structure.go | 27 ++--- 3 files changed, 204 insertions(+), 234 deletions(-) diff --git a/pkg/common/json.go b/pkg/common/json.go index 554c704b..7de9faac 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -11,7 +11,8 @@ import ( ) type serializableObjectBase struct { - O any + O any // actual object to Marshal/Unmarshal + skipRaw bool // flag controlling JSON copying into PolicyObjectBase.Raw } func ToJSON(obj any) ([]byte, error) { @@ -23,12 +24,23 @@ func ToJSON(obj any) ([]byte, error) { return json.Marshal(obj) } -func FromJSON(data []byte) (any, error) { +func FromJSON(data []byte, opts ...FromJSONModifier) (any, error) { var base serializableObjectBase + for _, mod := range opts { + mod(&base) + } err := json.Unmarshal(data, &base) return base.O, err } +type FromJSONModifier func(*serializableObjectBase) + +// WithSkipCopyJSONIntoPolicyObjects avoids copying the raw JSON into each one of the +// objects that aggregate a PolicyObjectBase (RPC, SP, etc). +func WithSkipCopyJSONIntoPolicyObjects(o *serializableObjectBase) { + o.skipRaw = true +} + func (o serializableObjectBase) MarshalJSON() ([]byte, error) { T, O, err := o.marshalJSON(o.O) if err != nil { @@ -69,7 +81,7 @@ func (*serializableObjectBase) marshalJSON(obj any) (string, []byte, error) { T = "logrootv1" default: valOf := reflect.ValueOf(obj) - switch valOf.Type().Kind() { + switch valOf.Kind() { case reflect.Pointer: // Dereference and convert to "any". 
T, O, err := (*serializableObjectBase)(nil).marshalJSON(valOf.Elem().Interface()) @@ -106,25 +118,55 @@ func (o *serializableObjectBase) UnmarshalJSON(data []byte) error { return err } // Parse the T,O that we received. - ok, obj, err := unmarshalTypeObject(tmp.T, tmp.O) - if !ok { - if len(tmp.T) > 0 && tmp.T[0] == '*' { - // Pointer, try again just once. - tmp.T = tmp.T[1:] - _, obj, err = unmarshalTypeObject(tmp.T, tmp.O) - // Now convert to a pointer to the original object. - objPtr := reflect.New(reflect.TypeOf(obj)) - objPtr.Elem().Set(reflect.ValueOf(obj)) // assign original object - obj = objPtr.Interface() + wasPtr := false + ok, obj, err := o.unmarshalTypeObject(tmp.T, tmp.O) + if !ok && len(tmp.T) > 0 && tmp.T[0] == '*' { + // It looks like a pointer, try again just once. + wasPtr = true + tmp.T = tmp.T[1:] // Remove the * + ok, obj, err = o.unmarshalTypeObject(tmp.T, tmp.O) + } + + // Almost everything is done now. We should 1. do a obj = &obj and 2. copy the raw JSON + // into the Raw field of the structure. + shouldCopyJSON := !o.skipRaw && reflect.ValueOf(obj).Kind() != reflect.Slice + if ok && (wasPtr || shouldCopyJSON) { // skip if no JSON copy and it wasn't a pointer + // Until here, obj is never a pointer. Convert obj to a pointer to obj. + objPtr := reflect.New(reflect.TypeOf(obj)) // new pointer of T + objPtr.Elem().Set(reflect.ValueOf(obj)) // assign original object + obj = objPtr.Interface() // obj is now a pointer to the original + + // If we should copy JSON to Raw: + if shouldCopyJSON { + // Find out if the object is a pointer to a PolicyObjectBase like structure. + base := reflect.Indirect(reflect.ValueOf(obj)).FieldByName("PolicyObjectBase") + if base != (reflect.Value{}) { + // It is a PolicyObjectBase like object. Check the Raw field (should always be true). + if raw := base.FieldByName("RawJSON"); raw != (reflect.Value{}) { + // Set its value to the JSON data. 
+ raw.Set(reflect.ValueOf(data)) + } else { + // This should never happen, and the next line should ensure it: + _ = PolicyObjectBase{}.RawJSON + // But terminate the control flow anyways with a panic. + panic("logic error: structure PolicyObjectBase has lost its Raw member") + } + } + } + + // If the object was not a pointer, and it had been converted to a pointer, revert. + if !wasPtr { + obj = reflect.Indirect(reflect.ValueOf(obj)).Interface() } } + o.O = obj return err } // unmarshalTypeObject returns true if the function understood the type in T, and the object with // the specific type represented by T. -func unmarshalTypeObject(T string, data []byte) (bool, any, error) { +func (o *serializableObjectBase) unmarshalTypeObject(T string, data []byte) (bool, any, error) { var obj any var err error switch T { @@ -140,7 +182,9 @@ func unmarshalTypeObject(T string, data []byte) (bool, any, error) { obj = list for i, objData := range tmp { // Is this an embedded SerializableObjectBase? - tmp := serializableObjectBase{} + tmp := serializableObjectBase{ + skipRaw: o.skipRaw, + } err = json.Unmarshal(objData, &tmp) if err != nil { err = fmt.Errorf("unmarshaling slice, element at %d failed: %w", i, err) @@ -177,42 +221,32 @@ func unmarshalTypeObject(T string, data []byte) (bool, any, error) { func inflateObj[T any](data []byte) (any, error) { var tmp T err := json.Unmarshal(data, &tmp) + return tmp, err } -// -// -// -// -// -// -// -// - -// func JSONToPoI(poiBytes []byte) ([]*trillian.Proof, error) { -// po, err := FromJSON(poiBytes) -// if err != nil { -// return nil, fmt.Errorf("JsonBytesToPoI | Unmarshal | %w", err) -// } -// result, ok := po.([]*trillian.Proof) -// if !ok { -// return nil, fmt.Errorf("JsonFileToPoI | object is %T", po) -// } -// return result, nil -// } - -// // JSONToLogRoot: Bytes -> log root in json -// func JSONToLogRoot(logRootBytes []byte) (*trilliantypes.LogRootV1, error) { -// po, err := FromJSON(logRootBytes) -// if err != nil { -// return 
nil, fmt.Errorf("JsonBytesToLogRoot | Unmarshal | %w", err) -// } -// result, ok := po.(*trilliantypes.LogRootV1) -// if !ok { -// return nil, fmt.Errorf("JsonFileToLogRoot | object is %T", po) -// } -// return result, nil -// } +// ToJSONFile serializes any supported type to a file, using JSON. +func ToJSONFile(s any, filePath string) error { + bytes, err := ToJSON(s) + if err != nil { + return fmt.Errorf("JsonStructToFile | ToJSON | %w", err) + } + + err = ioutil.WriteFile(filePath, bytes, 0644) + if err != nil { + return fmt.Errorf("JsonStructToFile | WriteFile | %w", err) + } + return nil +} + +func FromJSONFile(filePath string) (any, error) { + data, err := ioutil.ReadFile(filePath) + if err != nil { + return nil, err + } + + return FromJSON(data) +} // JsonFileToRPC: read json files and unmarshal it to Root Policy Certificate func JsonFileToRPC(filePath string) (*RPC, error) { @@ -283,26 +317,3 @@ func JsonFileToSP(filePath string) (*SP, error) { } return o, err } - -// ToJSONFile serializes any supported type to a file, using JSON. 
-func ToJSONFile(s any, filePath string) error { - bytes, err := ToJSON(s) - if err != nil { - return fmt.Errorf("JsonStructToFile | ToJSON | %w", err) - } - - err = ioutil.WriteFile(filePath, bytes, 0644) - if err != nil { - return fmt.Errorf("JsonStructToFile | WriteFile | %w", err) - } - return nil -} - -func FromJSONFile(filePath string) (any, error) { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - - return FromJSON(data) -} diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index f741ed4c..594d3964 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -1,168 +1,38 @@ package common import ( - "fmt" - "os" - "path" + "bytes" + "strings" "testing" - "time" "github.com/google/trillian" trilliantypes "github.com/google/trillian/types" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -//------------------------------------------------------ -// tests for json.go -//------------------------------------------------------ - -// TestEncodeAndDecodeOfSPT: SPT -> files -> SPT -func TestEncodeAndDecodeOfSPT(t *testing.T) { - tempFile := path.Join("./", "spt.json") - defer os.Remove(tempFile) - - spt := &SPT{ - Version: 12314, - Subject: "you are funny", - CAName: "hihihihihihi", - LogID: 123412, - CertType: 0x11, - AddedTS: nowWithoutMonotonic(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), - STHSerialNumber: 7689, - Signature: generateRandomBytes(), - } - - err := ToJSONFile(spt, tempFile) - require.NoError(t, err, "Json Struct To File error") - - deserializedSPT, err := JsonFileToSPT(tempFile) - require.NoError(t, err, "Json File To SPT error") - - assert.Equal(t, spt, deserializedSPT) - assert.True(t, deserializedSPT.Equal(*spt), "SPT serialized and deserialized error") -} - -// TestEncodeAndDecodeOfRPC: RPC -> files -> RPC -func TestEncodeAndDecodeOfRPC(t *testing.T) { - tempFile := path.Join("./", "rpc.json") - defer 
os.Remove(tempFile) - - spt1 := &SPT{ - Version: 12313, - Subject: "hihihihihhi", - CAName: "I'm honest CA, nice to meet you", - LogID: 1231323, - CertType: 0x11, - AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), - STHSerialNumber: 131678, - Signature: generateRandomBytes(), - } - - spt2 := &SPT{ - Version: 12368713, - Subject: "hohohoho", - CAName: "I'm malicious CA, nice to meet you", - LogID: 1324123, - CertType: 0x21, - AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), - STHSerialNumber: 114378, - Signature: generateRandomBytes(), - } - - rpc := &RPC{ - SerialNumber: 1729381, - Subject: "bad domain", - Version: 1729381, - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), - NotBefore: time.Now(), - NotAfter: time.Now(), - CAName: "bad domain", - SignatureAlgorithm: SHA256, - TimeStamp: time.Now(), - PRCSignature: generateRandomBytes(), - CASignature: generateRandomBytes(), - SPTs: []SPT{*spt1, *spt2}, - } - - err := ToJSONFile(rpc, tempFile) - require.NoError(t, err, "Json Struct To File error") - - deserializedSPT, err := JsonFileToRPC(tempFile) - require.NoError(t, err, "Json File To RPC error") - - assert.True(t, deserializedSPT.Equal(rpc), "RPC serialized and deserialized error") -} - -// TestEncodeAndDecodeOfPC: PC -> file -> PC -func TestEncodeAndDecodeOfPC(t *testing.T) { - tempFile := path.Join("./", "pc.json") - defer os.Remove(tempFile) - - spt := SPT{ - Version: 12368713, - Subject: "hohohoho", - CAName: "I'm malicious CA, nice to meet you", - LogID: 1324123, - CertType: 0x21, - AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), - STHSerialNumber: 114378, - Signature: generateRandomBytes(), - } - - policy := Policy{ - TrustedCA: []string{"my CA"}, - } - - pc := SP{ - Policies: policy, - TimeStamp: time.Now(), - Subject: "hihihi", - CAName: "hihihi", - SerialNumber: 1, - CASignature: []byte{1, 4, 2, 1, 4}, - RootCertSignature: []byte{1, 4, 2, 1, 4}, 
- SPTs: []SPT{spt}, - } - - err := ToJSONFile(&pc, tempFile) - require.NoError(t, err, "Json Struct To File error") - - deserializedPC, err := JsonFileToSP(tempFile) - require.NoError(t, err, "Json File To SPT error") - - assert.True(t, deserializedPC.Equal(pc), "PC serialized and deserialized error") -} - // TestPolicyObjects checks that the structure types in the test cases can be converted to JSON and // back, using the functions ToJSON and FromJSON. // It checks after deserialization that the objects are equal. func TestPolicyObjects(t *testing.T) { - cases := []struct { + cases := map[string]struct { data any }{ - { + "rpcPtr": { data: randomRPC(), }, - { + "rpcValue": { data: *randomRPC(), }, - { + "rcsr": { data: randomRCSR(), }, - { + "sp": { data: randomSP(), }, - { + "spt": { + data: *randomSPT(), + }, + "list": { data: []any{ randomRPC(), randomRCSR(), @@ -173,7 +43,7 @@ func TestPolicyObjects(t *testing.T) { randomLogRootV1(), }, }, - { + "list_embedded": { data: []any{ randomRPC(), []any{ @@ -186,16 +56,30 @@ func TestPolicyObjects(t *testing.T) { }, }, }, + "multiListPtr": { + data: &[]any{ + randomRPC(), + *randomRPC(), + []any{ + randomSP(), + *randomSP(), + &[]any{ + randomSPT(), + *randomSPT(), + }, + }, + }, + }, } - for i, tc := range cases { - i, tc := i, tc - t.Run(fmt.Sprintf("case_%d", i), func(t *testing.T) { + for name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { t.Parallel() // Serialize. data, err := ToJSON(tc.data) require.NoError(t, err) // Deserialize. - deserialized, err := FromJSON(data) + deserialized, err := FromJSON(data, WithSkipCopyJSONIntoPolicyObjects) require.NoError(t, err) // Compare. require.Equal(t, tc.data, deserialized) @@ -203,6 +87,88 @@ func TestPolicyObjects(t *testing.T) { } } +// TestPolicyObjectBaseRaw checks that the Raw field of the PolicyObjectBase for any PolicyObject +// that is rebuilt using our functions contains the original JSON. 
+func TestPolicyObjectBaseRaw(t *testing.T) { + // Empty RPC to JSON. + testCases := map[string]struct { + obj any // Thing to serialize and deserialize and check Raw. + rawElemsCount int // Expected number of Raw elements inside. + getRawElemsFcn func(obj any) [][]byte // Return the Raw components of this thing. + }{ + "rpc": { + obj: randomRPC(), + rawElemsCount: 1, + getRawElemsFcn: func(obj any) [][]byte { + rpc := obj.(*RPC) + return [][]byte{rpc.RawJSON} + }, + }, + "spPtr": { + obj: randomSP(), + rawElemsCount: 1, + getRawElemsFcn: func(obj any) [][]byte { + sp := obj.(*SP) + return [][]byte{sp.RawJSON} + }, + }, + "spValue": { + obj: *randomSP(), + rawElemsCount: 1, + getRawElemsFcn: func(obj any) [][]byte { + sp := obj.(SP) + return [][]byte{sp.RawJSON} + }, + }, + "list": { + obj: []any{ + randomSP(), + randomRPC(), + }, + rawElemsCount: 2, + getRawElemsFcn: func(obj any) [][]byte { + l := obj.([]any) + return [][]byte{ + l[0].(*SP).RawJSON, + l[1].(*RPC).RawJSON, + } + }, + }, + } + for name, tc := range testCases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + // Serialize. + data, err := ToJSON(tc.obj) + require.NoError(t, err) + // Deserialize. + obj, err := FromJSON(data) + require.NoError(t, err) + t.Logf("This object is of type %T", obj) + raws := tc.getRawElemsFcn(obj) + require.Len(t, raws, tc.rawElemsCount) + // Log facts about this object for debug purposes in case the test fails. + allRaw := make([]string, tc.rawElemsCount) + for i, raw := range raws { + allRaw[i] = string(raw) + } + t.Logf("This object has this JSON:\n----------\n%s\n----------", + strings.Join(allRaw, "")) + // Each one of the raw bytes should be a substring of the JSON data, in order. 
+ offset := 0 + for i, raw := range raws { + require.NotEmpty(t, raw, "bad raw JSON for subelement %d", i) + idx := bytes.Index(data[offset:], raw) // if not found, -1 is returned + require.GreaterOrEqual(t, idx, 0) + offset = idx + } + // We could check that the complete JSON is an aggregation of the elements' JSON plus + // maybe some "list" indicator (sometimes). + }) + } +} + func randomTrillianProof() *trillian.Proof { return &trillian.Proof{ LeafIndex: 1, diff --git a/pkg/common/structure.go b/pkg/common/structure.go index aeab3618..f2ec7b8e 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -12,13 +12,15 @@ type PolicyObject interface { __PolicyObjectMarkerMethod() } -// PolicyObjectList is a list of PolicyObject's, which is a PolicyObject in and of itself. -type PolicyObjectList []PolicyObject +type PolicyObjectBase struct { + RawJSON []byte `json:"-"` // omit from JSON (un)marshaling +} -func (PolicyObjectList) __PolicyObjectMarkerMethod() {} +func (PolicyObjectBase) __PolicyObjectMarkerMethod() {} // root certificate signing request type RCSR struct { + PolicyObjectBase Subject string `json:",omitempty"` Version int `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` @@ -29,10 +31,9 @@ type RCSR struct { Signature []byte `json:",omitempty"` } -func (RCSR) __PolicyObjectMarkerMethod() {} - // root policy certificate type RPC struct { + PolicyObjectBase SerialNumber int `json:",omitempty"` Subject string `json:",omitempty"` Version int `json:",omitempty"` @@ -48,17 +49,15 @@ type RPC struct { SPTs []SPT `json:",omitempty"` } -func (RPC) __PolicyObjectMarkerMethod() {} - // PCRevocation is for now empty. type PCRevocation struct { + PolicyObjectBase // TODO(juagargi) define the revocation. 
} -func (PCRevocation) __PolicyObjectMarkerMethod() {} - // signed policy timestamp type SPT struct { + PolicyObjectBase Version int `json:",omitempty"` Subject string `json:",omitempty"` CAName string `json:",omitempty"` @@ -71,18 +70,15 @@ type SPT struct { Signature []byte `json:",omitempty"` } -func (SPT) __PolicyObjectMarkerMethod() {} - // signed policy revocation timestamp type SPRT struct { SPT Reason int `json:",omitempty"` } -func (SPRT) __PolicyObjectMarkerMethod() {} - // Signed Policy type SP struct { + PolicyObjectBase Policies Policy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` Subject string `json:",omitempty"` @@ -93,18 +89,15 @@ type SP struct { SPTs []SPT `json:",omitempty"` } -func (SP) __PolicyObjectMarkerMethod() {} - // Policy Signing Request type PSR struct { + PolicyObjectBase Policies Policy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` DomainName string `json:",omitempty"` RootCertSignature []byte `json:",omitempty"` } -func (PSR) __PolicyObjectMarkerMethod() {} - // Domain policy type Policy struct { TrustedCA []string `json:",omitempty"` From 359514051befbc700821fe0f2a72880ef20cfd1a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 3 May 2023 22:37:20 +0200 Subject: [PATCH 099/187] Extending usability of policy objects. --- pkg/common/structure.go | 2 ++ pkg/db/mysql/mysql_test.go | 7 ++----- pkg/mapserver/responder/responder_test.go | 8 ++------ pkg/mapserver/updater/updater.go | 6 +++++- pkg/util/io.go | 19 +++++++++++++------ pkg/util/types.go | 6 +++++- 6 files changed, 29 insertions(+), 19 deletions(-) diff --git a/pkg/common/structure.go b/pkg/common/structure.go index f2ec7b8e..4fdb99a2 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -10,6 +10,7 @@ import ( // for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. 
type PolicyObject interface { __PolicyObjectMarkerMethod() + Raw() []byte } type PolicyObjectBase struct { @@ -17,6 +18,7 @@ type PolicyObjectBase struct { } func (PolicyObjectBase) __PolicyObjectMarkerMethod() {} +func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } // root certificate signing request type RCSR struct { diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 891eb089..b3676cfd 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -52,13 +52,10 @@ func TestCoalesceForDirtyDomains(t *testing.T) { // Ingest two mock policies. data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) - objs, err := util.LoadPoliciesFromRaw(data) - require.NoError(t, err) - sps, err := util.ToTypedSlice[*common.SP](objs) + pols, polIDs, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) var expirations []*time.Time - require.Equal(t, len(objs), len(sps)) - err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, certNames, expirations, [][]byte{}, nil) + err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, certNames, expirations, pols, polIDs) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 53ad2edd..3105bab5 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/domain" @@ -55,13 +54,10 @@ func TestProofWithPoP(t *testing.T) { // Ingest two policies. 
data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) - objs, err := util.LoadPoliciesFromRaw(data) - require.NoError(t, err) - sps, err := util.ToTypedSlice[*common.SP](objs) + pols, polIDs, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) var expirations []*time.Time - require.Equal(t, len(objs), len(sps)) - err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, names, expirations, [][]byte{}) + err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, names, expirations, pols, polIDs) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 2ad0f532..159b8323 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -284,8 +284,12 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st // UpdatePoliciesWithKeepExisting takes a sequence of policies, the aliases associated with each one, and the // expiration times, and updates the DB with them. func UpdatePoliciesWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, policies [][]byte, policyIDs []*common.SHA256Output) error { + expirations []*time.Time, policies []common.PolicyObject, policyIDs []*common.SHA256Output) error { + payloads := make([][]byte, len(policies)) + for i, pol := range policies { + payloads[i] = pol.Raw() + } // deleteme // TODO // TODO(juagargi) do it diff --git a/pkg/util/io.go b/pkg/util/io.go index 50aeeaf6..09bb37e8 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -5,7 +5,6 @@ import ( "compress/gzip" "encoding/base64" "encoding/csv" - "fmt" "io/ioutil" "os" "strings" @@ -118,17 +117,25 @@ func LoadCertsAndChainsFromCSV( // LoadPoliciesFromRaw can load RPCs, SPs, RCSRs, PCRevocations, SPRTs, and PSRs from their // serialized form. 
-func LoadPoliciesFromRaw(b []byte) ([]any, error) { +func LoadPoliciesFromRaw(b []byte) ([]common.PolicyObject, []*common.SHA256Output, error) { obj, err := common.FromJSON(b) if err != nil { - return nil, err + return nil, nil, err } // The returned object should be of type list. - if list, ok := obj.([]any); ok { - return list, nil + pols, err := ToTypedSlice[common.PolicyObject](obj) + if err != nil { + return nil, nil, err } - return nil, fmt.Errorf("the content is of type %T instead of []any", obj) + ids := make([]*common.SHA256Output, len(pols)) + for i, pol := range pols { + id := common.SHA256Hash32Bytes(pol.Raw()) + ids[i] = &id + } + + return pols, ids, nil + } // parseCertFromCSVField takes a row from a CSV encoding certs and chains in base64 and returns diff --git a/pkg/util/types.go b/pkg/util/types.go index a7b84c95..48e5e756 100644 --- a/pkg/util/types.go +++ b/pkg/util/types.go @@ -2,7 +2,11 @@ package util import "fmt" -func ToTypedSlice[T any](s []any) ([]T, error) { +func ToTypedSlice[T any](obj any) ([]T, error) { + s, ok := obj.([]any) + if !ok { + return nil, fmt.Errorf("the content is of type %T instead of []any", obj) + } t := make([]T, len(s)) for i, e := range s { if te, ok := e.(T); ok { From e40a9217222c2ac16154c79d1ffe53784174c618 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 3 May 2023 22:45:27 +0200 Subject: [PATCH 100/187] PolicyObjectBase has a Subject. --- pkg/common/crypto.go | 8 +++- pkg/common/crypto_test.go | 20 ++++++--- pkg/common/structure.go | 6 +-- pkg/common/structure_test.go | 66 +++++++++++++++++++--------- pkg/mapserver/logpicker/logpicker.go | 8 +++- 5 files changed, 72 insertions(+), 36 deletions(-) diff --git a/pkg/common/crypto.go b/pkg/common/crypto.go index 576f82c9..d7add77f 100644 --- a/pkg/common/crypto.go +++ b/pkg/common/crypto.go @@ -128,7 +128,9 @@ func RCSRVerifyRPCSignature(rcsr *RCSR, rpc *RPC) error { // RCSRGenerateRPC: called by PCA. 
Sign the RCSR and generate RPC; SPT field is (should be) empty func RCSRGenerateRPC(rcsr *RCSR, notBefore time.Time, serialNumber int, caPrivKey *rsa.PrivateKey, caName string) (*RPC, error) { rpc := &RPC{ - Subject: rcsr.Subject, + PolicyObjectBase: PolicyObjectBase{ + Subject: rcsr.Subject, + }, Version: rcsr.Version, PublicKeyAlgorithm: rcsr.PublicKeyAlgorithm, PublicKey: rcsr.PublicKey, @@ -219,8 +221,10 @@ func VerifyPSRUsingRPC(psr *PSR, rpc *RPC) error { // CAVerifySPAndSign: verify the signature and sign the signature func CASignSP(psr *PSR, caPrivKey *rsa.PrivateKey, caName string, serialNum int) (*SP, error) { sp := &SP{ + PolicyObjectBase: PolicyObjectBase{ + Subject: psr.DomainName, + }, Policies: psr.Policies, - Subject: psr.DomainName, RootCertSignature: psr.RootCertSignature, TimeStamp: time.Now(), CAName: caName, diff --git a/pkg/common/crypto_test.go b/pkg/common/crypto_test.go index 5c7b44ea..4c2b89a5 100644 --- a/pkg/common/crypto_test.go +++ b/pkg/common/crypto_test.go @@ -15,7 +15,9 @@ func TestSignatureOfRCSR(t *testing.T) { require.NoError(t, err, "load RSA key error") test := &RCSR{ - Subject: "this is a test", + PolicyObjectBase: PolicyObjectBase{ + Subject: "this is a test", + }, Version: 44, TimeStamp: time.Now(), PublicKeyAlgorithm: RSA, @@ -45,7 +47,9 @@ func TestIssuanceOfRPC(t *testing.T) { require.NoError(t, err, "Load RSA Key Pair From File error") rcsr := &RCSR{ - Subject: "this is a test", + PolicyObjectBase: PolicyObjectBase{ + Subject: "this is a test", + }, Version: 44, TimeStamp: time.Now(), PublicKeyAlgorithm: RSA, @@ -97,7 +101,9 @@ func TestIssuanceOfSP(t *testing.T) { require.NoError(t, err, "Load RSA Key Pair From File error") rcsr := &RCSR{ - Subject: "this is a test", + PolicyObjectBase: PolicyObjectBase{ + Subject: "this is a test", + }, Version: 44, TimeStamp: time.Now(), PublicKeyAlgorithm: RSA, @@ -158,9 +164,11 @@ func TestIssuanceOfSP(t *testing.T) { require.NoError(t, err, "VerifyCASigInSP error") } 
-//------------------------------------------------------------- -// funcs for testing -//------------------------------------------------------------- +// ------------------------------------------------------------- +// +// funcs for testing +// +// ------------------------------------------------------------- func generateRandomBytes() []byte { token := make([]byte, 40) rand.Read(token) diff --git a/pkg/common/structure.go b/pkg/common/structure.go index 4fdb99a2..9107946b 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -15,6 +15,7 @@ type PolicyObject interface { type PolicyObjectBase struct { RawJSON []byte `json:"-"` // omit from JSON (un)marshaling + Subject string `json:",omitempty"` } func (PolicyObjectBase) __PolicyObjectMarkerMethod() {} @@ -23,7 +24,6 @@ func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } // root certificate signing request type RCSR struct { PolicyObjectBase - Subject string `json:",omitempty"` Version int `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` @@ -37,7 +37,6 @@ type RCSR struct { type RPC struct { PolicyObjectBase SerialNumber int `json:",omitempty"` - Subject string `json:",omitempty"` Version int `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` PublicKey []byte `json:",omitempty"` @@ -61,7 +60,6 @@ type PCRevocation struct { type SPT struct { PolicyObjectBase Version int `json:",omitempty"` - Subject string `json:",omitempty"` CAName string `json:",omitempty"` LogID int `json:",omitempty"` CertType uint8 `json:",omitempty"` @@ -83,7 +81,6 @@ type SP struct { PolicyObjectBase Policies Policy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` - Subject string `json:",omitempty"` CAName string `json:",omitempty"` SerialNumber int `json:",omitempty"` CASignature []byte `json:",omitempty"` @@ -93,7 +90,6 @@ type SP struct { // Policy Signing Request type PSR struct { - PolicyObjectBase 
Policies Policy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` DomainName string `json:",omitempty"` diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index 29cb437d..b8fbadc1 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -26,7 +26,9 @@ func TestGenerateGoldenFiles(t *testing.T) { // TestEqual: Equal funcs for every structure func TestEqual(t *testing.T) { rcsr := &RCSR{ - Subject: "bandqhvdbdlwnd", + PolicyObjectBase: PolicyObjectBase{ + Subject: "bandqhvdbdlwnd", + }, Version: 6789, TimeStamp: time.Now(), PublicKeyAlgorithm: RSA, @@ -39,8 +41,10 @@ func TestEqual(t *testing.T) { assert.True(t, rcsr.Equal(rcsr), "RCSR Equal() error") spt1 := SPT{ - Version: 12313, - Subject: "hihihihihhi", + Version: 12313, + PolicyObjectBase: PolicyObjectBase{ + Subject: "hihihihihhi", + }, CAName: "I'm honest CA, nice to meet you", LogID: 1231323, CertType: 0x11, @@ -52,8 +56,10 @@ func TestEqual(t *testing.T) { } spt2 := SPT{ - Version: 12368713, - Subject: "hohohoho", + Version: 12368713, + PolicyObjectBase: PolicyObjectBase{ + Subject: "hohohoho", + }, CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, CertType: 0x21, @@ -68,8 +74,10 @@ func TestEqual(t *testing.T) { sprt := &SPRT{ SPT: SPT{ - Version: 12314, - Subject: "bad domain", + Version: 12314, + PolicyObjectBase: PolicyObjectBase{ + Subject: "bad domain", + }, CAName: "I'm malicious CA, nice to meet you", LogID: 1729381, CertType: 0x21, @@ -85,8 +93,10 @@ func TestEqual(t *testing.T) { assert.True(t, sprt.Equal(sprt), "SPRT Equal() error") rpc := &RPC{ - SerialNumber: 1729381, - Subject: "bad domain", + SerialNumber: 1729381, + PolicyObjectBase: PolicyObjectBase{ + Subject: "bad domain", + }, Version: 1729381, PublicKeyAlgorithm: RSA, PublicKey: generateRandomBytes(), @@ -106,8 +116,10 @@ func TestEqual(t *testing.T) { // TestJsonReadWrite: RPC -> file -> RPC, then RPC.Equal(RPC) func TestJsonReadWrite(t *testing.T) { spt1 := &SPT{ - 
Version: 12313, - Subject: "hihihihihhi", + Version: 12313, + PolicyObjectBase: PolicyObjectBase{ + Subject: "hihihihihhi", + }, CAName: "I'm honest CA, nice to meet you", LogID: 1231323, CertType: 0x11, @@ -119,8 +131,10 @@ func TestJsonReadWrite(t *testing.T) { } spt2 := &SPT{ - Version: 12368713, - Subject: "hohohoho", + Version: 12368713, + PolicyObjectBase: PolicyObjectBase{ + Subject: "hohohoho", + }, CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, CertType: 0x21, @@ -132,8 +146,10 @@ func TestJsonReadWrite(t *testing.T) { } rpc := &RPC{ - SerialNumber: 1729381, - Subject: "bad domain", + SerialNumber: 1729381, + PolicyObjectBase: PolicyObjectBase{ + Subject: "bad domain", + }, Version: 1729381, PublicKeyAlgorithm: RSA, PublicKey: generateRandomBytes(), @@ -160,8 +176,10 @@ func TestJsonReadWrite(t *testing.T) { func randomRPC() *RPC { return &RPC{ - SerialNumber: 1729381, - Subject: "RPC CA", + SerialNumber: 1729381, + PolicyObjectBase: PolicyObjectBase{ + Subject: "RPC CA", + }, Version: 1729381, PublicKeyAlgorithm: RSA, PublicKey: generateRandomBytes(), @@ -178,7 +196,9 @@ func randomRPC() *RPC { func randomRCSR() *RCSR { return &RCSR{ - Subject: "subject", + PolicyObjectBase: PolicyObjectBase{ + Subject: "subject", + }, Version: 6789, TimeStamp: nowWithoutMonotonic(), PublicKeyAlgorithm: RSA, @@ -194,8 +214,10 @@ func randomSP() *SP { Policies: Policy{ TrustedCA: []string{"ca1", "ca2"}, }, - TimeStamp: nowWithoutMonotonic(), - Subject: "domainname.com", + TimeStamp: nowWithoutMonotonic(), + PolicyObjectBase: PolicyObjectBase{ + Subject: "domainname.com", + }, CAName: "ca1", SerialNumber: rand.Int(), CASignature: generateRandomBytes(), @@ -210,8 +232,10 @@ func randomSP() *SP { func randomSPT() *SPT { return &SPT{ + PolicyObjectBase: PolicyObjectBase{ + Subject: "hohohoho", + }, Version: 12368713, - Subject: "hohohoho", CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, CertType: 0x21, diff --git 
a/pkg/mapserver/logpicker/logpicker.go b/pkg/mapserver/logpicker/logpicker.go index bd51c7e1..d4f7ffeb 100644 --- a/pkg/mapserver/logpicker/logpicker.go +++ b/pkg/mapserver/logpicker/logpicker.go @@ -246,13 +246,17 @@ func GetPCAndRPC(ctURL string, startIndex int64, endIndex int64, numOfWorker int continue } resultPC = append(resultPC, &common.SP{ - Subject: domainName, + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, TimeStamp: time.Now(), CASignature: generateRandomBytes(), }) resultRPC = append(resultRPC, &common.RPC{ - Subject: domainName, + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, NotBefore: time.Now(), }) } From 81845fa232b5d9c081d702880909ff097ff1278f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 3 May 2023 23:07:05 +0200 Subject: [PATCH 101/187] Inserting policies. --- pkg/common/structure.go | 2 + pkg/db/db.go | 12 ++++-- pkg/db/mysql/mysql.go | 50 ++++++++++++++++++++++- pkg/db/mysql/mysql_test.go | 5 +-- pkg/mapserver/responder/responder_test.go | 5 +-- pkg/mapserver/updater/updater.go | 39 ++++++++++++++---- pkg/tests/testdb/mockdb_for_testing.go | 4 ++ pkg/util/io.go | 14 ++----- tools/create_schema.sh | 33 ++++++++++++++- 9 files changed, 134 insertions(+), 30 deletions(-) diff --git a/pkg/common/structure.go b/pkg/common/structure.go index 9107946b..b4bac0c7 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -11,6 +11,7 @@ import ( type PolicyObject interface { __PolicyObjectMarkerMethod() Raw() []byte + Domain() string } type PolicyObjectBase struct { @@ -20,6 +21,7 @@ type PolicyObjectBase struct { func (PolicyObjectBase) __PolicyObjectMarkerMethod() {} func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } +func (o PolicyObjectBase) Domain() string { return o.Subject } // root certificate signing request type RCSR struct { diff --git a/pkg/db/db.go b/pkg/db/db.go index e5934baf..90e2e88d 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -44,14 +44,18 @@ 
type Conn interface { RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( payloadID *common.SHA256Output, payload []byte, err error) - ////////////////////////////////////////////////////////////////// - // check if the functions below are needed after the new design // - ////////////////////////////////////////////////////////////////// - // CheckCertsExist returns a slice of true/false values. Each value indicates if // the corresponding certificate identified by its ID is already present in the DB. CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) + // CheckPoliciesExist returns a slice of true/false values. Each value indicates if + // the corresponding policy identified by its ID is already present in the DB. + CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) + + ////////////////////////////////////////////////////////////////// + // check if the functions below are needed after the new design // + ////////////////////////////////////////////////////////////////// + InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 859c3005..372bb2bb 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -186,8 +186,54 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + "SELECT (CASE WHEN certs.cert_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + strings.Join(elems, " UNION ALL ") + - ") AS request left JOIN ( SELECT cert_id FROM certs ) AS certs ON certs.cert_id = request.cert_id" + - ") AS t" + ") AS request LEFT JOIN ( SELECT cert_id FROM certs ) AS certs ON " + + "certs.cert_id = request.cert_id) AS t" + + // Return slice of booleans: + present := make([]bool, len(ids)) + + var value string + if err := c.db.QueryRowContext(ctx, str, 
data...).Scan(&value); err != nil { + return nil, err + } + for i, c := range value { + if c == '1' { + present[i] = true + } + } + + return present, nil +} + +// CheckPoliciesExist returns a slice of true/false values. Each value indicates if +// the corresponding certificate identified by its ID is already present in the DB. +func (c *mysqlDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ( + []bool, error) { + + if len(ids) == 0 { + // If empty, return empty. + return nil, nil + } + // Slice to be used in the SQL query: + data := make([]interface{}, len(ids)) + for i, id := range ids { + data[i] = id[:] + } + + // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. + elems := make([]string, len(data)) + for i := range elems { + elems[i] = "SELECT ? AS policy_id" + } + + // The query means: join two tables, one with the values I am passing as arguments (those + // are the ids) and the policies table, and for those that exist write a 1, otherwise a 0. + // Finally, group_concat all rows into just one field of type string. + str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + + "SELECT (CASE WHEN policies.policy_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + + strings.Join(elems, " UNION ALL ") + + ") AS request LEFT JOIN ( SELECT policy_id FROM policies ) AS policies ON " + + "policies.policy_id = request.policy_id) AS t" // Return slice of booleans: present := make([]bool, len(ids)) diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index b3676cfd..99f7f484 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -52,10 +52,9 @@ func TestCoalesceForDirtyDomains(t *testing.T) { // Ingest two mock policies. 
data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) - pols, polIDs, err := util.LoadPoliciesFromRaw(data) + pols, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) - var expirations []*time.Time - err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, certNames, expirations, pols, polIDs) + err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, pols) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 3105bab5..70040c8b 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -54,10 +54,9 @@ func TestProofWithPoP(t *testing.T) { // Ingest two policies. data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) - pols, polIDs, err := util.LoadPoliciesFromRaw(data) + pols, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) - var expirations []*time.Time - err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, names, expirations, pols, polIDs) + err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, pols) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 159b8323..d81a342c 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -283,17 +283,33 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st // UpdatePoliciesWithKeepExisting takes a sequence of policies, the aliases associated with each one, and the // expiration times, and updates the DB with them. 
-func UpdatePoliciesWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, policies []common.PolicyObject, policyIDs []*common.SHA256Output) error { +func UpdatePoliciesWithKeepExisting(ctx context.Context, conn db.Conn, + policies []common.PolicyObject) error { payloads := make([][]byte, len(policies)) + IDs := make([]*common.SHA256Output, len(policies)) + names := make([]string, len(policies)) for i, pol := range policies { payloads[i] = pol.Raw() + id := common.SHA256Hash32Bytes(pol.Raw()) + IDs[i] = &id + names[i] = pol.Domain() } - // deleteme - // TODO - // TODO(juagargi) do it - return nil + + // Check which policies are already present in the DB. + mask, err := conn.CheckPoliciesExist(ctx, IDs) + if err != nil { + return err + } + n := runWhenFalse(mask, func(to, from int) { + IDs[to] = IDs[from] + payloads[to] = payloads[from] + names[to] = names[from] + }) + IDs = IDs[:n] + payloads = payloads[:n] + names = names[:n] + return insertPolicies(ctx, conn, names, IDs, payloads) } func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { @@ -408,7 +424,15 @@ func insertCerts(ctx context.Context, conn db.Conn, names [][]string, return nil } -func runWhenFalse(mask []bool, fcn func(to, from int)) { +func insertPolicies(ctx context.Context, conn db.Conn, names []string, ids []*common.SHA256Output, + payloads [][]byte) error { + + return nil +} + +// runWhenFalse serves as a function to "move" content when the element in mask is true. +// Returns the number of false entries. 
+func runWhenFalse(mask []bool, fcn func(to, from int)) int { to := 0 for from, condition := range mask { if !condition { @@ -416,4 +440,5 @@ func runWhenFalse(mask []bool, fcn func(to, from int)) { to++ } } + return to } diff --git a/pkg/tests/testdb/mockdb_for_testing.go b/pkg/tests/testdb/mockdb_for_testing.go index 06dd51da..39c7aca7 100644 --- a/pkg/tests/testdb/mockdb_for_testing.go +++ b/pkg/tests/testdb/mockdb_for_testing.go @@ -45,6 +45,10 @@ func (d *MockDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output return make([]bool, len(ids)), nil } +func (d *MockDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { + return make([]bool, len(ids)), nil +} + func (d *MockDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { diff --git a/pkg/util/io.go b/pkg/util/io.go index 09bb37e8..41b9d124 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -117,24 +117,18 @@ func LoadCertsAndChainsFromCSV( // LoadPoliciesFromRaw can load RPCs, SPs, RCSRs, PCRevocations, SPRTs, and PSRs from their // serialized form. -func LoadPoliciesFromRaw(b []byte) ([]common.PolicyObject, []*common.SHA256Output, error) { +func LoadPoliciesFromRaw(b []byte) ([]common.PolicyObject, error) { obj, err := common.FromJSON(b) if err != nil { - return nil, nil, err + return nil, err } // The returned object should be of type list. 
pols, err := ToTypedSlice[common.PolicyObject](obj) if err != nil { - return nil, nil, err - } - - ids := make([]*common.SHA256Output, len(pols)) - for i, pol := range pols { - id := common.SHA256Hash32Bytes(pol.Raw()) - ids[i] = &id + return nil, err } - return pols, ids, nil + return pols, nil } diff --git a/tools/create_schema.sh b/tools/create_schema.sh index e86a16fd..3bb48814 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -49,7 +49,6 @@ EOF echo "$CMD" | $MYSQLCMD - CMD=$(cat < Date: Thu, 4 May 2023 09:56:55 +0200 Subject: [PATCH 102/187] WIP inserting policies. --- pkg/db/db.go | 9 +++++--- pkg/db/mysql/mysql.go | 29 ++++++++++++++++++++++++++ pkg/mapserver/updater/updater.go | 15 +++++++++++++ pkg/tests/testdb/mockdb_for_testing.go | 6 ++++++ 4 files changed, 56 insertions(+), 3 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index 90e2e88d..dff2c43c 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -52,13 +52,16 @@ type Conn interface { // the corresponding policy identified by its ID is already present in the DB. CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) + InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, + payloads [][]byte) error + + InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error + ////////////////////////////////////////////////////////////////// // check if the functions below are needed after the new design // ////////////////////////////////////////////////////////////////// - InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, - payloads [][]byte) error - // UpdateDomainsWithCerts updates the domains and dirty tables with entries that are // _probably_ not present there. 
UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 372bb2bb..c195841c 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -280,6 +280,35 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256 return nil } +func (c *mysqlDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error { + + if len(ids) == 0 { + return nil + } + // TODO(juagargi) set a prepared statement in constructor + // Because the primary key is the SHA256 of the payload, if there is a clash, it must + // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. + const N = 4 + str := "REPLACE INTO policies (policy_id, parent_id, expiration, payload) VALUES " + + repeatStmt(len(ids), N) + data := make([]interface{}, N*len(ids)) + for i := range ids { + data[i*N] = ids[i][:] + if parents[i] != nil { + data[i*N+1] = parents[i][:] + } + data[i*N+2] = expirations[i] + data[i*N+3] = payloads[i] + } + _, err := c.db.Exec(str, data...) + if err != nil { + return err + } + + return nil +} + // UpdateDomainsWithCerts updates both the domains and the dirty tables. func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, domainNames []string) error { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index d81a342c..69a4d371 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -427,6 +427,21 @@ func insertCerts(ctx context.Context, conn db.Conn, names [][]string, func insertPolicies(ctx context.Context, conn db.Conn, names []string, ids []*common.SHA256Output, payloads [][]byte) error { + // TODO(juagargi) use parent IDs for the policies + + // Create a sequence of nil parent ids. 
+ parents := make([]*common.SHA256Output, len(ids)) + + // Create a sequence of expiration times way in the future. + expirations := make([]*time.Time, len(ids)) + for i := range expirations { + t := time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC) // TODO(juagargi) use real expirations. + expirations[i] = &t + } + if err := conn.InsertPolicies(ctx, ids, parents, expirations, payloads); err != nil { + return fmt.Errorf("inserting policies: %w", err) + } + return nil } diff --git a/pkg/tests/testdb/mockdb_for_testing.go b/pkg/tests/testdb/mockdb_for_testing.go index 39c7aca7..e15667e4 100644 --- a/pkg/tests/testdb/mockdb_for_testing.go +++ b/pkg/tests/testdb/mockdb_for_testing.go @@ -55,6 +55,12 @@ func (d *MockDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256O return nil } +func (d *MockDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error { + + return nil +} + func (d *MockDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, domainNames []string) error { From 07b67d3865e260608a2c711da4f665e07f425e42 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 4 May 2023 11:23:06 +0200 Subject: [PATCH 103/187] Updater modifies certs and policies simultaneously. 
--- cmd/ingest/certProcessor.go | 13 +-- pkg/db/db.go | 14 ++- pkg/db/mysql/mysql.go | 110 +++++++++++-------- pkg/db/mysql/mysql_test.go | 10 +- pkg/mapserver/responder/responder_test.go | 14 +-- pkg/mapserver/updater/updater.go | 126 ++++++++++++++-------- pkg/tests/testdb/mockdb_for_testing.go | 14 ++- 7 files changed, 188 insertions(+), 113 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 111d1812..74aacdce 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -77,8 +77,9 @@ const ( CertificateUpdateKeepExisting CertificateUpdateStrategy = 1 ) -type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*time.Time, - []*ctx509.Certificate, []*common.SHA256Output, []*common.SHA256Output) error +type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, + []*common.SHA256Output, []*common.SHA256Output, []*ctx509.Certificate, []*time.Time, + []common.PolicyObject) error func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { @@ -87,9 +88,9 @@ func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, var updateFcn UpdateCertificateFunction switch strategy { case CertificateUpdateOverwrite: - updateFcn = updater.UpdateCertsWithOverwrite + updateFcn = updater.UpdateWithOverwrite case CertificateUpdateKeepExisting: - updateFcn = updater.UpdateCertsWithKeepExisting + updateFcn = updater.UpdateWithKeepExisting default: panic(fmt.Errorf("invalid strategy %v", strategy)) } @@ -237,8 +238,8 @@ func (p *CertificateProcessor) createBatches() { func (p *CertificateProcessor) processBatch(batch *CertBatch) { // Store certificates in DB: - err := p.updateCertBatch(context.Background(), p.conn, batch.Names, batch.Expirations, - batch.Certs, batch.CertIDs, batch.ParentIDs) + err := p.updateCertBatch(context.Background(), p.conn, batch.Names, + batch.CertIDs, batch.ParentIDs, batch.Certs, 
batch.Expirations, nil) if err != nil { panic(err) } diff --git a/pkg/db/db.go b/pkg/db/db.go index dff2c43c..ac3ce4d3 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -58,15 +58,19 @@ type Conn interface { InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error + // UpdateDomains updates the domains and dirty tables. + UpdateDomains(ctx context.Context, domainIDs []*common.SHA256Output, domainNames []string) error + + // UpdateDomainCerts updates the domain_certs table with new entries. + UpdateDomainCerts(ctx context.Context, domainIDs, certIDs []*common.SHA256Output) error + + // UpdateDomainPolicies updates the domain_policies table with new entries. + UpdateDomainPolicies(ctx context.Context, domainIDs, policyIDs []*common.SHA256Output) error + ////////////////////////////////////////////////////////////////// // check if the functions below are needed after the new design // ////////////////////////////////////////////////////////////////// - // UpdateDomainsWithCerts updates the domains and dirty tables with entries that are - // _probably_ not present there. - UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, - domainNames []string) error - // ************************************************************ // Function for Tree table // ************************************************************ diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index c195841c..853d5750 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -309,60 +309,84 @@ func (c *mysqlDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA return nil } -// UpdateDomainsWithCerts updates both the domains and the dirty tables. 
-func (c *mysqlDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, - domainIDs []*common.SHA256Output, domainNames []string) error { +func (c *mysqlDB) UpdateDomains(ctx context.Context, domainIDs []*common.SHA256Output, + domainNames []string) error { - if len(certIDs) == 0 { + if len(domainIDs) == 0 { return nil } - // First insert into domains. Find out which domain IDs are unique, and attach the - // corresponding name to them. - { - uniqueDomainIDs := make(map[common.SHA256Output]string) - for i, id := range domainIDs { - uniqueDomainIDs[*id] = domainNames[i] - } + // Make the list of domains unique, attach the name to each unique ID. + domainIDsSet := make(map[common.SHA256Output]string) + for i, id := range domainIDs { + domainIDsSet[*id] = domainNames[i] + } - str := "INSERT IGNORE INTO domains (domain_id,domain_name) VALUES " + - repeatStmt(len(uniqueDomainIDs), 2) + // Insert into dirty. + str := "REPLACE INTO dirty (domain_id) VALUES " + repeatStmt(len(domainIDsSet), 1) + data := make([]interface{}, len(domainIDsSet)) + i := 0 + for k := range domainIDsSet { + data[i] = k[:] + i++ + } + _, err := c.db.ExecContext(ctx, str, data...) + if err != nil { + return err + } - data := make([]interface{}, 2*len(uniqueDomainIDs)) - i := 0 - for k, v := range uniqueDomainIDs { - data[2*i] = append([]byte{}, k[:]...) - data[2*i+1] = v - i++ - } - _, err := c.db.ExecContext(ctx, str, data...) - if err != nil { - return err - } + // Insert into domains. + str = "INSERT IGNORE INTO domains (domain_id,domain_name) VALUES " + + repeatStmt(len(domainIDsSet), 2) + data = make([]interface{}, 2*len(domainIDsSet)) + i = 0 + for k, v := range domainIDsSet { + data[2*i] = append([]byte{}, k[:]...) + data[2*i+1] = v + i++ } + _, err = c.db.ExecContext(ctx, str, data...) 
- // Now insert into the domain_certs: - { - str := "INSERT IGNORE INTO domain_certs (domain_id,cert_id) VALUES " + - repeatStmt(len(certIDs), 2) - data := make([]interface{}, 2*len(certIDs)) - for i := range certIDs { - data[2*i] = domainIDs[i][:] - data[2*i+1] = certIDs[i][:] - } - _, err := c.db.Exec(str, data...) - if err != nil { - return err - } + return err +} + +// UpdateDomainCerts updates the domain_certs table. +func (c *mysqlDB) UpdateDomainCerts(ctx context.Context, + domainIDs, certIDs []*common.SHA256Output) error { + + if len(domainIDs) == 0 { + return nil } + // Insert into domain_certs: + str := "INSERT IGNORE INTO domain_certs (domain_id,cert_id) VALUES " + + repeatStmt(len(certIDs), 2) + data := make([]interface{}, 2*len(certIDs)) + for i := range certIDs { + data[2*i] = domainIDs[i][:] + data[2*i+1] = certIDs[i][:] + } + _, err := c.db.ExecContext(ctx, str, data...) - // Now insert into dirty. - str := "REPLACE INTO dirty (domain_id) VALUES " + repeatStmt(len(domainIDs), 1) - data := make([]interface{}, len(domainIDs)) - for i, id := range domainIDs { - data[i] = id[:] + return err +} + +// UpdateDomainPolicies updates the domain_certs table. +func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context, + domainIDs, policyIDs []*common.SHA256Output) error { + + if len(domainIDs) == 0 { + return nil } - _, err := c.db.Exec(str, data...) + // Insert into domain_certs: + str := "INSERT IGNORE INTO domain_policies (domain_id,policy_id) VALUES " + + repeatStmt(len(policyIDs), 2) + data := make([]interface{}, 2*len(policyIDs)) + for i := range policyIDs { + data[2*i] = domainIDs[i][:] + data[2*i+1] = policyIDs[i][:] + } + _, err := c.db.ExecContext(ctx, str, data...) 
+ return err } diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 99f7f484..5b320893 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -43,18 +43,18 @@ func TestCoalesceForDirtyDomains(t *testing.T) { require.NoError(t, err) defer conn.Close() - // Use two mock x509 chains: + // Create two mock x509 chains: certs, certIDs, parentCertIDs, certNames := buildTestCertHierarchy(t) - err = updater.UpdateCertsWithKeepExisting(ctx, conn, certNames, util.ExtractExpirations(certs), - certs, certIDs, parentCertIDs) - require.NoError(t, err) // Ingest two mock policies. data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) pols, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) - err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, pols) + + // Update with certificates and policies. + err = updater.UpdateWithKeepExisting(ctx, conn, certNames, certIDs, parentCertIDs, + certs, util.ExtractExpirations(certs), pols) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 70040c8b..21031d83 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -42,21 +42,21 @@ func TestProofWithPoP(t *testing.T) { require.NoError(t, err) defer conn.Close() - // Ingest two certificates and their chains. + // Load two certificates and their chains. raw, err := util.ReadAllGzippedFile("../../../tests/testdata/2-xenon2023.csv.gz") require.NoError(t, err) - certs, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) - require.NoError(t, err) - err = updater.UpdateCertsWithKeepExisting(ctx, conn, names, util.ExtractExpirations(certs), - certs, IDs, parentIDs) + certs, certIDs, parentCertIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) require.NoError(t, err) - // Ingest two policies. + // Load two policies. 
data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) pols, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) - err = updater.UpdatePoliciesWithKeepExisting(ctx, conn, pols) + + // Ingest those two certificates and two policies. + err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, + util.ExtractExpirations(certs), pols) require.NoError(t, err) // Coalescing of payloads. diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 69a4d371..2090e1ca 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -92,8 +92,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ certChains = append(certChains, chain) } certs, IDs, parentIDs, names := util.UnfoldCerts(certs, certChains) - return UpdateCertsWithKeepExisting(ctx, mapUpdater.dbConn, names, expirations, certs, - IDs, parentIDs) + return UpdateWithKeepExisting(ctx, mapUpdater.dbConn, names, IDs, parentIDs, certs, expirations, nil) } // updateCerts: update the tables and SMT (in memory) using certificates @@ -238,24 +237,46 @@ func (mapUpdater *MapUpdater) Close() error { return mapUpdater.smt.Close() } -func UpdateCertsWithOverwrite(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, - parentIDs []*common.SHA256Output) error { +func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]string, + certIDs, parentCertIDs []*common.SHA256Output, + certs []*ctx509.Certificate, certExpirations []*time.Time, + policies []common.PolicyObject, +) error { + // Insert all specified certificates. 
payloads := make([][]byte, len(certs)) for i, c := range certs { payloads[i] = c.Raw } - return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) + err := insertCerts(ctx, conn, domainNames, certIDs, parentCertIDs, certExpirations, payloads) + if err != nil { + return err + } + + // Insert all specified policies. + payloads = make([][]byte, len(policies)) + policyIDs := make([]*common.SHA256Output, len(policies)) + policySubjects := make([]string, len(policies)) + for i, pol := range policies { + payloads[i] = pol.Raw() + id := common.SHA256Hash32Bytes(pol.Raw()) + policyIDs[i] = &id + policySubjects[i] = pol.Domain() + } + err = insertPolicies(ctx, conn, policySubjects, policyIDs, payloads) + + return err } -func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]string, - expirations []*time.Time, certs []*ctx509.Certificate, ids []*common.SHA256Output, - parentIDs []*common.SHA256Output) error { +func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]string, + certIDs, parentCertIDs []*common.SHA256Output, + certs []*ctx509.Certificate, certExpirations []*time.Time, + policies []common.PolicyObject, +) error { // First check which certificates are already present in the DB. - mask, err := conn.CheckCertsExist(ctx, ids) + maskCerts, err := conn.CheckCertsExist(ctx, certIDs) if err != nil { return err } @@ -263,53 +284,50 @@ func UpdateCertsWithKeepExisting(ctx context.Context, conn db.Conn, names [][]st // For all those certificates not already present in the DB, prepare three slices: IDs, // names, payloads, and parentIDs. payloads := make([][]byte, 0, len(certs)) - - // Prepare new parents, IDs and payloads skipping those certificates already in the DB. 
- runWhenFalse(mask, func(to, from int) { - ids[to] = ids[from] - names[to] = names[from] - parentIDs[to] = parentIDs[from] + runWhenFalse(maskCerts, func(to, from int) { + certIDs[to] = certIDs[from] + domainNames[to] = domainNames[from] + parentCertIDs[to] = parentCertIDs[from] payloads = append(payloads, certs[from].Raw) }) - // Trim the end of the original ID slice, as it contains values from the unmasked certificates. - ids = ids[:len(payloads)] - names = names[:len(payloads)] - parentIDs = parentIDs[:len(payloads)] - - // Only update those certificates that are not in the mask. - return insertCerts(ctx, conn, names, ids, parentIDs, expirations, payloads) -} + certIDs = certIDs[:len(payloads)] + domainNames = domainNames[:len(payloads)] + parentCertIDs = parentCertIDs[:len(payloads)] -// UpdatePoliciesWithKeepExisting takes a sequence of policies, the aliases associated with each one, and the -// expiration times, and updates the DB with them. -func UpdatePoliciesWithKeepExisting(ctx context.Context, conn db.Conn, - policies []common.PolicyObject) error { + // Update those certificates that were not in the mask. + err = insertCerts(ctx, conn, domainNames, certIDs, parentCertIDs, certExpirations, payloads) + if err != nil { + return err + } - payloads := make([][]byte, len(policies)) - IDs := make([]*common.SHA256Output, len(policies)) - names := make([]string, len(policies)) + // Prepare data structures for the policies. + payloads = make([][]byte, len(policies)) + policyIDs := make([]*common.SHA256Output, len(policies)) + policySubjects := make([]string, len(policies)) for i, pol := range policies { payloads[i] = pol.Raw() id := common.SHA256Hash32Bytes(pol.Raw()) - IDs[i] = &id - names[i] = pol.Domain() + policyIDs[i] = &id + policySubjects[i] = pol.Domain() } - // Check which policies are already present in the DB. 
- mask, err := conn.CheckPoliciesExist(ctx, IDs) + maskPols, err := conn.CheckPoliciesExist(ctx, policyIDs) if err != nil { return err } - n := runWhenFalse(mask, func(to, from int) { - IDs[to] = IDs[from] + n := runWhenFalse(maskPols, func(to, from int) { + policyIDs[to] = policyIDs[from] payloads[to] = payloads[from] - names[to] = names[from] + policySubjects[to] = policySubjects[from] }) - IDs = IDs[:n] + policyIDs = policyIDs[:n] payloads = payloads[:n] - names = names[:n] - return insertPolicies(ctx, conn, names, IDs, payloads) + policySubjects = policySubjects[:n] + // Update those policies that were not in the mask. + err = insertPolicies(ctx, conn, policySubjects, policyIDs, payloads) + + return err } func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { @@ -417,9 +435,12 @@ func insertCerts(ctx context.Context, conn db.Conn, names [][]string, } } // Push the changes of the domains to the DB. - if err := conn.UpdateDomainsWithCerts(ctx, newIDs, domainIDs, newNames); err != nil { + if err := conn.UpdateDomains(ctx, domainIDs, newNames); err != nil { return fmt.Errorf("updating domains: %w", err) } + if err := conn.UpdateDomainCerts(ctx, domainIDs, newIDs); err != nil { + return fmt.Errorf("updating domain_certs: %w", err) + } return nil } @@ -429,19 +450,34 @@ func insertPolicies(ctx context.Context, conn db.Conn, names []string, ids []*co // TODO(juagargi) use parent IDs for the policies - // Create a sequence of nil parent ids. - parents := make([]*common.SHA256Output, len(ids)) + // Push the changes of the domains to the DB. + domainIDs := make([]*common.SHA256Output, len(names)) + for i, name := range names { + domainID := common.SHA256Hash32Bytes([]byte(name)) + domainIDs[i] = &domainID + } + if err := conn.UpdateDomains(ctx, domainIDs, names); err != nil { + return fmt.Errorf("updating domains: %w", err) + } - // Create a sequence of expiration times way in the future. 
+ // Update the policies in the DB, with nil parents and mock expirations. + // Sequence of nil parent ids: + parents := make([]*common.SHA256Output, len(ids)) + // Sequence of expiration times way in the future: expirations := make([]*time.Time, len(ids)) for i := range expirations { t := time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC) // TODO(juagargi) use real expirations. expirations[i] = &t } + // Update policies: if err := conn.InsertPolicies(ctx, ids, parents, expirations, payloads); err != nil { return fmt.Errorf("inserting policies: %w", err) } + if err := conn.UpdateDomainPolicies(ctx, domainIDs, ids); err != nil { + return fmt.Errorf("updating domain_certs: %w", err) + } + return nil } diff --git a/pkg/tests/testdb/mockdb_for_testing.go b/pkg/tests/testdb/mockdb_for_testing.go index e15667e4..c6a41868 100644 --- a/pkg/tests/testdb/mockdb_for_testing.go +++ b/pkg/tests/testdb/mockdb_for_testing.go @@ -61,8 +61,18 @@ func (d *MockDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA2 return nil } -func (d *MockDB) UpdateDomainsWithCerts(ctx context.Context, certIDs, domainIDs []*common.SHA256Output, - domainNames []string) error { +func (d *MockDB) UpdateDomains(context.Context, []*common.SHA256Output, []string) error { + return nil +} + +func (d *MockDB) UpdateDomainCerts(ctx context.Context, + domainIDs, certIDs []*common.SHA256Output) error { + + return nil +} + +func (d *MockDB) UpdateDomainPolicies(ctx context.Context, + domainIDs, policyIDs []*common.SHA256Output) error { return nil } From 17792a70b42a12c32de0e0891b84c346aa4a56b5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 4 May 2023 14:00:37 +0200 Subject: [PATCH 104/187] Fix bug and extend UT to cover it. 
--- pkg/db/mysql/mysql.go | 17 ++++++++----- pkg/db/mysql/mysql_test.go | 51 ++++++++++++++++++++++++++------------ 2 files changed, 46 insertions(+), 22 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 853d5750..1e3ec9a7 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -272,7 +272,7 @@ func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256 data[i*N+2] = expirations[i] data[i*N+3] = payloads[i] } - _, err := c.db.Exec(str, data...) + _, err := c.db.ExecContext(ctx, str, data...) if err != nil { return err } @@ -301,7 +301,7 @@ func (c *mysqlDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA data[i*N+2] = expirations[i] data[i*N+3] = payloads[i] } - _, err := c.db.Exec(str, data...) + _, err := c.db.ExecContext(ctx, str, data...) if err != nil { return err } @@ -327,6 +327,7 @@ func (c *mysqlDB) UpdateDomains(ctx context.Context, domainIDs []*common.SHA256O data := make([]interface{}, len(domainIDsSet)) i := 0 for k := range domainIDsSet { + k := k // Because k changes during the loop, we need a local copy that doesn't. data[i] = k[:] i++ } @@ -341,7 +342,8 @@ func (c *mysqlDB) UpdateDomains(ctx context.Context, domainIDs []*common.SHA256O data = make([]interface{}, 2*len(domainIDsSet)) i = 0 for k, v := range domainIDsSet { - data[2*i] = append([]byte{}, k[:]...) + k := k + data[2*i] = k[:] data[2*i+1] = v i++ } @@ -393,7 +395,7 @@ func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context, func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { // Call the stored procedure with these parameters. 
str := "CALL calc_some_dirty_domain_payloads(?,?)" - _, err := c.db.Exec(str, firstRow, lastRow) + _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) if err != nil { return fmt.Errorf("aggregating payload for domains: %w", err) } @@ -408,7 +410,10 @@ func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainI str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" var certIDsID, certIDs []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&certIDsID, &certIDs) - if err != nil && err != sql.ErrNoRows { + if err != nil { + if err == sql.ErrNoRows { + return nil, nil, nil + } return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) } return (*common.SHA256Output)(certIDsID), certIDs, nil @@ -439,7 +444,7 @@ func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common repeatStmt(1, len(domainIDs)) params := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { - params[i] = (*id)[:] + params[i] = id[:] } rows, err := c.db.QueryContext(ctx, str, params...) if err != nil { diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 5b320893..6516629c 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -43,8 +43,21 @@ func TestCoalesceForDirtyDomains(t *testing.T) { require.NoError(t, err) defer conn.Close() - // Create two mock x509 chains: - certs, certIDs, parentCertIDs, certNames := buildTestCertHierarchy(t) + leafCerts := []string{ + "leaf.com", + "example.com", + } + var certs []*ctx509.Certificate + var certIDs, parentCertIDs []*common.SHA256Output + var certNames [][]string + for _, leaf := range leafCerts { + // Create two mock x509 chains on top of leaf: + certs2, certIDs2, parentCertIDs2, certNames2 := buildTestCertHierarchy(t, leaf) + certs = append(certs, certs2...) + certIDs = append(certIDs, certIDs2...) + parentCertIDs = append(parentCertIDs, parentCertIDs2...) + certNames = append(certNames, certNames2...) 
+ } // Ingest two mock policies. data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") @@ -61,29 +74,35 @@ func TestCoalesceForDirtyDomains(t *testing.T) { err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) require.NoError(t, err) - // Check the certificate coalescing: under leaf.com there must be 4 IDs, for the certs. - domainID := common.SHA256Hash32Bytes([]byte("leaf.com")) - gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesPayload(ctx, domainID) - require.NoError(t, err) - require.Len(t, gotCertIDs, common.SHA256Size*len(certs)) - expectedCertIDs, expectedCertIDsID := glueSortedIDsAndComputeItsID(certIDs) - t.Logf("expectedCertIDs: %s\n", hex.EncodeToString(expectedCertIDs)) - require.Equal(t, expectedCertIDs, gotCertIDs) - require.Equal(t, expectedCertIDsID, gotCertIDsID) + // Check the certificate coalescing: under leaf there must be 4 IDs, for the certs. + for i, leaf := range leafCerts { + domainID := common.SHA256Hash32Bytes([]byte(leaf)) + gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesPayload(ctx, domainID) + require.NoError(t, err) + expectedSize := common.SHA256Size * len(certs) / len(leafCerts) + require.Len(t, gotCertIDs, expectedSize, "bad length, should be %d but it's %d", + expectedSize, len(gotCertIDs)) + // From the certificate IDs, grab the IDs corresponding to this leaf: + N := len(certIDs) / len(leafCerts) // IDs per leaf = total / leaf_count + expectedCertIDs, expectedCertIDsID := glueSortedIDsAndComputeItsID(certIDs[i*N : (i+1)*N]) + t.Logf("expectedCertIDs: %s\n", hex.EncodeToString(expectedCertIDs)) + require.Equal(t, expectedCertIDs, gotCertIDs) + require.Equal(t, expectedCertIDsID, gotCertIDsID) + } } // buildTestCertHierarchy returns the certificates, chains, and names for two mock certificate -// chains: the first chain is leaf.com->c1.com->c0.com , and the second chain is -// leaf.com->c0.com . 
-func buildTestCertHierarchy(t require.TestingT) ( +// chains: the first chain is domainName->c1.com->c0.com , and the second chain is +// domainName->c0.com . +func buildTestCertHierarchy(t require.TestingT, domainName string) ( certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { // Create all certificates. certs = make([]*ctx509.Certificate, 4) certs[0] = randomX509Cert(t, "c0.com") certs[1] = randomX509Cert(t, "c1.com") - certs[2] = randomX509Cert(t, "leaf.com") - certs[3] = randomX509Cert(t, "leaf.com") + certs[2] = randomX509Cert(t, domainName) + certs[3] = randomX509Cert(t, domainName) // IDs: IDs = make([]*common.SHA256Output, len(certs)) From bbbd1fff0d6dec45f9141f64703cfe9a28f54870 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 4 May 2023 14:18:12 +0200 Subject: [PATCH 105/187] Extend DB test to cover policy coalescing. --- pkg/db/mysql/mysql.go | 5 ++++- pkg/db/mysql/mysql_test.go | 38 ++++++++++++++++++++++++++++++++------ 2 files changed, 36 insertions(+), 7 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 1e3ec9a7..96b14979 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -426,7 +426,10 @@ func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID co str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" 
var payloadID, payload []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) - if err != nil && err != sql.ErrNoRows { + if err != nil { + if err == sql.ErrNoRows { + return nil, nil, nil + } return nil, nil, fmt.Errorf("RetrieveDomainPoliciesPayload | %w", err) } return (*common.SHA256Output)(payloadID), payload, nil diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 6516629c..0327aa92 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -89,6 +89,23 @@ func TestCoalesceForDirtyDomains(t *testing.T) { require.Equal(t, expectedCertIDs, gotCertIDs) require.Equal(t, expectedCertIDsID, gotCertIDsID) } + + // Check policy coalescing. + policiesPerName := make(map[string][]common.PolicyObject, len(pols)) + for _, pol := range pols { + policiesPerName[pol.Domain()] = append(policiesPerName[pol.Domain()], pol) + } + for name, policies := range policiesPerName { + id := common.SHA256Hash32Bytes([]byte(name)) + gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesPayload(ctx, id) + require.NoError(t, err) + // For each sequence of policies, compute the ID of their JSON. + polIDs := computeIDsOfPolicies(policies) + expectedPolIDs, expectedPolIDsID := glueSortedIDsAndComputeItsID(polIDs) + t.Logf("expectedPolIDs: %s\n", hex.EncodeToString(expectedPolIDs)) + require.Equal(t, expectedPolIDs, gotPolIDs) + require.Equal(t, expectedPolIDsID, gotPolIDsID) + } } // buildTestCertHierarchy returns the certificates, chains, and names for two mock certificate @@ -127,16 +144,16 @@ func buildTestCertHierarchy(t require.TestingT, domainName string) ( return } -func glueSortedIDsAndComputeItsID(certIDs []*common.SHA256Output) ([]byte, *common.SHA256Output) { +func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.SHA256Output) { // Copy slice to avoid mutating of the original. - IDs := append(certIDs[:0:0], certIDs...) + ids := append(IDs[:0:0], IDs...) // Sort the IDs. 
- sort.Slice(IDs, func(i, j int) bool { - return bytes.Compare(IDs[i][:], IDs[j][:]) == -1 + sort.Slice(ids, func(i, j int) bool { + return bytes.Compare(ids[i][:], ids[j][:]) == -1 }) // Glue the sorted IDs. - gluedIDs := make([]byte, common.SHA256Size*len(IDs)) - for i, id := range IDs { + gluedIDs := make([]byte, common.SHA256Size*len(ids)) + for i, id := range ids { copy(gluedIDs[i*common.SHA256Size:], id[:]) } // Compute the hash of the glued IDs. @@ -144,6 +161,15 @@ func glueSortedIDsAndComputeItsID(certIDs []*common.SHA256Output) ([]byte, *comm return gluedIDs, &id } +func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output { + IDs := make([]*common.SHA256Output, len(policies)) + for i, pol := range policies { + id := common.SHA256Hash32Bytes(pol.Raw()) + IDs[i] = &id + } + return IDs +} + func randomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { return &ctx509.Certificate{ DNSNames: []string{domain}, From df8df565d92bec2c98e5a4970d9207088c7ccccd Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 4 May 2023 14:21:46 +0200 Subject: [PATCH 106/187] Rename certs coalescing stored procecure. --- pkg/db/mysql/mysql.go | 2 +- tools/create_schema.sh | 11 +++++------ 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 96b14979..e824322f 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -394,7 +394,7 @@ func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context, func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { // Call the stored procedure with these parameters. 
- str := "CALL calc_some_dirty_domain_payloads(?,?)" + str := "CALL calc_dirty_domains_certs(?,?)" _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) if err != nil { return fmt.Errorf("aggregating payload for domains: %w", err) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 3bb48814..6b97b03b 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -299,24 +299,23 @@ EOF CMD=$(cat < Date: Thu, 4 May 2023 14:47:56 +0200 Subject: [PATCH 107/187] Always also coalesce policies. --- pkg/db/mysql/mysql.go | 13 +++++++-- pkg/db/mysql/mysql_test.go | 4 +-- tools/create_schema.sh | 57 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 5 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index e824322f..e5c4df98 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -393,11 +393,18 @@ func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context, } func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { - // Call the stored procedure with these parameters. + // Call the certificate coalescing stored procedure with these parameters. str := "CALL calc_dirty_domains_certs(?,?)" _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) if err != nil { - return fmt.Errorf("aggregating payload for domains: %w", err) + return fmt.Errorf("coalescing certificates for domains: %w", err) + } + + // Call the policy coalescing stored procedure with these parameters. + str = "CALL calc_dirty_domains_policies(?,?)" + _, err = c.db.ExecContext(ctx, str, firstRow, lastRow) + if err != nil { + return fmt.Errorf("coalescing policies for domains: %w", err) } return nil } @@ -423,7 +430,7 @@ func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID co ) (*common.SHA256Output, []byte, error) { // deleteme use the other field, not the certificates one! - str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" 
+ str := "SELECT policy_ids_id, policy_ids FROM domain_payloads WHERE domain_id = ?" var payloadID, payload []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) if err != nil { diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 0327aa92..fac51d16 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -44,8 +44,8 @@ func TestCoalesceForDirtyDomains(t *testing.T) { defer conn.Close() leafCerts := []string{ - "leaf.com", - "example.com", + "leaf.certs.com", + "example.certs.com", } var certs []*ctx509.Certificate var certIDs, parentCertIDs []*common.SHA256Output diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 6b97b03b..3c42e85b 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -353,6 +353,63 @@ EOF ) echo "$CMD" | $MYSQLCMD + + CMD=$(cat < Date: Thu, 4 May 2023 14:58:18 +0200 Subject: [PATCH 108/187] Fix bug returning empty cert/pol IDs. --- pkg/db/mysql/mysql.go | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index e5c4df98..8b483c3a 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -417,29 +417,30 @@ func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainI str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" 
var certIDsID, certIDs []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&certIDsID, &certIDs) - if err != nil { - if err == sql.ErrNoRows { - return nil, nil, nil - } + if err != nil && err != sql.ErrNoRows { return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) } - return (*common.SHA256Output)(certIDsID), certIDs, nil + var IDptr *common.SHA256Output + if certIDsID != nil { + IDptr = (*common.SHA256Output)(certIDsID) + } + return IDptr, certIDs, nil } func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID common.SHA256Output, ) (*common.SHA256Output, []byte, error) { - // deleteme use the other field, not the certificates one! str := "SELECT policy_ids_id, policy_ids FROM domain_payloads WHERE domain_id = ?" - var payloadID, payload []byte - err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&payloadID, &payload) - if err != nil { - if err == sql.ErrNoRows { - return nil, nil, nil - } + var policyIDsID, policyIDs []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&policyIDsID, &policyIDs) + if err != nil && err != sql.ErrNoRows { return nil, nil, fmt.Errorf("RetrieveDomainPoliciesPayload | %w", err) } - return (*common.SHA256Output)(payloadID), payload, nil + var IDptr *common.SHA256Output + if policyIDsID != nil { + IDptr = (*common.SHA256Output)(policyIDsID) + } + return IDptr, policyIDs, nil } // RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table From 705d521761b57e67ad5686471f8ecd82f73bcdd9 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 4 May 2023 15:26:03 +0200 Subject: [PATCH 109/187] Simplify Hash functions in common. 
--- pkg/common/hasher.go | 19 +++++-------------- pkg/common/hasher_test.go | 16 ++++++++++++++++ 2 files changed, 21 insertions(+), 14 deletions(-) create mode 100644 pkg/common/hasher_test.go diff --git a/pkg/common/hasher.go b/pkg/common/hasher.go index f2089a08..ae54b545 100644 --- a/pkg/common/hasher.go +++ b/pkg/common/hasher.go @@ -6,8 +6,7 @@ const SHA256Size = 32 type SHA256Output [SHA256Size]byte -// Hash exports default hash function for trie -var SHA256Hash = func(data ...[]byte) []byte { +func SHA256Hash(data ...[]byte) []byte { hash := sha256.New() for i := 0; i < len(data); i++ { hash.Write(data[i]) @@ -15,16 +14,8 @@ var SHA256Hash = func(data ...[]byte) []byte { return hash.Sum(nil) } -// Hash exports default hash function for trie -var SHA256Hash32Bytes = func(data ...[]byte) SHA256Output { - hash := sha256.New() - for i := 0; i < len(data); i++ { - hash.Write(data[i]) - } - output := hash.Sum(nil) - - var output32Bytes SHA256Output - copy(output32Bytes[:], output) - - return output32Bytes +func SHA256Hash32Bytes(data ...[]byte) SHA256Output { + output := SHA256Hash(data...) // will never be empty, will always be 32 bytes. + ptr := (*SHA256Output)(output) + return *ptr } diff --git a/pkg/common/hasher_test.go b/pkg/common/hasher_test.go new file mode 100644 index 00000000..00e9c20c --- /dev/null +++ b/pkg/common/hasher_test.go @@ -0,0 +1,16 @@ +package common + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +// TestEmptyHash checks that the hash of anything is always something. +func TestEmptyHash(t *testing.T) { + v := SHA256Hash() + require.NotEmpty(t, v) + + a := SHA256Hash32Bytes() + require.Equal(t, v, a[:]) +} From 93f4891ac09471c23b2382a7d5efdbd7ab8bddb1 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 4 May 2023 16:00:06 +0200 Subject: [PATCH 110/187] Functions to fold/unfold IDs into bytes. 
--- pkg/common/hasher.go | 41 ++++++++++++++++++++++++++++++- pkg/common/hasher_test.go | 51 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+), 1 deletion(-) diff --git a/pkg/common/hasher.go b/pkg/common/hasher.go index ae54b545..885b9009 100644 --- a/pkg/common/hasher.go +++ b/pkg/common/hasher.go @@ -1,6 +1,11 @@ package common -import sha256 "github.com/minio/sha256-simd" +import ( + "bytes" + "sort" + + sha256 "github.com/minio/sha256-simd" +) const SHA256Size = 32 @@ -19,3 +24,37 @@ func SHA256Hash32Bytes(data ...[]byte) SHA256Output { ptr := (*SHA256Output)(output) return *ptr } + +// BytesToIDs takes a sequence of bytes and returns a slice of IDs, where the byte sequence +// is a set of N blocks of ID size. +// The function expects the sequence to be the correct length (or panic). +func BytesToIDs(buff []byte) []*SHA256Output { + N := len(buff) / SHA256Size + IDs := make([]*SHA256Output, N) + for i := 0; i < N; i++ { + id := *(*SHA256Output)(buff[i*SHA256Size : (i+1)*SHA256Size]) + IDs[i] = &id + } + return IDs +} + +func IDsToBytes(IDs []*SHA256Output) []byte { + // Glue the sorted IDs. + gluedIDs := make([]byte, SHA256Size*len(IDs)) + for i, id := range IDs { + copy(gluedIDs[i*SHA256Size:], id[:]) + } + return gluedIDs +} + +// SortIDsAndGlue takes a sequence of IDs, sorts them alphabetically, and glues every byte of +// them together. +func SortIDsAndGlue(IDs []*SHA256Output) []byte { + // Copy slice to avoid mutating of the original. + ids := append(IDs[:0:0], IDs...) + // Sort the IDs. 
+ sort.Slice(ids, func(i, j int) bool { + return bytes.Compare(ids[i][:], ids[j][:]) == -1 + }) + return IDsToBytes(ids) +} diff --git a/pkg/common/hasher_test.go b/pkg/common/hasher_test.go index 00e9c20c..379a5f9d 100644 --- a/pkg/common/hasher_test.go +++ b/pkg/common/hasher_test.go @@ -1,6 +1,8 @@ package common import ( + "encoding/hex" + "fmt" "testing" "github.com/stretchr/testify/require" @@ -9,8 +11,57 @@ import ( // TestEmptyHash checks that the hash of anything is always something. func TestEmptyHash(t *testing.T) { v := SHA256Hash() + fmt.Printf("Hash of nothing is: %s\n", hex.EncodeToString(v)) require.NotEmpty(t, v) a := SHA256Hash32Bytes() require.Equal(t, v, a[:]) } + +func TestBytesToIDSequenceAndBack(t *testing.T) { + sequence := newSequence() + // sequence is 0,1,...,63 + t.Logf("sequence is %s", hex.EncodeToString(sequence)) + require.Less(t, len(sequence), 256) + + IDs := BytesToIDs(sequence) + require.Len(t, IDs, 2) + + // Check values of the IDs are sequential. + checkSequentialValues(t, IDs) + + // Remove the values in the sequence. Underlying memory support should not affect the IDs. + for i := range sequence { + sequence[i] = 0xff + } + checkSequentialValues(t, IDs) + + // Check that the back conversion works. + sequence = IDsToBytes(IDs) + require.Equal(t, newSequence(), sequence) + // Modify the values of the IDs, the sequence must remain unaltered. + for _, id := range IDs { + for i := range id { + id[i] = 0xAA + } + } + require.Equal(t, newSequence(), sequence) +} + +func checkSequentialValues(t require.TestingT, IDs []*SHA256Output) { + i := 0 + for _, id := range IDs { + for _, j := range id { + require.Equal(t, i, int(j)) + i++ + } + } +} + +func newSequence() []byte { + sequence := make([]byte, 2*SHA256Size) + for i := range sequence { + sequence[i] = byte(i) + } + return sequence +} From 6a8bc06e3ff481569ff89a2cae84d12491e38cd0 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 4 May 2023 17:43:31 +0200 Subject: [PATCH 111/187] Proofs with certs and policies IDs. --- pkg/db/mysql/mysql.go | 15 +-- pkg/db/mysql/mysql_test.go | 65 +----------- pkg/mapserver/common/domainEntry.go | 10 +- pkg/mapserver/prover/prover.go | 16 --- pkg/mapserver/responder/deleteme.go | 2 +- pkg/mapserver/responder/responder.go | 13 ++- pkg/mapserver/responder/responder_test.go | 123 ++++++++-------------- pkg/tests/testdb/certificates.go | 60 +++++++++++ pkg/tests/testdb/policies.go | 30 ++++++ pkg/util/random.go | 15 +++ 10 files changed, 175 insertions(+), 174 deletions(-) create mode 100644 pkg/tests/testdb/certificates.go create mode 100644 pkg/tests/testdb/policies.go create mode 100644 pkg/util/random.go diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 8b483c3a..23048e9d 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -451,7 +451,9 @@ func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common if len(domainIDs) == 0 { return nil, nil } - str := "SELECT domain_id,cert_ids FROM domain_payloads WHERE domain_id IN " + + + // Retrieve the certificate and policy IDs for each domain ID. + str := "SELECT domain_id,cert_ids,policy_ids FROM domain_payloads WHERE domain_id IN " + repeatStmt(1, len(domainIDs)) params := make([]interface{}, len(domainIDs)) for i, id := range domainIDs { @@ -459,19 +461,20 @@ func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common } rows, err := c.db.QueryContext(ctx, str, params...) 
if err != nil { - fmt.Printf("Query is: '%s'\n", str) return nil, fmt.Errorf("error obtaining payloads for domains: %w", err) } pairs := make([]*db.KeyValuePair, 0, len(domainIDs)) for rows.Next() { - var id, payload []byte - err := rows.Scan(&id, &payload) + var id, certIDs, policyIDs []byte + err := rows.Scan(&id, &certIDs, &policyIDs) if err != nil { - return nil, fmt.Errorf("error scanning domain ID and its payload") + return nil, fmt.Errorf("error scanning domain ID and its certs/policies") } + // Unfold the byte streams into IDs, sort them, and fold again. + allIDs := append(common.BytesToIDs(certIDs), common.BytesToIDs(policyIDs)...) pairs = append(pairs, &db.KeyValuePair{ Key: *(*common.SHA256Output)(id), - Value: payload, + Value: common.SortIDsAndGlue(allIDs), }) } return pairs, nil diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index fac51d16..8ca58de6 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -1,17 +1,14 @@ package mysql_test import ( - "bytes" "context" "encoding/hex" "math/rand" "os" - "sort" "testing" "time" ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/google/certificate-transparency-go/x509/pkix" "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" @@ -52,7 +49,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { var certNames [][]string for _, leaf := range leafCerts { // Create two mock x509 chains on top of leaf: - certs2, certIDs2, parentCertIDs2, certNames2 := buildTestCertHierarchy(t, leaf) + certs2, certIDs2, parentCertIDs2, certNames2 := testdb.BuildTestCertHierarchy(t, leaf) certs = append(certs, certs2...) certIDs = append(certIDs, certIDs2...) parentCertIDs = append(parentCertIDs, parentCertIDs2...) 
@@ -108,54 +105,8 @@ func TestCoalesceForDirtyDomains(t *testing.T) { } } -// buildTestCertHierarchy returns the certificates, chains, and names for two mock certificate -// chains: the first chain is domainName->c1.com->c0.com , and the second chain is -// domainName->c0.com . -func buildTestCertHierarchy(t require.TestingT, domainName string) ( - certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { - - // Create all certificates. - certs = make([]*ctx509.Certificate, 4) - certs[0] = randomX509Cert(t, "c0.com") - certs[1] = randomX509Cert(t, "c1.com") - certs[2] = randomX509Cert(t, domainName) - certs[3] = randomX509Cert(t, domainName) - - // IDs: - IDs = make([]*common.SHA256Output, len(certs)) - for i, c := range certs { - id := common.SHA256Hash32Bytes(c.Raw) - IDs[i] = &id - } - - // Names: only c2 and c3 are leaves, the rest should be nil. - names = make([][]string, len(certs)) - names[2] = certs[2].DNSNames - names[3] = certs[3].DNSNames - - // Parent IDs. - parentIDs = make([]*common.SHA256Output, len(certs)) - // First chain: - parentIDs[1] = IDs[0] - parentIDs[2] = IDs[1] - // Second chain: - parentIDs[3] = IDs[0] - - return -} - func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.SHA256Output) { - // Copy slice to avoid mutating of the original. - ids := append(IDs[:0:0], IDs...) - // Sort the IDs. - sort.Slice(ids, func(i, j int) bool { - return bytes.Compare(ids[i][:], ids[j][:]) == -1 - }) - // Glue the sorted IDs. - gluedIDs := make([]byte, common.SHA256Size*len(ids)) - for i, id := range ids { - copy(gluedIDs[i*common.SHA256Size:], id[:]) - } + gluedIDs := common.SortIDsAndGlue(IDs) // Compute the hash of the glued IDs. 
id := common.SHA256Hash32Bytes(gluedIDs) return gluedIDs, &id @@ -170,18 +121,6 @@ func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output return IDs } -func randomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { - return &ctx509.Certificate{ - DNSNames: []string{domain}, - Subject: pkix.Name{ - CommonName: domain, - }, - NotBefore: util.TimeFromSecs(0), - NotAfter: time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), - Raw: randomBytes(t, 10), - } -} - func randomBytes(t require.TestingT, size int) []byte { buff := make([]byte, size) n, err := rand.Read(buff) diff --git a/pkg/mapserver/common/domainEntry.go b/pkg/mapserver/common/domainEntry.go index 3a66bd91..1596cfaa 100644 --- a/pkg/mapserver/common/domainEntry.go +++ b/pkg/mapserver/common/domainEntry.go @@ -16,10 +16,12 @@ type DomainEntry struct { DomainID *common.SHA256Output // This is the SHA256 of the domain name DomainValue *common.SHA256Output // = SHA256 ( certsPayloadID || polsPayloadID ) - DomainCertsPayloadID *common.SHA256Output - DomainCertsPayload []byte // Includes x509 leafs and trust chains, raw ASN.1 DER. - DomainPoliciesPayloadID *common.SHA256Output - DomainPoliciesPayload []byte // Includes RPCs, SPs, etc. JSON. + // TODO(juagargi) remove the CertsIDsID and PolicyIDsID from here and from the DB. + + CertIDsID *common.SHA256Output + CertIDs []byte // Includes x509 leafs and trust chains, raw ASN.1 DER. + PolicyIDsID *common.SHA256Output + PolicyIDs []byte // Includes RPCs, SPs, etc. JSON. } // DeletemeSerializeDomainEntry uses json to serialize. 
diff --git a/pkg/mapserver/prover/prover.go b/pkg/mapserver/prover/prover.go index f38d9ef8..44902f7e 100644 --- a/pkg/mapserver/prover/prover.go +++ b/pkg/mapserver/prover/prover.go @@ -9,22 +9,6 @@ import ( "github.com/netsec-ethz/fpki/pkg/mapserver/trie" ) -// deleteme -func VerifyProofByDomainOld(response mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { - if response.PoI.ProofType == mapCommon.PoP { - //TODO(yongzhe): compare h(domainEntry) and proof.poi.proofValue - // value := common.SHA256Hash(response.DomainEntryBytes) - // return mapCommon.PoP, trie.VerifyInclusion(response.PoI.Root, response.PoI.Proof, - // common.SHA256Hash([]byte(response.Domain)), value), nil - - // The value is the hash of the two payload hashes. - return mapCommon.PoP, trie.VerifyInclusion(response.PoI.Root, response.PoI.Proof, - response.DomainEntry.DomainID[:], response.DomainEntry.DomainValue[:]), nil - } - return mapCommon.PoA, trie.VerifyNonInclusion(response.PoI.Root, response.PoI.Proof, - response.DomainEntry.DomainID[:], response.PoI.ProofValue, response.PoI.ProofKey), nil -} - // VerifyProofByDomain verifies the MapServerResponse (received from map server), // and returns the type of proof, and proofing result. 
func VerifyProofByDomain(response *mapCommon.MapServerResponse) (mapCommon.ProofType, bool, error) { diff --git a/pkg/mapserver/responder/deleteme.go b/pkg/mapserver/responder/deleteme.go index e6ba0998..12fe583a 100644 --- a/pkg/mapserver/responder/deleteme.go +++ b/pkg/mapserver/responder/deleteme.go @@ -32,7 +32,7 @@ func (mapResponder *OldMapResponder) GetDomainProofsTest(ctx context.Context, do end2 := time.Now() for _, keyValuePair := range result { // domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value - domainProofMap[keyValuePair.Key].DomainEntry.DomainCertsPayload = keyValuePair.Value + domainProofMap[keyValuePair.Key].DomainEntry.CertIDs = keyValuePair.Value } fmt.Println(len(domainResultMap), end.Sub(start), " ", end1.Sub(start1), " ", end2.Sub(start2)) diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index 66b8f8d7..44de1215 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -67,19 +67,24 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, proofType := mapCommon.PoA if isPoP { proofType = mapCommon.PoP - de.DomainCertsPayloadID, de.DomainCertsPayload, err = + de.CertIDsID, de.CertIDs, err = r.conn.RetrieveDomainCertificatesPayload(ctx, domainPartID) if err != nil { return nil, fmt.Errorf("error obtaining x509 payload for %s: %w", domainPart, err) } - de.DomainPoliciesPayloadID, de.DomainPoliciesPayload, err = + de.PolicyIDsID, de.PolicyIDs, err = r.conn.RetrieveDomainPoliciesPayload(ctx, domainPartID) if err != nil { return nil, fmt.Errorf("error obtaining policies payload for %s: %w", domainPart, err) } - // deleteme change this to sha(certIDs || polIDs) - de.DomainValue = de.DomainCertsPayloadID + // Concat certIDs with polIDs, in alphabetically sorted order. + allIDs := append(common.BytesToIDs(de.CertIDs), common.BytesToIDs(de.PolicyIDs)...) 
+ v := common.SortIDsAndGlue(allIDs) + vID := common.SHA256Hash32Bytes(v) + de.DomainValue = &vID + + // TODO(juagargi) the sorting and concatenation should happen inside the DB. } proofList[i] = &mapCommon.MapServerResponse{ diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 21031d83..1bc10379 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -2,18 +2,15 @@ package responder import ( "context" - "os" - "strings" "testing" "time" - ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" - "github.com/netsec-ethz/fpki/pkg/domain" mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" @@ -21,6 +18,9 @@ import ( "github.com/netsec-ethz/fpki/pkg/util" ) +// TestProofWithPoP checks for 3 domains: a.com (certs), b.com (policies), c.com (both), +// that the proofs of presence work correctly, by ingesting all the material, updating the DB, +// creating a responder, and checking those domains. func TestProofWithPoP(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() @@ -42,22 +42,26 @@ func TestProofWithPoP(t *testing.T) { require.NoError(t, err) defer conn.Close() - // Load two certificates and their chains. 
- raw, err := util.ReadAllGzippedFile("../../../tests/testdata/2-xenon2023.csv.gz") - require.NoError(t, err) - certs, certIDs, parentCertIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) + // a.com + certs, certIDs, parentCertIDs, names := testdb.BuildTestCertHierarchy(t, "a.com") + err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, + util.ExtractExpirations(certs), nil) require.NoError(t, err) + certsA := certs - // Load two policies. - data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") - require.NoError(t, err) - pols, err := util.LoadPoliciesFromRaw(data) + // b.com + policies := testdb.BuildTestPolicyHierarchy(t, "b.com") + err = updater.UpdateWithKeepExisting(ctx, conn, nil, nil, nil, nil, nil, policies) require.NoError(t, err) + policiesB := policies - // Ingest those two certificates and two policies. + // c.com + certs, certIDs, parentCertIDs, names = testdb.BuildTestCertHierarchy(t, "c.com") + policies = testdb.BuildTestPolicyHierarchy(t, "c.com") err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, - util.ExtractExpirations(certs), pols) + util.ExtractExpirations(certs), policies) require.NoError(t, err) + certsC := certs // Coalescing of payloads. err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) @@ -71,88 +75,47 @@ func TestProofWithPoP(t *testing.T) { err = conn.CleanupDirty(ctx) require.NoError(t, err) - // Now to the test. - // Create a responder. responder, err := NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) require.NoError(t, err) - // Log the names of the certs. - for i, names := range names { - t.Logf("cert %d for the following names:\n", i) - for j, name := range names { - t.Logf("\t[%3d]: \"%s\"\n", j, name) - } - if len(names) == 0 { - t.Log("\t[no names]") - } - } - - // Check proofs for the previously ingested certificates. 
- foundValidDomainNames := false - for i, c := range certs { - t.Logf("Certificate subject is: \"%s\"", domain.CertSubjectName(c)) - if names[i] == nil { - // This is a non leaf certificate, skip. - continue - } - - for _, name := range names[i] { - t.Logf("Proving \"%s\"", name) - if !domain.IsValidDomain(name) { - t.Logf("Invalid domain name: \"%s\", skipping", name) - continue - } - foundValidDomainNames = true - proofChain, err := responder.GetProof(ctx, name) - assert.NoError(t, err) - if err == nil { - checkProof(t, c, proofChain) - } - } - } - require.True(t, foundValidDomainNames, "bad test: not one valid checkable domain name") + // Check a.com: + proofChain, err := responder.GetProof(ctx, "a.com") + assert.NoError(t, err) + id := common.SHA256Hash32Bytes(certsA[0].Raw) + checkProof(t, &id, proofChain) + + // Check b.com: + proofChain, err = responder.GetProof(ctx, "b.com") + assert.NoError(t, err) + id = common.SHA256Hash32Bytes(policiesB[0].Raw()) + checkProof(t, &id, proofChain) + + // Check b.com: + proofChain, err = responder.GetProof(ctx, "c.com") + assert.NoError(t, err) + id = common.SHA256Hash32Bytes(certsC[0].Raw) + checkProof(t, &id, proofChain) } // checkProof checks the proof to be correct. 
-func checkProof(t *testing.T, cert *ctx509.Certificate, proofs []*mapcommon.MapServerResponse) { +func checkProof(t *testing.T, payloadID *common.SHA256Output, proofs []*mapcommon.MapServerResponse) { t.Helper() - require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, - "PoP not found for \"%s\"", domain.CertSubjectName(cert)) + require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, "PoP not found") for _, proof := range proofs { - includesDomainName(t, proof.DomainEntry.DomainName, cert) proofType, isCorrect, err := prover.VerifyProofByDomain(proof) require.NoError(t, err) require.True(t, isCorrect) if proofType == mapcommon.PoA { - require.Empty(t, proof.DomainEntry.DomainCertsPayload) - require.Empty(t, proof.DomainEntry.DomainPoliciesPayload) + require.Empty(t, proof.DomainEntry.CertIDs) + require.Empty(t, proof.DomainEntry.PolicyIDs) } if proofType == mapcommon.PoP { - certs, err := util.DeserializeCertificates(proof.DomainEntry.DomainCertsPayload) - require.NoError(t, err) - // The certificate must be present. - for _, c := range certs { - if cert.Equal(c) { - return - } - } - } - } - // require.Fail(t, "cert/CA not found") -} - -// includesDomainName checks that the subDomain appears as a substring of at least one of the -// names in the certificate. -func includesDomainName(t *testing.T, subDomain string, cert *ctx509.Certificate) { - names := util.ExtractCertDomains(cert) - - for _, s := range names { - if strings.Contains(s, subDomain) { - return + // The ID passed as argument must be one of the IDs present in the domain entry. + allIDs := append(common.BytesToIDs(proof.DomainEntry.CertIDs), + common.BytesToIDs(proof.DomainEntry.PolicyIDs)...) 
+ require.Contains(t, allIDs, payloadID) } } - require.FailNow(t, "the subdomain \"%s\" is not present as a preffix in any of the contained "+ - "names of the certtificate: [%s]", subDomain, strings.Join(names, ", ")) } diff --git a/pkg/tests/testdb/certificates.go b/pkg/tests/testdb/certificates.go new file mode 100644 index 00000000..38957185 --- /dev/null +++ b/pkg/tests/testdb/certificates.go @@ -0,0 +1,60 @@ +package testdb + +import ( + "time" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" + "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/util" +) + +// BuildTestCertHierarchy returns the certificates, chains, and names for two mock certificate +// chains: the first chain is domainName->c1.com->c0.com , and the second chain is +// domainName->c0.com . +func BuildTestCertHierarchy(t require.TestingT, domainName string) ( + certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { + + // Create all certificates. + certs = make([]*ctx509.Certificate, 4) + certs[0] = RandomX509Cert(t, "c0.com") + certs[1] = RandomX509Cert(t, "c1.com") + certs[2] = RandomX509Cert(t, domainName) + certs[3] = RandomX509Cert(t, domainName) + + // IDs: + IDs = make([]*common.SHA256Output, len(certs)) + for i, c := range certs { + id := common.SHA256Hash32Bytes(c.Raw) + IDs[i] = &id + } + + // Names: only c2 and c3 are leaves, the rest should be nil. + names = make([][]string, len(certs)) + names[2] = certs[2].DNSNames + names[3] = certs[3].DNSNames + + // Parent IDs. 
+ parentIDs = make([]*common.SHA256Output, len(certs)) + // First chain: + parentIDs[1] = IDs[0] + parentIDs[2] = IDs[1] + // Second chain: + parentIDs[3] = IDs[0] + + return +} + +func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { + return &ctx509.Certificate{ + DNSNames: []string{domain}, + Subject: pkix.Name{ + CommonName: domain, + }, + NotBefore: util.TimeFromSecs(0), + NotAfter: time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), + Raw: util.RandomBytesForTest(t, 10), + } +} diff --git a/pkg/tests/testdb/policies.go b/pkg/tests/testdb/policies.go new file mode 100644 index 00000000..e8269c94 --- /dev/null +++ b/pkg/tests/testdb/policies.go @@ -0,0 +1,30 @@ +package testdb + +import ( + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/util" + "github.com/stretchr/testify/require" +) + +func BuildTestPolicyHierarchy(t require.TestingT, domainName string) []common.PolicyObject { + // Create one RPC and one SP for that name. 
+ rpc := &common.RPC{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, + SerialNumber: 1, + Version: 1, + PublicKey: util.RandomBytesForTest(t, 32), + CAName: "c0.com", + CASignature: util.RandomBytesForTest(t, 100), + } + sp := &common.SP{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, + CAName: "c0.com", + CASignature: util.RandomBytesForTest(t, 100), + RootCertSignature: util.RandomBytesForTest(t, 100), + } + return []common.PolicyObject{rpc, sp} +} diff --git a/pkg/util/random.go b/pkg/util/random.go new file mode 100644 index 00000000..a81e9429 --- /dev/null +++ b/pkg/util/random.go @@ -0,0 +1,15 @@ +package util + +import ( + "crypto/rand" + + "github.com/stretchr/testify/require" +) + +func RandomBytesForTest(t require.TestingT, size int) []byte { + buff := make([]byte, size) + n, err := rand.Read(buff) + require.NoError(t, err) + require.Equal(t, size, n) + return buff +} From 0a46c7014b52eaa79d2313f7a9acd089aebefb08 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 16 May 2023 17:46:03 +0200 Subject: [PATCH 112/187] Check PoA as well. 
--- pkg/mapserver/responder/old_responder_test.go | 223 ------------------ pkg/mapserver/responder/responder_test.go | 16 +- 2 files changed, 14 insertions(+), 225 deletions(-) delete mode 100644 pkg/mapserver/responder/old_responder_test.go diff --git a/pkg/mapserver/responder/old_responder_test.go b/pkg/mapserver/responder/old_responder_test.go deleted file mode 100644 index 57e39982..00000000 --- a/pkg/mapserver/responder/old_responder_test.go +++ /dev/null @@ -1,223 +0,0 @@ -package responder - -import ( - "context" - "io/ioutil" - "testing" - "time" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" - mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" - "github.com/netsec-ethz/fpki/pkg/mapserver/prover" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - "github.com/netsec-ethz/fpki/pkg/tests/testdb" - "github.com/stretchr/testify/require" -) - -// TestGetProof: test GetProof() -func TestOldGetProof(t *testing.T) { - return - - certs := []*ctx509.Certificate{} - - // load test certs - files, err := ioutil.ReadDir("../updater/testdata/certs/") - require.NoError(t, err) - - for _, file := range files { - cert, err := common.CTX509CertFromFile("../updater/testdata/certs/" + file.Name()) - require.NoError(t, err) - certs = append(certs, cert) - } - - // get mock responder - responder := getMockOldResponder(t, certs) - require.NoError(t, err) - - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - for _, cert := range certs { - proofs, err := responder.GetProof(ctx, cert.Subject.CommonName) - require.NoError(t, err) - - checkProofOld(t, *cert, proofs) - } -} - -func 
TestOldResponderWithPoP(t *testing.T) { - return - - ctx, cancelF := context.WithTimeout(context.Background(), time.Second) - defer cancelF() - - dbName := t.Name() - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - - err := testdb.CreateTestDB(ctx, dbName) - require.NoError(t, err) - defer func() { - err = testdb.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() - - mapUpdater, err := updater.NewMapUpdater(config, nil, 233) - require.NoError(t, err) - - mapUpdater.Fetcher.BatchSize = 10000 - const baseCTSize = 2 * 1000 - const count = 1 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", - baseCTSize, baseCTSize+count-1) - - n, err := mapUpdater.UpdateNextBatch(ctx) - require.NoError(t, err) - require.Equal(t, n, count) - - n, err = mapUpdater.UpdateNextBatch(ctx) - require.NoError(t, err) - require.Equal(t, n, 0) - - err = mapUpdater.CommitSMTChanges(ctx) - require.NoError(t, err) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - require.NoError(t, err) - - // manually get those certificates and make a list of the common names - // https://ct.googleapis.com/logs/argon2021/ct/v1/get-entries?start=2000&end=2001 - fetcher := logpicker.LogFetcher{ - URL: "https://ct.googleapis.com/logs/argon2021", - Start: baseCTSize, - End: baseCTSize + count - 1, - WorkerCount: 1, - BatchSize: 20, - } - certs, err := fetcher.FetchAllCertificates(ctx) - require.NoError(t, err) - require.Len(t, certs, count) - - // create responder and request proof for those names - responder, err := NewOldMapResponder(ctx, config, root, 233, "./testdata/mapserver_config.json") - require.NoError(t, err) - for _, cert := range certs { - responses, err := responder.GetProof(ctx, cert.Subject.CommonName) - require.NoError(t, err) - - for _, r := range responses { - t.Logf("%v : %s", r.PoI.ProofType, r.DomainEntry.DomainName) - } - - require.NotEmpty(t, responses) - checkProofOld(t, *cert, responses) - // ensure that the response for the whole name 
is a PoP - require.Equal(t, mapcommon.PoP, responses[len(responses)-1].PoI.ProofType, - "PoP not found for %s", cert.Subject.CommonName) - } -} - -// TestGetDomainProof: test getDomainProof() -func TestOldGetDomainProof(t *testing.T) { - return - - certs := []*ctx509.Certificate{} - - // load test certs - files, err := ioutil.ReadDir("../updater/testdata/certs/") - require.NoError(t, err) - - for _, file := range files { - cert, err := common.CTX509CertFromFile("../updater/testdata/certs/" + file.Name()) - require.NoError(t, err) - certs = append(certs, cert) - } - - responderWorker := getMockOldResponder(t, certs) - - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - for _, cert := range certs { - proofs, err := responderWorker.getProof(ctx, cert.Subject.CommonName) - require.NoError(t, err) - - checkProofOld(t, *cert, proofs) - } -} - -// getMockOldResponder builds a mock responder. -func getMockOldResponder(t require.TestingT, certs []*ctx509.Certificate) *OldMapResponder { - // update the certs, and get the mock db of SMT and db - conn, root, err := getUpdatedUpdater(t, certs) - require.NoError(t, err) - - smt, err := trie.NewTrie(root, common.SHA256Hash, conn) - require.NoError(t, err) - smt.CacheHeightLimit = 233 - - return newMapResponder(conn, smt) -} - -// getUpdatedUpdater builds an updater using a mock db, updates the certificates -// and returns the mock db. 
-func getUpdatedUpdater(t require.TestingT, certs []*ctx509.Certificate) (db.Conn, []byte, error) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - conn := testdb.NewMockDB() - smt, err := trie.NewTrie(nil, common.SHA256Hash, conn) - require.NoError(t, err) - smt.CacheHeightLimit = 233 - - updater := &updater.UpdaterTestAdapter{} - updater.SetDBConn(conn) - updater.SetSMT(smt) - - // Update the db using the certs and empty chains: - emptyChains := make([][]*ctx509.Certificate, len(certs)) - err = updater.UpdateCerts(ctx, certs, emptyChains) - require.NoError(t, err) - - err = updater.CommitSMTChanges(ctx) - require.NoError(t, err) - - return conn, updater.SMT().Root, nil -} - -func checkProofOld(t *testing.T, cert ctx509.Certificate, proofs []mapcommon.MapServerResponse) { - t.Helper() - caName := cert.Issuer.String() - require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, - "PoP not found for %s", cert.Subject.CommonName) - for _, proof := range proofs { - require.Contains(t, cert.Subject.CommonName, proof.DomainEntry.DomainName) - proofType, isCorrect, err := prover.VerifyProofByDomainOld(proof) - require.NoError(t, err) - require.True(t, isCorrect) - - if proofType == mapcommon.PoA { - // require.Empty(t, proof.DomainEntryBytes) - } - if proofType == mapcommon.PoP { - // // get the correct CA entry - // for _, caEntry := range domainEntry.Entries { - // if caEntry.CAName == caName { - // // check if the cert is in the CA entry - // for _, certRaw := range caEntry.DomainCerts { - // require.Equal(t, certRaw, cert.Raw) - // return - // } - // } - // } - _ = caName - return - } - } - require.Fail(t, "cert/CA not found") -} diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 1bc10379..f10d3619 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -21,7 +21,7 @@ import ( // TestProofWithPoP checks for 3 
domains: a.com (certs), b.com (policies), c.com (both), // that the proofs of presence work correctly, by ingesting all the material, updating the DB, // creating a responder, and checking those domains. -func TestProofWithPoP(t *testing.T) { +func TestProof(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() @@ -96,12 +96,24 @@ func TestProofWithPoP(t *testing.T) { assert.NoError(t, err) id = common.SHA256Hash32Bytes(certsC[0].Raw) checkProof(t, &id, proofChain) + + // Now check an absent domain. + proofChain, err = responder.GetProof(ctx, "absentdomain.domain") + assert.NoError(t, err) + checkProof(t, nil, proofChain) } // checkProof checks the proof to be correct. func checkProof(t *testing.T, payloadID *common.SHA256Output, proofs []*mapcommon.MapServerResponse) { t.Helper() - require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, "PoP not found") + // Determine if we are checking an absence or presence. + if payloadID == nil { + // Absence. + require.Equal(t, mapcommon.PoA, proofs[len(proofs)-1].PoI.ProofType, "PoA not found") + } else { + // Check the last component is present. + require.Equal(t, mapcommon.PoP, proofs[len(proofs)-1].PoI.ProofType, "PoP not found") + } for _, proof := range proofs { proofType, isCorrect, err := prover.VerifyProofByDomain(proof) require.NoError(t, err) From d6db4a81f2b266cd073de0810ddd478429454f52 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 17 May 2023 10:51:00 +0200 Subject: [PATCH 113/187] Test responder STH. Fix bugs. 
--- pkg/common/cert.go | 8 ++-- pkg/common/crypto.go | 34 +++++++------- pkg/common/crypto_test.go | 10 ++-- pkg/mapserver/responder/old_responder.go | 4 +- pkg/mapserver/responder/responder.go | 15 ++++-- pkg/mapserver/responder/responder_test.go | 57 +++++++++++++++++++++-- pkg/pca/pca.go | 2 +- 7 files changed, 92 insertions(+), 38 deletions(-) diff --git a/pkg/common/cert.go b/pkg/common/cert.go index aa0e1baf..85eb452a 100644 --- a/pkg/common/cert.go +++ b/pkg/common/cert.go @@ -70,18 +70,18 @@ func X509CertFromFile(fileName string) (*x509.Certificate, error) { return cert, nil } -// LoadRSAKeyPairFromFile: load rsa key pair from file -func LoadRSAKeyPairFromFile(keyPath string) (*rsa.PrivateKey, error) { +// LoadRSAPrivateKeyFromFile loads a RSA private key from file +func LoadRSAPrivateKeyFromFile(keyPath string) (*rsa.PrivateKey, error) { bytes, err := ioutil.ReadFile(keyPath) if err != nil { - return nil, fmt.Errorf("LoadRSAKeyPairFromFile | read file | %w", err) + return nil, fmt.Errorf("LoadRSAPrivateKeyFromFile | read file | %w", err) } block, _ := pem.Decode(bytes) keyPair, err := x509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { - return nil, fmt.Errorf("LoadRSAKeyPairFromFile | ParsePKCS1PrivateKey | %w", err) + return nil, fmt.Errorf("LoadRSAPrivateKeyFromFile | ParsePKCS1PrivateKey | %w", err) } return keyPair, nil } diff --git a/pkg/common/crypto.go b/pkg/common/crypto.go index d7add77f..ffca5b22 100644 --- a/pkg/common/crypto.go +++ b/pkg/common/crypto.go @@ -25,18 +25,11 @@ const ( RSA PublicKeyAlgorithm = iota ) -// SignStructRSASHA256: generate a signature using SHA256 and RSA -func SignStructRSASHA256(s any, privKey *rsa.PrivateKey) ([]byte, error) { - bytes, err := ToJSON(s) +func SignBytes(b []byte, key *rsa.PrivateKey) ([]byte, error) { + hashOutput := sha256.Sum256(b) + signature, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, hashOutput[:]) if err != nil { - return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) 
- } - - hashOutput := sha256.Sum256(bytes) - - signature, err := rsa.SignPKCS1v15(rand.Reader, privKey, crypto.SHA256, hashOutput[:]) - if err != nil { - return nil, fmt.Errorf("SignStructRSASHA256 | SignPKCS1v15 | %w", err) + return nil, fmt.Errorf("SignBytes | SignPKCS1v15 | %w", err) } return signature, nil } @@ -50,7 +43,7 @@ func RCSRCreateSignature(domainOwnerPrivKey *rsa.PrivateKey, rcsr *RCSR) error { // clear signature; normally should be empty rcsr.Signature = []byte{} - signature, err := SignStructRSASHA256(rcsr, domainOwnerPrivKey) + signature, err := signStructRSASHA256(rcsr, domainOwnerPrivKey) if err != nil { return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) } @@ -67,7 +60,7 @@ func RCSRGenerateRPCSignature(rcsr *RCSR, prevPrivKeyOfPRC *rsa.PrivateKey) erro rcsr.Signature = []byte{} rcsr.PRCSignature = []byte{} - rpcSignature, err := SignStructRSASHA256(rcsr, prevPrivKeyOfPRC) + rpcSignature, err := signStructRSASHA256(rcsr, prevPrivKeyOfPRC) if err != nil { return fmt.Errorf("RCSRGenerateRPCSignature | SignStructRSASHA256 | %w", err) } @@ -145,7 +138,7 @@ func RCSRGenerateRPC(rcsr *RCSR, notBefore time.Time, serialNumber int, caPrivKe SPTs: []SPT{}, } - signature, err := SignStructRSASHA256(rpc, caPrivKey) + signature, err := signStructRSASHA256(rpc, caPrivKey) if err != nil { return nil, fmt.Errorf("RCSRGenerateRPC | SignStructRSASHA256 | %w", err) } @@ -185,7 +178,7 @@ func RPCVerifyCASignature(caCert *x509.Certificate, rpc *RPC) error { // DomainOwnerSignSP: Used by domain owner to sign the PC func DomainOwnerSignPSR(domainOwnerPrivKey *rsa.PrivateKey, psr *PSR) error { - signature, err := SignStructRSASHA256(psr, domainOwnerPrivKey) + signature, err := signStructRSASHA256(psr, domainOwnerPrivKey) if err != nil { return fmt.Errorf("DomainOwnerSignPC | SignStructRSASHA256 | %w", err) } @@ -231,7 +224,7 @@ func CASignSP(psr *PSR, caPrivKey *rsa.PrivateKey, caName string, serialNum int) SerialNumber: serialNum, } - 
caSignature, err := SignStructRSASHA256(sp, caPrivKey) + caSignature, err := signStructRSASHA256(sp, caPrivKey) if err != nil { return &SP{}, fmt.Errorf("CASignSP | SignStructRSASHA256 | %w", err) } @@ -262,3 +255,12 @@ func VerifyCASigInSP(caCert *x509.Certificate, sp *SP) error { } return nil } + +// signStructRSASHA256: generate a signature using SHA256 and RSA +func signStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { + b, err := ToJSON(s) + if err != nil { + return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) + } + return SignBytes(b, key) +} diff --git a/pkg/common/crypto_test.go b/pkg/common/crypto_test.go index 4c2b89a5..7487ce55 100644 --- a/pkg/common/crypto_test.go +++ b/pkg/common/crypto_test.go @@ -11,7 +11,7 @@ import ( // TestSignatureOfRCSR: Generate RCSR -> generate signature for RCSR -> verify signature func TestSignatureOfRCSR(t *testing.T) { - privKey, err := LoadRSAKeyPairFromFile("./testdata/clientkey.pem") + privKey, err := LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") require.NoError(t, err, "load RSA key error") test := &RCSR{ @@ -43,7 +43,7 @@ func TestIssuanceOfRPC(t *testing.T) { // ------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := LoadRSAKeyPairFromFile("./testdata/clientkey.pem") + privKey, err := LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") rcsr := &RCSR{ @@ -75,7 +75,7 @@ func TestIssuanceOfRPC(t *testing.T) { err = RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") - pcaPrivKey, err := LoadRSAKeyPairFromFile("./testdata/serverkey.pem") + pcaPrivKey, err := LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") rpc, err := RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") @@ -97,7 +97,7 @@ func TestIssuanceOfSP(t *testing.T) { // 
------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := LoadRSAKeyPairFromFile("./testdata/clientkey.pem") + privKey, err := LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") rcsr := &RCSR{ @@ -128,7 +128,7 @@ func TestIssuanceOfSP(t *testing.T) { err = RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") - pcaPrivKey, err := LoadRSAKeyPairFromFile("./testdata/serverkey.pem") + pcaPrivKey, err := LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") rpc, err := RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go index 84975e8d..0844cd13 100644 --- a/pkg/mapserver/responder/old_responder.go +++ b/pkg/mapserver/responder/old_responder.go @@ -66,14 +66,14 @@ func (r *OldMapResponder) loadPrivKeyAndSignTreeHead(mapServerConfigPath string) return fmt.Errorf("ReadConfigFromFile | %w", err) } - keyPair, err := common.LoadRSAKeyPairFromFile(config.KeyPath) + keyPair, err := common.LoadRSAPrivateKeyFromFile(config.KeyPath) if err != nil { return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) } r.rsaKeyPair = keyPair - signature, err := common.SignStructRSASHA256(r.smt.Root, keyPair) + signature, err := common.SignBytes(r.smt.Root, keyPair) if err != nil { return fmt.Errorf("SignStructRSASHA256 | %w", err) } diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index 44de1215..3285e4b8 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -36,8 +36,8 @@ func NewMapResponder(ctx context.Context, configFile string, conn db.Conn) (*Map conn: conn, smt: smt, } - r.signTreeHead(configFile) - return r, nil + + return r, r.signTreeHead(configFile) } func (r *MapResponder) GetProof(ctx 
context.Context, domainName string, @@ -96,12 +96,17 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, ProofKey: proofKey, ProofValue: proofValue, }, - // TreeHeadSig: , TODO(juagargi) + TreeHeadSig: r.SignedTreeHead(), } } return proofList, nil } +// SignedTreeHead returns a copy of the Signed Tree Head (STH). +func (r *MapResponder) SignedTreeHead() []byte { + return append(r.signedTreeHead[:0:0], r.signedTreeHead...) +} + func (r *MapResponder) signTreeHead(configFile string) error { // Load configuration. config := &MapserverConfig{} @@ -111,13 +116,13 @@ func (r *MapResponder) signTreeHead(configFile string) error { } // Load private key from configuration. - keyPair, err := common.LoadRSAKeyPairFromFile(config.KeyPath) + privateKey, err := common.LoadRSAPrivateKeyFromFile(config.KeyPath) if err != nil { return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) } // Sign the tree head. - signature, err := common.SignStructRSASHA256(r.smt.Root, keyPair) + signature, err := common.SignBytes(r.smt.Root, privateKey) if err != nil { return fmt.Errorf("SignStructRSASHA256 | %w", err) } diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index f10d3619..708d3c78 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -2,10 +2,10 @@ package responder import ( "context" + "encoding/hex" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" @@ -18,6 +18,53 @@ import ( "github.com/netsec-ethz/fpki/pkg/util" ) +func TestNewResponder(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + defer cancelF() + + // DB will have the same name as the test function. + dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) + + // Create a new DB with that name. 
On exiting the function, it will be removed. + err := testdb.CreateTestDB(ctx, dbName) + require.NoError(t, err) + defer func() { + err = testdb.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() + + // Connect to the DB. + conn, err := mysql.Connect(config) + require.NoError(t, err) + defer conn.Close() + + // Create a responder (root will be nil). + responder, err := NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) + require.NoError(t, err) + // Check its tree head is nil. + require.Nil(t, responder.smt.Root) + // Check its STH is not nil. + sth := responder.SignedTreeHead() + require.Equal(t, 8*common.SHA256Size, len(sth), + "bad length of STH: %s", hex.EncodeToString(sth)) + + // Repeat test with a non nil root. + // Insert a mockup root. + root := common.SHA256Hash32Bytes([]byte{0}) + err = conn.SaveRoot(ctx, &root) + require.NoError(t, err) + // Create a responder (root will NOT be nil). + responder, err = NewMapResponder(ctx, "./testdata/mapserver_config.json", conn) + require.NoError(t, err) + // Check its tree head is NOT nil. + require.NotNil(t, responder.smt.Root) + // Check its STH is not nil. + sth2 := responder.SignedTreeHead() + require.Equal(t, 8*common.SHA256Size, len(sth2), + "bad length of STH: %s", hex.EncodeToString(sth2)) +} + // TestProofWithPoP checks for 3 domains: a.com (certs), b.com (policies), c.com (both), // that the proofs of presence work correctly, by ingesting all the material, updating the DB, // creating a responder, and checking those domains. 
@@ -81,25 +128,25 @@ func TestProof(t *testing.T) { // Check a.com: proofChain, err := responder.GetProof(ctx, "a.com") - assert.NoError(t, err) + require.NoError(t, err) id := common.SHA256Hash32Bytes(certsA[0].Raw) checkProof(t, &id, proofChain) // Check b.com: proofChain, err = responder.GetProof(ctx, "b.com") - assert.NoError(t, err) + require.NoError(t, err) id = common.SHA256Hash32Bytes(policiesB[0].Raw()) checkProof(t, &id, proofChain) // Check b.com: proofChain, err = responder.GetProof(ctx, "c.com") - assert.NoError(t, err) + require.NoError(t, err) id = common.SHA256Hash32Bytes(certsC[0].Raw) checkProof(t, &id, proofChain) // Now check an absent domain. proofChain, err = responder.GetProof(ctx, "absentdomain.domain") - assert.NoError(t, err) + require.NoError(t, err) checkProof(t, nil, proofChain) } diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 8c8b8be2..41eb5a11 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -57,7 +57,7 @@ func NewPCA(configPath string) (*PCA, error) { return nil, fmt.Errorf("NewPCA | ReadConfigFromFile | %w", err) } // load rsa key pair - keyPair, err := common.LoadRSAKeyPairFromFile(config.KeyPath) + keyPair, err := common.LoadRSAPrivateKeyFromFile(config.KeyPath) if err != nil { return nil, fmt.Errorf("NewPCA | LoadRSAKeyPairFromFile | %w", err) } From 3298a626e3dc70b6ed29e9bf4a07573b9ae31741 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 17 May 2023 10:53:58 +0200 Subject: [PATCH 114/187] Remove old responder. 
--- pkg/mapserver/responder/old_responder.go | 264 ----------------------- 1 file changed, 264 deletions(-) delete mode 100644 pkg/mapserver/responder/old_responder.go diff --git a/pkg/mapserver/responder/old_responder.go b/pkg/mapserver/responder/old_responder.go deleted file mode 100644 index 0844cd13..00000000 --- a/pkg/mapserver/responder/old_responder.go +++ /dev/null @@ -1,264 +0,0 @@ -package responder - -import ( - "context" - "crypto/rsa" - "fmt" - "strings" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" -) - -// OldMapResponder: A map responder, which is responsible for receiving client's request. Only read from db. -type OldMapResponder struct { - conn db.Conn - getProofLimiter chan struct{} - smt *trie.Trie - signedTreeHead []byte - rsaKeyPair *rsa.PrivateKey -} - -// NewOldMapResponder: return a new responder -func NewOldMapResponder( - ctx context.Context, - config *db.Configuration, - root []byte, - cacheHeight int, - mapServerConfigPath string, -) (*OldMapResponder, error) { - - // new db connection for SMT - conn, err := mysql.Connect(config) - if err != nil { - return nil, fmt.Errorf("NewMapResponder | Connect | %w", err) - } - - smt, err := trie.NewTrie(root, common.SHA256Hash, conn) - if err != nil { - return nil, fmt.Errorf("NewMapResponder | NewTrie | %w", err) - } - smt.CacheHeightLimit = cacheHeight - - // load cache - err = smt.LoadCache(ctx, root) - if err != nil { - return nil, fmt.Errorf("NewMapResponder | LoadCache | %w", err) - } - - mapServer := newMapResponder(conn, smt) - - err = mapServer.loadPrivKeyAndSignTreeHead(mapServerConfigPath) - if err != nil { - return nil, fmt.Errorf("NewMapResponder | loadPrivKey | %w", err) - } - - return mapServer, nil -} - -func (r *OldMapResponder) 
loadPrivKeyAndSignTreeHead(mapServerConfigPath string) error { - config := &MapserverConfig{} - err := ReadConfigFromFile(config, mapServerConfigPath) - if err != nil { - return fmt.Errorf("ReadConfigFromFile | %w", err) - } - - keyPair, err := common.LoadRSAPrivateKeyFromFile(config.KeyPath) - if err != nil { - return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) - } - - r.rsaKeyPair = keyPair - - signature, err := common.SignBytes(r.smt.Root, keyPair) - if err != nil { - return fmt.Errorf("SignStructRSASHA256 | %w", err) - } - - r.signedTreeHead = signature - - return nil -} - -func (r *OldMapResponder) GetSignTreeHead() []byte { - return r.signedTreeHead -} - -func newMapResponder(conn db.Conn, smt *trie.Trie) *OldMapResponder { - return &OldMapResponder{ - conn: conn, - getProofLimiter: make(chan struct{}, 64), // limit getProof to 64 concurrent routines - smt: smt, - } -} - -// GetProof: get proofs for one domain -func (r *OldMapResponder) GetProof(ctx context.Context, domainName string) ([]mapCommon.MapServerResponse, error) { - r.getProofLimiter <- struct{}{} - defer func() { <-r.getProofLimiter }() - return r.getProof(ctx, domainName) -} - -// GetRoot: get current root of the smt -func (mapResponder *OldMapResponder) GetRoot() []byte { - return mapResponder.smt.Root -} - -// Close: close db -func (mapResponder *OldMapResponder) Close() error { - err := mapResponder.conn.Close() - if err != nil { - return err - } - return mapResponder.smt.Close() -} - -func (r *OldMapResponder) getProof(ctx context.Context, domainName string) ( - []mapCommon.MapServerResponse, error) { - - return nil, nil - // // check domain name first - // domainList, err := domain.ParseDomainName(domainName) - // if err != nil { - // if err == domain.ErrInvalidDomainName { - // return nil, err - // } - // return nil, fmt.Errorf("GetDomainProof | parseDomainName | %w", err) - // } - // proofsResult := make([]mapCommon.MapServerResponse, 0, len(domainList)) - - // for _, domain := range 
domainList { - // domainHash := common.SHA256Hash32Bytes([]byte(domain)) - - // proof, isPoP, proofKey, ProofValue, err := r.smt.MerkleProof(ctx, domainHash[:]) - // if err != nil { - // return nil, fmt.Errorf("getDomainProof | MerkleProof | %w", err) - // } - - // var proofType mapCommon.ProofType - // var payloadID *common.SHA256Output - // domainBytes := []byte{} - // // If it is PoP, query the domain entry. If it is PoA, directly return the PoA - // if isPoP { - // proofType = mapCommon.PoP - // payloadID, domainBytes, err = r.conn.RetrieveDomainEntry(ctx, domainHash) - // if err != nil { - // return nil, fmt.Errorf("GetDomainProof | %w", err) - // } - // } else { - // proofType = mapCommon.PoA - // } - - // proofsResult = append(proofsResult, mapCommon.MapServerResponse{ - // Domain: domain, - // PoI: mapCommon.PoI{ - // Proof: proof, - // Root: r.smt.Root, - // ProofType: proofType, - // ProofKey: proofKey, - // ProofValue: ProofValue}, - // DomainEntryID: payloadID, - // DomainEntryBytes: domainBytes, - // TreeHeadSig: r.signedTreeHead, - // }) - // } - // - // return proofsResult, nil -} - -func (mapResponder *OldMapResponder) GetDomainProofs(ctx context.Context, domainNames []string) ( - map[string][]*mapCommon.MapServerResponse, error) { - - return nil, nil - // domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) - // if err != nil { - // return nil, fmt.Errorf("GetDomainProofs | getMapping | %w", err) - // } - - // domainToFetch, err := mapResponder.getProofFromSMT(ctx, domainProofMap) - // if err != nil { - // return nil, fmt.Errorf("GetDomainProofs | getProofFromSMT | %w", err) - // } - - // result, err := mapResponder.conn.RetrieveDomainEntries(ctx, domainToFetch) - // if err != nil { - // return nil, fmt.Errorf("GetDomainProofs | RetrieveKeyValuePairMultiThread | %w", err) - // } - // for _, keyValuePair := range result { - // domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value - // } - - 
// return domainResultMap, nil -} - -func getMapping(domainNames []string, signedTreeHead []byte) ( - map[string][]*mapCommon.MapServerResponse, map[common.SHA256Output]*mapCommon.MapServerResponse, error) { - - return nil, nil, nil - // domainResultMap := make(map[string][]*mapCommon.MapServerResponse) - // domainProofMap := make(map[common.SHA256Output]*mapCommon.MapServerResponse) - - // for _, domainName := range domainNames { - // _, ok := domainResultMap[domainName] - // if !ok { - // // list of proofs for this domain - // resultsList := []*mapCommon.MapServerResponse{} - // subDomainNames, err := domain.ParseDomainName(domainName) - - // if err != nil { - // return nil, nil, fmt.Errorf("getMapping | parseDomainName | %w", err) - // } - // for _, subDomainName := range subDomainNames { - // var domainHash32Bytes common.SHA256Output - // copy(domainHash32Bytes[:], common.SHA256Hash([]byte(subDomainName))) - // subDomainResult, ok := domainProofMap[domainHash32Bytes] - // if ok { - // resultsList = append(resultsList, subDomainResult) - // } else { - // domainProofMap[domainHash32Bytes] = &mapCommon.MapServerResponse{Domain: subDomainName, TreeHeadSig: signedTreeHead} - // resultsList = append(resultsList, domainProofMap[domainHash32Bytes]) - // } - // } - // domainResultMap[domainName] = resultsList - // } - // } - // return domainResultMap, domainProofMap, nil -} - -func (mapResponder *OldMapResponder) getProofFromSMT(ctx context.Context, - domainMap map[common.SHA256Output]*mapCommon.MapServerResponse, -) ([]*common.SHA256Output, error) { - - domainNameToFetchFromDB := []*common.SHA256Output{} - for key, value := range domainMap { - proof, isPoP, proofKey, ProofValue, err := mapResponder.smt.MerkleProof(ctx, key[:]) - if err != nil { - return nil, fmt.Errorf("getProofFromSMT | MerkleProof | %w", err) - } - - value.PoI = mapCommon.PoI{Proof: proof, ProofKey: proofKey, ProofValue: ProofValue, Root: mapResponder.smt.Root} - - switch { - case isPoP: - 
value.PoI.ProofType = mapCommon.PoP - domainNameToFetchFromDB = append(domainNameToFetchFromDB, &key) - - case !isPoP: - value.PoI.ProofType = mapCommon.PoA - } - } - return domainNameToFetchFromDB, nil -} - -// repeatStmt returns ( (?,..inner..,?), ...outer... ) -func repeatStmt(outer int, inner int) string { - components := make([]string, inner) - for i := 0; i < len(components); i++ { - components[i] = "?" - } - toRepeat := "(" + strings.Join(components, ",") + ")" - return strings.Repeat(toRepeat+",", outer-1) + toRepeat -} From fa711c8c49a41dddff01f7098184987cd5162be0 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 22 May 2023 14:43:42 +0200 Subject: [PATCH 115/187] Fix logverifier tests pass. --- pkg/logverifier/logverifier_test.go | 192 ++++- pkg/logverifier/testdata/NewSTH.json | 1 - pkg/logverifier/testdata/OldSTH.json | 1 - pkg/logverifier/testdata/POI.json | 1 - pkg/logverifier/testdata/rpc.json | 20 - pkg/logverifier/testdata/sp.json | 22 - pkg/logverifier/verifier.go | 93 ++- pkg/tests/defs.go | 8 + pkg/tests/must.go | 23 + pkg/util/random.go | 2 +- pkg/util/types.go | 10 + pkg/util/types_test.go | 59 ++ tests/benchmark/db_benchmark/db.go | 4 +- .../updater_benchmark/main.go | 4 +- .../wholesys_benchmark_PoA/main.go | 257 ------ .../wholesys_benchmark_PoP/main.go | 780 ------------------ .../wholesys_benchmark_PoP_diffSize/main.go | 338 -------- tests/benchmark/smt_benchmark/main.go | 5 +- 18 files changed, 321 insertions(+), 1499 deletions(-) delete mode 100644 pkg/logverifier/testdata/NewSTH.json delete mode 100644 pkg/logverifier/testdata/OldSTH.json delete mode 100644 pkg/logverifier/testdata/POI.json delete mode 100644 pkg/logverifier/testdata/rpc.json delete mode 100644 pkg/logverifier/testdata/sp.json create mode 100644 pkg/tests/defs.go create mode 100644 pkg/tests/must.go create mode 100644 pkg/util/types_test.go delete mode 100644 tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go delete mode 100644 
tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go delete mode 100644 tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 7ec696cd..b54d8a0f 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -1,71 +1,195 @@ package logverifier import ( + "math/rand" "testing" "github.com/google/trillian" + "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" ) -// TestVerification: Test logverifier.VerifyInclusionByHash() -func TestVerification(t *testing.T) { - proof, err := common.JsonFileToProof("./testdata/POI.json") - require.NoError(t, err, "Json File To Proof Error") - - sth, err := common.JSONToLogRoot([]byte("{\"TreeSize\":2,\"RootHash\":\"VsGAf6yfqGWcEno9aRBj3O1N9E8fY/XE9nJmYKjefPM=\",\"TimestampNanos\":1661986742112252000,\"Revision\":0,\"Metadata\":\"\"}")) - require.NoError(t, err, "Json bytes To STH Error") - - logverifier := NewLogVerifier(nil) - - rpc, err := common.JsonFileToRPC("./testdata/rpc.json") - require.NoError(t, err, "Json File To RPC Error") - - rpc.SPTs = []common.SPT{} - - rpcBytes, err := common.ToJSON(rpc) +func TestVerifyInclusionByHash(t *testing.T) { + // Because we are using "random" bytes deterministically here, set a fixed seed. + rand.Seed(1) + + // Create a mock proof. + proof := &trillian.Proof{ + Hashes: [][]byte{ + tests.MustDecodeBase64(t, "RCW7/AbelL3TNWgMot/jsSAUfvxIepMGEZNqvcZTJuw="), + }, + } + + // Create a mock STH with the correct root hash. + sth := &types.LogRootV1{ + TreeSize: 2, + RootHash: tests.MustDecodeBase64(t, "BSH/yAK1xdSSNMxzNbBD4pdAsqUin8L3st6w9su+nRk="), + TimestampNanos: 1661986742112252000, + Revision: 0, + Metadata: []byte{}, + } + + // Mock up a RPC. 
+ rpc := &common.RPC{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: "fpki.com", + }, + SerialNumber: 2, + Version: 1, + PublicKey: util.RandomBytesForTest(t, 32), + NotBefore: util.TimeFromSecs(42), + NotAfter: util.TimeFromSecs(142), + CAName: "pca", + TimeStamp: util.TimeFromSecs(100), + CASignature: util.RandomBytesForTest(t, 32), + } + + // Serialize it without SPTs. + serializedRPC, err := common.ToJSON(rpc) require.NoError(t, err, "Json Struct To Bytes Error") - rpcHash := logverifier.HashLeaf(rpcBytes) + // New log verifier and hash the RPC. + logverifier := NewLogVerifier(nil) + rpcHash := logverifier.HashLeaf(serializedRPC) + // Check that VerifyInclusionByHash works: err = logverifier.VerifyInclusionByHash(sth, rpcHash, []*trillian.Proof{proof}) require.NoError(t, err, "Verify Inclusion By Hash Error") } -// TestConsistencyBetweenSTH: test logverifier.VerifyRoot() +// TestConsistencyBetweenSTH checks that two STHs are consistently sequential by using VerifyRoot. func TestConsistencyBetweenSTH(t *testing.T) { - sth, err := common.JsonFileToSTH("./testdata/OldSTH.json") - require.NoError(t, err, "Json File To STH Error") - - newSTH, err := common.JsonFileToSTH("./testdata/NewSTH.json") - require.NoError(t, err, "Json File To STH Error") + sth := &types.LogRootV1{ + Revision: 0, + TreeSize: 2, + TimestampNanos: 1651518756445580000, + RootHash: tests.MustDecodeBase64(t, "qVKbXMndXP7Pd+rJm9NuUsgENjgXeMgf9CsXtmNxtxM="), + Metadata: []byte{}, + } + + newSTH := &types.LogRootV1{ + Revision: 0, + TreeSize: 3, + TimestampNanos: 1651518756732994000, + RootHash: tests.MustDecodeBase64(t, "ua6XccS1nESMgxBA3gh+pfAI9DgIrPD6o1Ib7gXS4fI="), + Metadata: []byte{}, + } + + consistencyProof := [][]byte{ + tests.MustDecodeBase64(t, "QGoGEyLcU/fXIKJr9u+xTak8KUbmAPFs8aALVsjdeng="), + } logverifier := NewLogVerifier(nil) - - consistencyProof := [][]byte{{64, 106, 6, 19, 34, 220, 83, 247, 215, 32, 162, 107, 246, 239, 177, 77, 169, 60, 41, 70, 230, 0, 241, 108, - 241, 
160, 11, 86, 200, 221, 122, 120}} - - _, err = logverifier.VerifyRoot(sth, newSTH, consistencyProof) + _, err := logverifier.VerifyRoot(sth, newSTH, consistencyProof) require.NoError(t, err, "Verify Root Error") } func TestCheckRPC(t *testing.T) { - rpc, err := common.JsonFileToRPC("./testdata/rpc.json") - require.NoError(t, err, "Json File To RPC Error") + // Because we are using "random" bytes deterministically here, set a fixed seed. + rand.Seed(1) + + // Mock a STH with the right root hash. + sth := &types.LogRootV1{ + TreeSize: 2, + RootHash: tests.MustDecodeBase64(t, "qtkcR3q27tgl90D5Wl1yCRYPEcvXcDvqEi1HH1mnffg="), + TimestampNanos: 1661986742112252000, + Revision: 0, + Metadata: []byte{}, + } + serializedSTH, err := common.ToJSON(sth) + require.NoError(t, err) - logverifier := NewLogVerifier(nil) + // Mock a PoI. + poi := []*trillian.Proof{ + { + LeafIndex: 1, + Hashes: [][]byte{util.RandomBytesForTest(t, 32)}, + }, + } + serializedPoI, err := common.ToJSON(poi) + require.NoError(t, err) + // Mock a RPC. + rpc := &common.RPC{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: "fpki.com", + }, + SerialNumber: 2, + Version: 1, + PublicKey: util.RandomBytesForTest(t, 32), + NotBefore: util.TimeFromSecs(42), + NotAfter: util.TimeFromSecs(142), + CAName: "pca", + TimeStamp: util.TimeFromSecs(100), + CASignature: util.RandomBytesForTest(t, 32), + SPTs: []common.SPT{ + { + AddedTS: util.TimeFromSecs(99), + STH: serializedSTH, + PoI: serializedPoI, + }, + }, + } + + // Check VerifyRPC. + logverifier := NewLogVerifier(nil) err = logverifier.VerifyRPC(rpc) require.NoError(t, err) } func TestCheckSP(t *testing.T) { - sp, err := common.JsonFileToSP("./testdata/sp.json") - require.NoError(t, err, "Json File To RPC Error") + // Because we are using "random" bytes deterministically here, set a fixed seed. + rand.Seed(3) + + // Mock a STH with the right root hash. 
+ sth := &types.LogRootV1{ + TreeSize: 2, + RootHash: tests.MustDecodeBase64(t, "8rAPQQeydFrBYHkreAlISGoGeHXFLlTqWM8Xb0wJNiY="), + TimestampNanos: 1661986742112252000, + Revision: 0, + Metadata: []byte{}, + } + serializedSTH, err := common.ToJSON(sth) + require.NoError(t, err) - logverifier := NewLogVerifier(nil) + // Mock a PoI. + poi := []*trillian.Proof{ + { + LeafIndex: 1, + Hashes: [][]byte{util.RandomBytesForTest(t, 32)}, + }, + } + serializedPoI, err := common.ToJSON(poi) + require.NoError(t, err) + // Mock an SP. + sp := &common.SP{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: "fpki.com", + }, + Policies: common.Policy{ + TrustedCA: []string{"US CA"}, + }, + TimeStamp: util.TimeFromSecs(444), + CAName: "pca", + SerialNumber: 4, + CASignature: util.RandomBytesForTest(t, 32), + RootCertSignature: util.RandomBytesForTest(t, 32), + SPTs: []common.SPT{ + { + AddedTS: util.TimeFromSecs(444), + STH: serializedSTH, + PoI: serializedPoI, + }, + }, + } + + // Check VerifySP works. 
+ logverifier := NewLogVerifier(nil) err = logverifier.VerifySP(sp) require.NoError(t, err) } diff --git a/pkg/logverifier/testdata/NewSTH.json b/pkg/logverifier/testdata/NewSTH.json deleted file mode 100644 index b0eae55a..00000000 --- a/pkg/logverifier/testdata/NewSTH.json +++ /dev/null @@ -1 +0,0 @@ -{"TreeSize":3,"RootHash":"ua6XccS1nESMgxBA3gh+pfAI9DgIrPD6o1Ib7gXS4fI=","TimestampNanos":1651518756732994000,"Revision":0,"Metadata":""} \ No newline at end of file diff --git a/pkg/logverifier/testdata/OldSTH.json b/pkg/logverifier/testdata/OldSTH.json deleted file mode 100644 index b3e4445d..00000000 --- a/pkg/logverifier/testdata/OldSTH.json +++ /dev/null @@ -1 +0,0 @@ -{"TreeSize":2,"RootHash":"qVKbXMndXP7Pd+rJm9NuUsgENjgXeMgf9CsXtmNxtxM=","TimestampNanos":1651518756445580000,"Revision":0,"Metadata":""} \ No newline at end of file diff --git a/pkg/logverifier/testdata/POI.json b/pkg/logverifier/testdata/POI.json deleted file mode 100644 index 03075969..00000000 --- a/pkg/logverifier/testdata/POI.json +++ /dev/null @@ -1 +0,0 @@ -{"hashes":["RCW7/AbelL3TNWgMot/jsSAUfvxIepMGEZNqvcZTJuw="]} \ No newline at end of file diff --git a/pkg/logverifier/testdata/rpc.json b/pkg/logverifier/testdata/rpc.json deleted file mode 100644 index 79bc20be..00000000 --- a/pkg/logverifier/testdata/rpc.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "SerialNumber": 2, - "Subject": "fpki.com", - "Version": 1, - "PublicKey": 
"LS0tLS1CRUdJTiBSU0EgUFVCTElDIEtFWS0tLS0tCk1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcUxGV2xPSnhXdGlCVmQ0WFRmclMKazVnbmpqbDJocXhvektteGJMeDNSWjRjdTNMSlNraUdzVU5JdjZOc3NQRnA4ZnBPY0NLbWhMWFlRalUvd0NTYwpOd2NZeUV1OHF5VDVrbUZqc2ZIdnFsTWxtQjlyZlVwaDVJNmxZNUJKaDRWdDN1SE1ScEdTUjRpWitHekd5di91CnJJVGc0Z0VyR3R6blB2MklYV1Q1Vkg5OHVjcWo5aEpXK3hCbmZmQUxmOWtwRUwwc0RuaWkycGRVZ0o0OHR2YisKQjVRbEhBQ0NMTkh1V3BtektNQnpDMUJsWGh3L1ozSzA3VnljRWFhSGoycDRpUVRIQ2JsN2NUR3BQRzBIUHF2dwpiZEVSc3Q5aXp5anplcmVIeXFUblQxMFhEdE5EZHNyYm9USWpxOVRRTnQ0NFN2bFkxN2NncXZLNWJ5L3RqT043Ci9RSURBUUFCCi0tLS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0K", - "NotBefore": "2022-09-01T00:59:01.326624+02:00", - "NotAfter": "2022-11-30T00:59:01.326624+01:00", - "CAName": "pca", - "TimeStamp": "2022-09-01T00:59:01.326624+02:00", - "CASignature": "YdE6/zbzSihPGYuC3NBoIlgMZZnEWZPE0LJHz7WJo7cfnRhqlkxMr7q0PxecUoAPon6LL//fyEy9l76+pa1ksJczJL3B6nJfqxR9/mwJEuKsRZSGjJYAHL9Bolkzebr2QvxhEYJcey8LT68yvC0NgXsC0bgq19Bg3gv0ipsNKDAxzHy/VVTL8aMWUjEnX6xr4j3MiupnBMIqSwDCgRLQ4OM4n7Za16fTzmjS0IgBOVLYmgKjggMWBtHgb5Fp9xIalSS4xhRB6eZT3ia+P4KqI8m2cbBod2yBDRURWC1qDsXxfFdofMsLYWKyrf2gpJbAX4vW70vdxlV898ZlKdTbPg==", - "SPTs": [ - { - "AddedTS": "0001-01-01T00:00:00Z", - "STH": "eyJUcmVlU2l6ZSI6MiwiUm9vdEhhc2giOiJWc0dBZjZ5ZnFHV2NFbm85YVJCajNPMU45RThmWS9YRTluSm1ZS2plZlBNPSIsIlRpbWVzdGFtcE5hbm9zIjoxNjYxOTg2NzQyMTEyMjUyMDAwLCJSZXZpc2lvbiI6MCwiTWV0YWRhdGEiOiIifQ==", - "PoI": [ - "eyJoYXNoZXMiOlsiUkNXNy9BYmVsTDNUTldnTW90L2pzU0FVZnZ4SWVwTUdFWk5xdmNaVEp1dz0iXX0=" - ] - } - ] -} \ No newline at end of file diff --git a/pkg/logverifier/testdata/sp.json b/pkg/logverifier/testdata/sp.json deleted file mode 100644 index 38e6e649..00000000 --- a/pkg/logverifier/testdata/sp.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "Policies": { - "TrustedCA": [ - "US CA" - ] - }, - "TimeStamp": "2022-09-01T00:59:02.137125+02:00", - "Subject": "fpki.com", - "CAName": "pca", - "SerialNumber": 4, - "CASignature": 
"Kpu9a+5xgpUyE5Mr8hVAXK+DzdUknnnzbifTl3UMESG/eHOef5vKkwYEijzGUVUIh/TM8UtlJhb5vNsLeFQdjSpKD6Dp/vl8KISoKST5ogv0YS2fzFtzo2PnAbc1L4ilS8gtp3CmFBVYKUXlm6XoKemNizMIzp9mm+ihRYuiwstFJQwHzEoKPjAjnIUNjiGvYPOpufubZC3nUpuIz7GbRbSWIKolx4ZlOTStdE7pERJWzuVVU7icJabV5h/66RGmLFTC6s+6HaRGDnajoS7Jo2trKW5wlqaBIcgjmYrIqUyi2AxMeQRyEFndJz4QyunzFkuzj9pfi/VcE/sU8VRDaQ==", - "RootCertSignature": "MtUoa4E6ArLuVCJa8XZOeLDOyeMvHVQccWpjJUDHlxgG384bTFiMExHCOmei4DgS0fB/uMLOjxAGeYVv5Tq+2GydJcarU73dtskeIRTfPmgLMr6/dZ6rWRv8xWcwMiF5yIjGVemKtRIIhgGKQ0+dpDaxYuWKNyuOM9W8UyRYWmzmGbFyuaqLF7fP1zq68mViU0sPldRv7ru1moSvCqV9tMGOfzilBKujon49Dtyvi4hi4ktPOYzIs7qbL7va+AdCxn9IFSDR2kBSloh7izaU5HG6bdQxRYMEbhIg19phvtpiEZoKn1mwbE5xPSOFDRgLCyNh9bfMyPxM+UymwTCWaQ==", - "SPTs": [ - { - "AddedTS": "0001-01-01T00:00:00Z", - "STH": "eyJUcmVlU2l6ZSI6NCwiUm9vdEhhc2giOiJMRXhQSk5PWXgyc01sNzQ0L2YvdFZtYzRTOFJKM1hXS2NBY1pCV0R1ZkZBPSIsIlRpbWVzdGFtcE5hbm9zIjoxNjYxOTg2NzQyMjAxMDc0MDAwLCJSZXZpc2lvbiI6MCwiTWV0YWRhdGEiOiIifQ==", - "PoI": [ - "eyJsZWFmX2luZGV4IjozLCJoYXNoZXMiOlsiTXgyelMwMjlKUjRYcEZEMmJIcmQ4Wi9nWDIyV3RJdkRVTDZWdFNIOUVzUT0iLCJWc0dBZjZ5ZnFHV2NFbm85YVJCajNPMU45RThmWS9YRTluSm1ZS2plZlBNPSJdfQ==" - ] - } - ] -} \ No newline at end of file diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 04d3f2c3..77480695 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -6,6 +6,7 @@ import ( "github.com/google/trillian" "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/transparency-dev/merkle" logProof "github.com/transparency-dev/merkle/proof" "github.com/transparency-dev/merkle/rfc6962" @@ -27,7 +28,7 @@ func NewLogVerifier(hasher merkle.LogHasher) *LogVerifier { } } -// HashLeaf: hash the input +// HashLeaf hashes the input. 
func (logVerifier *LogVerifier) HashLeaf(input []byte) []byte { return logVerifier.hasher.HashLeaf(input) } @@ -82,60 +83,52 @@ func (c *LogVerifier) VerifyRoot(trusted *types.LogRootV1, newRoot *types.LogRoo // VerifyInclusionByHash verifies that the inclusion proof for the given Merkle leafHash // matches the given trusted root. -func (c *LogVerifier) VerifyInclusionByHash(trusted *types.LogRootV1, leafHash []byte, +func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHash []byte, proofs []*trillian.Proof) error { - switch { - case trusted == nil: - return fmt.Errorf("VerifyInclusionByHash() error: trusted == nil") - case proofs == nil: - return fmt.Errorf("VerifyInclusionByHash() error: proof == nil") - } - // As long as one proof is verified, the verification is successful. // Proofs might contain multiple proofs for different leaves, while the content of each leaf // is identical. Trillian will return all the proofs for one content. // So one successful verification is enough. + var specialErr error for _, proof := range proofs { - if err := logProof.VerifyInclusion(c.hasher, uint64(proof.LeafIndex), trusted.TreeSize, - leafHash, proof.Hashes, trusted.RootHash); err == nil { + err := logProof.VerifyInclusion(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, + leafHash, proof.Hashes, trustedRoot.RootHash) + + if err == nil { return nil } + if _, ok := err.(logProof.RootMismatchError); !ok { + specialErr = err + } + } + if specialErr != nil { + return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", specialErr) } - return fmt.Errorf("verification fails") + // This is a logProof.RootMismatchError, aka different hash values. 
+ return fmt.Errorf("verification failed: different hashes") } -func (c *LogVerifier) VerifySP(sp *common.SP) error { +func (v *LogVerifier) VerifySP(sp *common.SP) error { // Get the hash of the SP without SPTs: SPTs := sp.SPTs sp.SPTs = []common.SPT{} - serializedStruct, err := common.ToJSON(sp) + serializedSP, err := common.ToJSON(sp) if err != nil { - return fmt.Errorf("VerifyRPC | ToJSON | %w", err) + return fmt.Errorf("VerifySP | ToJSON | %w", err) } - bytesHash := c.HashLeaf([]byte(serializedStruct)) + bytesHash := v.HashLeaf([]byte(serializedSP)) // Restore the SPTs to the SP: sp.SPTs = SPTs - for _, p := range sp.SPTs { - sth, err := common.JSONToLogRoot(p.STH) - if err != nil { - return fmt.Errorf("VerifySP | JsonBytesToLogRoot | %w", err) - } - poi, err := common.JSONToPoI(p.PoI) - if err != nil { - return fmt.Errorf("VerifySP | JsonBytesToPoI | %w", err) - } - - if err = c.VerifyInclusionByHash(sth, bytesHash, poi); err != nil { - return fmt.Errorf("VerifySP | VerifyInclusionByHash | %w", err) - } + if err := v.verifySPTs(sp.SPTs, bytesHash); err != nil { + return fmt.Errorf("VerifySP | %w", err) } return nil } -func (c *LogVerifier) VerifyRPC(rpc *common.RPC) error { +func (v *LogVerifier) VerifyRPC(rpc *common.RPC) error { // Get the hash of the RPC without SPTs: SPTs := rpc.SPTs rpc.SPTs = []common.SPT{} @@ -143,22 +136,46 @@ func (c *LogVerifier) VerifyRPC(rpc *common.RPC) error { if err != nil { return fmt.Errorf("VerifyRPC | ToJSON | %w", err) } - bytesHash := c.HashLeaf([]byte(serializedStruct)) + bytesHash := v.HashLeaf([]byte(serializedStruct)) // Restore the SPTs to the RPC: rpc.SPTs = SPTs - for _, p := range rpc.SPTs { - sth, err := common.JSONToLogRoot(p.STH) + if err := v.verifySPTs(rpc.SPTs, bytesHash); err != nil { + return fmt.Errorf("VerifyRPC | %w", err) + } + return nil +} + +func (v *LogVerifier) verifySPTs(SPTs []common.SPT, dataHash []byte) error { + for _, p := range SPTs { + // Load the STH from JSON. 
+ sthRaw, err := common.FromJSON(p.STH) + if err != nil { + return fmt.Errorf("verifySPTs | FromJSON(STH) | %w", err) + } + // Into its right type. + sth, err := util.ToType[*types.LogRootV1](sthRaw) if err != nil { - return fmt.Errorf("VerifyRPC | JsonBytesToLogRoot | %w", err) + return fmt.Errorf("verifySPTs | ToType | %w", err) } - poi, err := common.JSONToPoI(p.PoI) + + // Load the PoI from JSON. + poiRaw, err := common.FromJSON(p.PoI) + if err != nil { + return fmt.Errorf("verifySPTs | FromJSON(PoI) | %w", err) + } + // Into its right type. + poi, err := util.ToTypedSlice[*trillian.Proof](poiRaw) + if err != nil { + return fmt.Errorf("verifySPTs | ToTypedSlice | %w", err) + } + if err != nil { - return fmt.Errorf("VerifyRPC | JsonBytesToPoI | %w", err) + return fmt.Errorf("verifySPTs | JsonBytesToPoI | %w", err) } - if err = c.VerifyInclusionByHash(sth, bytesHash, poi); err != nil { - return fmt.Errorf("VerifyRPC | VerifyInclusionByHash | %w", err) + if err = v.VerifyInclusionByHash(sth, dataHash, poi); err != nil { + return fmt.Errorf("verifySPTs | VerifyInclusionByHash | %w", err) } } return nil diff --git a/pkg/tests/defs.go b/pkg/tests/defs.go new file mode 100644 index 00000000..186f8316 --- /dev/null +++ b/pkg/tests/defs.go @@ -0,0 +1,8 @@ +package tests + +import "github.com/stretchr/testify/require" + +type T interface { + require.TestingT + Helper() +} diff --git a/pkg/tests/must.go b/pkg/tests/must.go new file mode 100644 index 00000000..a3dbe8ee --- /dev/null +++ b/pkg/tests/must.go @@ -0,0 +1,23 @@ +package tests + +import ( + "encoding/base64" + "encoding/hex" + + "github.com/stretchr/testify/require" +) + +// MustDecodeString decodes the string using hex.DecodeString or fails the test. +func MustDecodeString(t T, s string) []byte { + b, err := hex.DecodeString(s) + require.NoError(t, err) + return b +} + +// MustDecodeBase64 decodes the base64 string using base64.StdEncoding.DecodeString or fails +// the test. 
+func MustDecodeBase64(t T, s string) []byte { + b, err := base64.StdEncoding.DecodeString(s) + require.NoError(t, err) + return b +} diff --git a/pkg/util/random.go b/pkg/util/random.go index a81e9429..b35dd6fd 100644 --- a/pkg/util/random.go +++ b/pkg/util/random.go @@ -1,7 +1,7 @@ package util import ( - "crypto/rand" + "math/rand" "github.com/stretchr/testify/require" ) diff --git a/pkg/util/types.go b/pkg/util/types.go index 48e5e756..13c3cc51 100644 --- a/pkg/util/types.go +++ b/pkg/util/types.go @@ -2,6 +2,8 @@ package util import "fmt" +// ToTypedSlice expects a slice as input and returns a slice whose elements are converted to the +// required type one by one, or error. func ToTypedSlice[T any](obj any) ([]T, error) { s, ok := obj.([]any) if !ok { @@ -18,3 +20,11 @@ func ToTypedSlice[T any](obj any) ([]T, error) { } return t, nil } + +// ToType returns the passed object as the specified type, or error. +func ToType[T any](obj any) (T, error) { + if o, ok := obj.(T); ok { + return o, nil + } + return *new(T), fmt.Errorf("cannot convert from %T into %T", obj, *new(T)) +} diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go new file mode 100644 index 00000000..82f9bc6f --- /dev/null +++ b/pkg/util/types_test.go @@ -0,0 +1,59 @@ +package util + +import ( + "testing" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/stretchr/testify/require" +) + +func TestToTypedSlice(t *testing.T) { + // slice of int + { + s := []any{1, 2} + r, err := ToTypedSlice[int](s) + require.NoError(t, err) + require.Equal(t, []int{1, 2}, r) + } + + // slice of *common.RPC + { + orig := []*common.RPC{ + { + PolicyObjectBase: common.PolicyObjectBase{ + Subject: "a.com", + }, + Version: 1, + }, + { + PolicyObjectBase: common.PolicyObjectBase{ + Subject: "b.com", + }, + Version: 1, + }, + } + s := make([]any, len(orig)) + for i, e := range orig { + s[i] = e + } + r, err := ToTypedSlice[*common.RPC](s) + require.NoError(t, err) + require.Equal(t, 
orig, r) + } +} + +func TestToType(t *testing.T) { + // *common.RPC + { + orig := &common.RPC{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: "a.com", + }, + Version: 1, + } + e := any(orig) + r, err := ToType[*common.RPC](e) + require.NoError(t, err) + require.Equal(t, orig, r) + } +} diff --git a/tests/benchmark/db_benchmark/db.go b/tests/benchmark/db_benchmark/db.go index 47456944..94e3ba89 100644 --- a/tests/benchmark/db_benchmark/db.go +++ b/tests/benchmark/db_benchmark/db.go @@ -11,11 +11,11 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" - dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" ) func main() { - dbtest.TruncateAllTablesWithoutTestObject() + testdb.TruncateAllTablesWithoutTestObject() // ***************************************************************** // open a db connection // ***************************************************************** diff --git a/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go b/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go index d721b4d7..af4d2011 100644 --- a/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go +++ b/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go @@ -13,7 +13,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" ) var domainCount int @@ -22,7 +22,7 @@ var domainCount int func main() { domainCount = 0 - dbtest.TruncateAllTablesWithoutTestObject() + testdb.TruncateAllTablesWithoutTestObject() csvFile, err := os.Create("result.csv") diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go 
b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go deleted file mode 100644 index 5471b7ee..00000000 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoA/main.go +++ /dev/null @@ -1,257 +0,0 @@ -package main - -import ( - "context" - "encoding/csv" - "fmt" - "math" - "math/rand" - "os" - "strconv" - "sync" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" -) - -var domainCount int - -// collect 1M certs, and update them -func main() { - - fmt.Println("new") - - testSet := []string{} - - for i := 0; i < 100000; i++ { - testSet = append(testSet, getRandomDomainName(3)) - } - - domainCount = 0 - dbtest.TruncateAllTablesWithoutTestObject() - - csvFile, err := os.Create("result.csv") - respondeCSVFile, err := os.Create("result_responder.csv") - //domainInfoFile, err := os.Create("domainInfo.csv") - - if err != nil { - panic(err) - } - - csvwriter := csv.NewWriter(csvFile) - //domaincsvwriter := csv.NewWriter(domainInfoFile) - responder_csvwriter := csv.NewWriter(respondeCSVFile) - - // new updater - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } - ctx, cancelF := context.WithTimeout(context.Background(), 200*time.Minute) - defer cancelF() - - // collect 100K certs - mapUpdater.Fetcher.BatchSize = 40000 - const baseCTSize = 2*1000 + 1600000 - const count = 10000 * 1000 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", - baseCTSize, baseCTSize+count-1) - - updateStart := time.Now() - //names := []string{} - for i := 0; ; i++ { - fmt.Println() - fmt.Println() - fmt.Println(" ---------------------- batch ", i, " ---------------------------") - - n, timeList, _, err, _, _, _ := 
mapUpdater.UpdateNextBatchReturnTimeList(ctx) - if err != nil { - panic(err) - } - fmt.Println("number of certs: ", n) - if n == 0 { - break - } - - start := time.Now() - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - fmt.Println("time to commit the changes: ", time.Since(start)) - timeToUpdateSMT := time.Since(start) - - domainCount = dbtest.GetDomainCountWithoutTestObject() - fmt.Println("total domains: ", domainCount) - - err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), strconv.Itoa(domainCount))) - if err != nil { - panic(err) - } - - if i%5 == 4 { - fetchProof(mapUpdater.GetRoot(), testSet, responder_csvwriter) - //names = []string{} - } - - csvwriter.Flush() - } - fmt.Println("************************ Update finished ******************************") - fmt.Printf("time to get and update %d certs: %s\n", count, time.Since(updateStart)) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - err = os.WriteFile("root", root, 0644) - if err != nil { - panic(err) - } -} - -func fetchProof(root []byte, names []string, csv *csv.Writer) { - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - const numOfWorkers = 1000 - const totalQueries = 5 * 100 * 1000 - - depth := getTreeDepth() - fmt.Println("tree depth:", depth) - - responder, err := responder.NewMapResponder(ctx, root, depth, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - responderStartTime := time.Now() - - var wg sync.WaitGroup - for w := 0; w < numOfWorkers; w++ { - wg.Add(1) - go func(queryCount int) { - defer wg.Done() - - for i := 0; i < queryCount; i++ { - name := names[rand.Intn(len(names))] - _, err := responder.GetProof(ctx, name) - if err != nil && err != domain.ErrInvalidDomainName { - fmt.Println(err) - continue - } - - } - }(totalQueries / numOfWorkers) - } - wg.Wait() - - responderDuration := 
time.Since(responderStartTime) - - fmt.Println(responderDuration, "for ", totalQueries, " queries") - - csv.Write([]string{ - responderDuration.String(), - }) - csv.Flush() - - responder.Close() -} - -func getTreeDepth() int { - treeDepth := int(math.Log2(float64(domainCount))) - fmt.Println("tree depth before: ", treeDepth) - - return 255 - treeDepth -} - -func getUniqueName(names []string) []string { - uniqueSet := make(map[string]struct{}) - for _, name := range names { - uniqueSet[name] = struct{}{} - } - - result := []string{} - - for k := range uniqueSet { - result = append(result, k) - } - return result -} - -func checkPoP(input []*common.MapServerResponse) bool { - for _, pair := range input { - if pair.PoI.ProofType == common.PoP { - if len(pair.DomainEntryBytes) == 0 { - panic("result error") - } - return true - } - } - return false -} - -func countSizeOfDomainProofs(proofs []*common.MapServerResponse) (int, int, int, int, int, int, int, int, int) { - proofSize := 0 - domainSize := 0 - largeDomains10K := 0 - largeDomains50K := 0 - largeDomains100K := 0 - largeDomains200K := 0 - largeDomains500K := 0 - largeDomains1M := 0 - largeDomains5M := 0 - - for _, proof := range proofs { - for _, p := range proof.PoI.Proof { - proofSize = len(p) - } - proofSize = proofSize + len(proof.PoI.ProofKey) - proofSize = proofSize + len(proof.PoI.Root) - proofSize = proofSize + len(proof.PoI.ProofValue) - - domainSize = domainSize + len(proof.DomainEntryBytes) - } - - if domainSize > 5000*1024 { - largeDomains5M++ - } else if domainSize > 1000*1024 { - largeDomains1M++ - } else if domainSize > 500*1024 { - largeDomains500K++ - } else if domainSize > 200*1024 { - largeDomains200K++ - } else if domainSize > 100*1024 { - largeDomains100K++ - } else if domainSize > 50*1024 { - largeDomains50K++ - } else if domainSize > 10*1024 { - largeDomains10K++ - } - - return proofSize, domainSize, largeDomains10K, largeDomains50K, largeDomains100K, largeDomains200K, largeDomains500K, 
largeDomains1M, largeDomains5M -} - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func getRandomDomainName(level int) string { - result := "" - for i := 0; i < level; i++ { - b := make([]rune, 10) - for i := range b { - b[i] = letterRunes[rand.Intn(len(letterRunes))] - } - result = string(b) + "." + result - } - return result + "com" -} diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go deleted file mode 100644 index 92df1e84..00000000 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP/main.go +++ /dev/null @@ -1,780 +0,0 @@ -package main - -import ( - "bufio" - "context" - "database/sql" - "encoding/csv" - "fmt" - "math" - "math/rand" - "os" - "strconv" - "sync" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" -) - -var domainCount int - -// collect 1M certs, and update them -func main() { - - fmt.Println("new") - - testSet := []string{} - - f, err := os.Open("testData.txt") - if err != nil { - panic(err) - } - // remember to close the file at the end of the program - defer f.Close() - - // read the file line by line using scanner - scanner := bufio.NewScanner(f) - - for scanner.Scan() { - // do something with a line - testSet = append(testSet, scanner.Text()) - } - - if err := scanner.Err(); err != nil { - panic(err) - } - - if len(testSet) != 100038 { - panic("read file error") - } - - /* - testSet = testSet[0:50000] - - for i := 0; i < 100000; i++ { - testSet = append(testSet, RandStringRunes(30)) - }*/ - - domainCount = 0 - 
dbtest.TruncateAllTablesWithoutTestObject() - - csvFile, err := os.Create("result.csv") - respondeCSVFile, err := os.Create("result_responder.csv") - //domainInfoFile, err := os.Create("domainInfo.csv") - - if err != nil { - panic(err) - } - - csvwriter := csv.NewWriter(csvFile) - //domaincsvwriter := csv.NewWriter(domainInfoFile) - responder_csvwriter := csv.NewWriter(respondeCSVFile) - - // new updater - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } - ctx, cancelF := context.WithTimeout(context.Background(), 200*time.Minute) - defer cancelF() - - // collect 100K certs - mapUpdater.Fetcher.BatchSize = 40000 - const baseCTSize = 2*1000 + 1600000 - const count = 20000 * 1000 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", - baseCTSize, baseCTSize+count-1) - - updateStart := time.Now() - //names := []string{} - for i := 0; ; i++ { - fmt.Println() - fmt.Println() - fmt.Println(" ---------------------- batch ", i, " ---------------------------") - - n, timeList, _, err, _, _, _ := mapUpdater.UpdateNextBatchReturnTimeList(ctx) - if err != nil { - panic(err) - } - fmt.Println("number of certs: ", n) - if n == 0 { - break - } - - //names = append(names, newNames...) 
- - start := time.Now() - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - fmt.Println("time to commit the changes: ", time.Since(start)) - timeToUpdateSMT := time.Since(start) - - domainCount = dbtest.GetDomainCountWithoutTestObject() - fmt.Println("total domains: ", domainCount) - - err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), strconv.Itoa(domainCount))) - if err != nil { - panic(err) - } - - //fmt.Println(mapUpdater.GetRoot()) - - if i%5 == 4 { - fetchProof(mapUpdater.GetRoot(), testSet, responder_csvwriter) - //names = []string{} - } - - //if i%10 == 9 { - // getDomainInfo(domainCount, domaincsvwriter) - //} - - csvwriter.Flush() - } - fmt.Println("************************ Update finished ******************************") - fmt.Printf("time to get and update %d certs: %s\n", count, time.Since(updateStart)) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - err = os.WriteFile("root", root, 0644) - if err != nil { - panic(err) - } -} - -func fetchProof(root []byte, names []string, csv *csv.Writer) { - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - const numOfWorkers = 1000 - const totalQueries = 5 * 100 * 1000 - - depth := getTreeDepth() - fmt.Println("tree depth:", depth) - - responder, err := responder.NewMapResponder(ctx, root, depth, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - responderStartTime := time.Now() - - var wg sync.WaitGroup - for w := 0; w < numOfWorkers; w++ { - wg.Add(1) - go func(queryCount int) { - defer wg.Done() - - for i := 0; i < queryCount; i++ { - name := names[rand.Intn(len(names))] - _, err := responder.GetProof(ctx, name) - if err != nil && err != domain.ErrInvalidDomainName { - fmt.Println(err) - continue - } - - } - }(totalQueries / numOfWorkers) - } - wg.Wait() - - responderDuration := time.Since(responderStartTime) - - 
fmt.Println(responderDuration, "for ", totalQueries, " queries") - - csv.Write([]string{ - responderDuration.String(), - }) - csv.Flush() - - responder.Close() -} - -func fetchProofs(root []byte, names []string, csv *csv.Writer) { - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - batch := 2000 - - count := len(names) - - workerCount := count / batch - - count = workerCount * batch - - fmt.Println() - fmt.Println() - fmt.Println("**********************************************************") - fmt.Println("num of fetchings: ", count) - - depth := getTreeDepth() - fmt.Println("tree depth:", depth) - - responder, err := responder.NewMapResponder(ctx, root, depth, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - wg := &sync.WaitGroup{} - - t1 := time.Now() - - readSize := 0 - readLock := &sync.Mutex{} - - proofSize := 0 - domainSize := 0 - totalSize := 0 - - sizeReadLock := &sync.Mutex{} - - work := func(names []string) { - domains, newReadSize, err := responder.GetDomainProofsTest(ctx, names) - if err != nil { - panic(err) - } - - readLock.Lock() - readSize = readSize + newReadSize - readLock.Unlock() - - pSizeT := 0 - dSizeT := 0 - - for _, domain := range domains { - pSize, dSize, _, _, _, _, _, _, _ := countSizeOfDomainProofs(domain) - pSizeT = pSizeT + pSize - dSizeT = dSizeT + dSize - - if !checkPoP(domain) { - panic("proof error!!!!!!") - } - } - sizeReadLock.Lock() - proofSize = proofSize + pSizeT - domainSize = domainSize + dSizeT - totalSize = totalSize + pSizeT + dSizeT - - sizeReadLock.Unlock() - wg.Done() - } - - wg.Add(workerCount) - - for i := 0; i < workerCount; i++ { - go work(names[i*batch : i*batch+batch]) - } - wg.Wait() - - t2 := time.Now() - fmt.Println(t2.Sub(t1)) - speed := float64(count) / float64(t2.Sub(t1).Seconds()) - fmt.Println("speed: ", speed) - fmt.Println("total read size: ", readSize/1024, " KB") - fmt.Println("proof size: ", proofSize/1024, " KB") - 
fmt.Println("domain size: ", domainSize/1024, " KB") - fmt.Println("avg domain size: ", domainSize/(1024*count), " KB") - fmt.Println() - fmt.Println("**********************************************************") - fmt.Println() - fmt.Println() - - csv.Write([]string{ - t2.Sub(t1).String(), - strconv.Itoa(count), - fmt.Sprintf("%f", speed), - strconv.Itoa(readSize), - strconv.Itoa(proofSize), - strconv.Itoa(domainSize), - }) - csv.Flush() - - responder.Close() - /* - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - batch := 2000 - - count := len(names) - - workerCount := count / batch - - count = workerCount * batch - - fmt.Println() - fmt.Println() - fmt.Println("**********************************************************") - fmt.Println("num of fetchings: ", count) - - depth := getTreeDepth() - fmt.Println("tree length:", depth) - - responder, err := responder.NewMapResponder(ctx, root, depth) - if err != nil { - panic(err) - } - wg := &sync.WaitGroup{} - - t1 := time.Now() - - readSize := 0 - readLock := &sync.Mutex{} - - proofSize := 0 - domainSize := 0 - totalSize := 0 - - largeDomains10K := 0 - largeDomains50K := 0 - largeDomains100K := 0 - largeDomains200K := 0 - largeDomains500K := 0 - largeDomains1M := 0 - largeDomains5M := 0 - - sizeReadLock := &sync.Mutex{} - - work := func(names []string) { - domains, newReadSize, err := responder.GetDomainProofsTest(ctx, names) - if err != nil { - panic(err) - } - - readLock.Lock() - readSize = readSize + newReadSize - readLock.Unlock() - - pSizeT := 0 - dSizeT := 0 - - T10K := 0 - T50K := 0 - T100K := 0 - T200K := 0 - T500K := 0 - T1M := 0 - T5M := 0 - - for _, domain := range domains { - pSize, dSize, L10K, L50K, L100K, L200K, L500K, L1M, L5M := countSizeOfDomainProofs(domain) - pSizeT = pSizeT + pSize - dSizeT = dSizeT + dSize - - T10K = T10K + L10K - T50K = T50K + L50K - T100K = T100K + L100K - T200K = T200K + L200K - T500K = T500K + L500K - T1M = T1M + L1M - T5M = T5M + 
L5M - - } - sizeReadLock.Lock() - proofSize = proofSize + pSizeT - domainSize = domainSize + dSizeT - totalSize = totalSize + pSizeT + dSizeT - - largeDomains10K = largeDomains10K + T10K - largeDomains50K = largeDomains50K + T50K - largeDomains100K = largeDomains100K + T100K - largeDomains200K = largeDomains200K + T200K - largeDomains500K = largeDomains500K + T500K - largeDomains1M = largeDomains1M + T1M - largeDomains5M = largeDomains5M + T5M - - sizeReadLock.Unlock() - wg.Done() - } - - wg.Add(workerCount) - - for i := 0; i < workerCount; i++ { - go work(names[i*batch : i*batch+batch]) - } - wg.Wait() - - t2 := time.Now() - fmt.Println(t2.Sub(t1)) - speed := float64(count) / float64(t2.Sub(t1).Seconds()) - fmt.Println("speed: ", speed) - fmt.Println("total read size: ", readSize/1024, " KB") - fmt.Println("proof size: ", proofSize/1024, " KB") - fmt.Println("domain size: ", domainSize/1024, " KB") - fmt.Println("avg domain size: ", domainSize/(1024*count), " KB") - fmt.Println() - fmt.Println(" 10K: ", largeDomains10K) - fmt.Println(" 50K: ", largeDomains50K) - fmt.Println(" 100K: ", largeDomains100K) - fmt.Println(" 200K: ", largeDomains200K) - fmt.Println(" 500K: ", largeDomains500K) - fmt.Println(" 1M: ", largeDomains1M) - fmt.Println(" 5M: ", largeDomains5M) - fmt.Println("**********************************************************") - fmt.Println() - fmt.Println() - - csv.Write([]string{ - t2.Sub(t1).String(), - strconv.Itoa(count), - fmt.Sprintf("%f", speed), - strconv.Itoa(readSize/1024) + " KB", - strconv.Itoa(proofSize), - strconv.Itoa(domainSize), - strconv.Itoa(largeDomains10K), - strconv.Itoa(largeDomains50K), - strconv.Itoa(largeDomains100K), - strconv.Itoa(largeDomains200K), - strconv.Itoa(largeDomains500K), - strconv.Itoa(largeDomains1M), - strconv.Itoa(largeDomains5M), - }) - csv.Flush() - - responder.Close() - */ -} - -func getTreeDepth() int { - treeDepth := int(math.Log2(float64(domainCount))) - fmt.Println("tree depth before: ", treeDepth) - - 
return 255 - treeDepth -} - -func getUniqueName(names []string) []string { - uniqueSet := make(map[string]struct{}) - for _, name := range names { - uniqueSet[name] = struct{}{} - } - - result := []string{} - - for k := range uniqueSet { - result = append(result, k) - } - return result -} - -func checkPoP(input []*common.MapServerResponse) bool { - for _, pair := range input { - if pair.PoI.ProofType == common.PoP { - if len(pair.DomainEntryBytes) == 0 { - panic("result error") - } - return true - } - } - return false -} - -func countSizeOfDomainProofs(proofs []*common.MapServerResponse) (int, int, int, int, int, int, int, int, int) { - proofSize := 0 - domainSize := 0 - largeDomains10K := 0 - largeDomains50K := 0 - largeDomains100K := 0 - largeDomains200K := 0 - largeDomains500K := 0 - largeDomains1M := 0 - largeDomains5M := 0 - - for _, proof := range proofs { - for _, p := range proof.PoI.Proof { - proofSize = len(p) - } - proofSize = proofSize + len(proof.PoI.ProofKey) - proofSize = proofSize + len(proof.PoI.Root) - proofSize = proofSize + len(proof.PoI.ProofValue) - - domainSize = domainSize + len(proof.DomainEntryBytes) - } - - /* - if domainSize == 19654521 { - for _, proof := range proofs { - domainEntry, err := common.DeserializeDomainEntry(proof.DomainEntryBytes) - if err != nil { - panic(err) - } - for _, caList := range domainEntry.CAEntry { - fmt.Println(caList.CAName) - for _, certBytes := range caList.DomainCerts { - - cert, err := ctx509.ParseTBSCertificate(certBytes) - if err != nil { - panic("failed to parse certificate: " + err.Error()) - } - fmt.Println("-----------------------------") - fmt.Println(cert.Subject.CommonName) - fmt.Println(cert.DNSNames) - fmt.Println("-----------------------------") - } - } - } - fmt.Println("large domains: ", domainSize) - */ - - if domainSize > 5000*1024 { - largeDomains5M++ - } else if domainSize > 1000*1024 { - largeDomains1M++ - } else if domainSize > 500*1024 { - largeDomains500K++ - } else if domainSize > 
200*1024 { - largeDomains200K++ - } else if domainSize > 100*1024 { - largeDomains100K++ - } else if domainSize > 50*1024 { - largeDomains50K++ - } else if domainSize > 10*1024 { - largeDomains10K++ - } - - return proofSize, domainSize, largeDomains10K, largeDomains50K, largeDomains100K, largeDomains200K, largeDomains500K, largeDomains1M, largeDomains5M -} - -func getDomainInfo(count int, csv *csv.Writer) { - db, err := sql.Open("mysql", "root@tcp(localhost)/fpki") - if err != nil { - panic(err) - } - fmt.Println("total domain count: ", count) - - var size11 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>1*1024;").Scan(&size11) - if err != nil { - panic(err) - } - fmt.Println("1KB ", size11) - - var size12 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>2*1024;").Scan(&size12) - if err != nil { - panic(err) - } - fmt.Println("2KB ", size12) - - var size13 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>3*1024;").Scan(&size13) - if err != nil { - panic(err) - } - fmt.Println("3KB ", size13) - - var size14 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>4*1024;").Scan(&size14) - if err != nil { - panic(err) - } - fmt.Println("4KB ", size14) - - var size15 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>5*1024;").Scan(&size15) - if err != nil { - panic(err) - } - fmt.Println("5KB ", size15) - - var size16 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>7*1024;").Scan(&size16) - if err != nil { - panic(err) - } - fmt.Println("7KB ", size16) - - var size17 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>8*1024;").Scan(&size17) - if err != nil { - panic(err) - } - fmt.Println("8KB ", size17) - - var size1 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>10*1024;").Scan(&size1) - if err != nil { - panic(err) - } - 
fmt.Println("10K ", size1) - - var size2 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>20*1024;").Scan(&size2) - if err != nil { - panic(err) - } - fmt.Println("20K ", size2) - - var size3 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>50*1024;").Scan(&size3) - if err != nil { - panic(err) - } - fmt.Println("50K ", size3) - - var size4 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>100*1024;").Scan(&size4) - if err != nil { - panic(err) - } - fmt.Println("100K ", size4) - - var size5 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>200*1024;").Scan(&size5) - if err != nil { - panic(err) - } - fmt.Println("200K ", size5) - - var size6 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>500*1024;").Scan(&size6) - if err != nil { - panic(err) - } - fmt.Println("500K ", size6) - - var size7 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>1024*1024;").Scan(&size7) - if err != nil { - panic(err) - } - fmt.Println("1M ", size7) - - var size8 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>2*1024*1024;").Scan(&size8) - if err != nil { - panic(err) - } - fmt.Println("2M ", size8) - - var size9 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>5*1024*1024;").Scan(&size9) - if err != nil { - panic(err) - } - fmt.Println("5M ", size9) - - var size10 int - err = db.QueryRow("SELECT COUNT(*) from domainEntries WHERE length(value)>10*1024*1024;").Scan(&size10) - if err != nil { - panic(err) - } - fmt.Println("10M ", size10) - - csv.Write([]string{ - strconv.Itoa(count), - strconv.Itoa(size11), - strconv.Itoa(size12), - strconv.Itoa(size13), - strconv.Itoa(size14), - strconv.Itoa(size15), - strconv.Itoa(size16), - strconv.Itoa(size17), - strconv.Itoa(size1), - strconv.Itoa(size2), - strconv.Itoa(size3), - strconv.Itoa(size4), - 
strconv.Itoa(size5), - strconv.Itoa(size6), - strconv.Itoa(size7), - strconv.Itoa(size8), - strconv.Itoa(size9), - strconv.Itoa(size10), - }) - csv.Flush() - - db.Close() -} - -/* - -func fetchProof(root []byte, names []string) { - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - count := len(names) - fmt.Println("**********************************************************") - fmt.Println("num of fetchings: ", count) - - responder, err := responder.NewMapResponder(ctx, root, 233) - if err != nil { - panic(err) - } - - t1 := time.Now() - - parallelRequestLimit := count%1000 - wg := &sync.WaitGroup{} - var numRequests int64 = 0 - var domainData int64 = 0 - work := func(count int, names []string) { - defer wg.Done() - for i := 0; i < count; i++ { - name := names[rand.Intn(len(names))] - responses, err := responder.GetProof(ctx, name) - if err != nil { - panic(err) - } - - atomic.AddInt64(&numRequests, 1) - for _, p := range responses { - atomic.AddInt64(&domainData, int64(len(p.DomainEntryBytes))) - } - } - } - wg.Add(parallelRequestLimit) - i := 0 - for ; i < count%parallelRequestLimit; i++ { - go work(count/parallelRequestLimit+1, names) - } - for ; i < parallelRequestLimit; i++ { - go work(count/parallelRequestLimit, names) - } - wg.Wait() - - t2 := time.Now() - fmt.Println(t2.Sub(t1)) - fmt.Println("**********************************************************") - - err = responder.Close() - if err != nil { - panic(err) - } -} - -func getUniqueName(names []string) []string { - uniqueSet := make(map[string]struct{}) - for _, name := range names { - uniqueSet[name] = struct{}{} - } - - result := []string{} - - for k, _ := range uniqueSet { - result = append(result, k) - } - return result -} -*/ - -func init() { - rand.Seed(time.Now().UnixNano()) -} - -var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - -func RandStringRunes(n int) string { - b := make([]rune, n) - for i := range b { - b[i] = 
letterRunes[rand.Intn(len(letterRunes))] - } - return string(b) -} diff --git a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go b/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go deleted file mode 100644 index a51dde81..00000000 --- a/tests/benchmark/mapserver_benchmark/wholesys_benchmark_PoP_diffSize/main.go +++ /dev/null @@ -1,338 +0,0 @@ -package main - -import ( - "bufio" - "context" - "encoding/csv" - "fmt" - "math" - "math/rand" - "os" - "strconv" - "sync" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" -) - -var domainCount int - -// collect 1M certs, and update them -func main() { - testSet6 := loadTestData("testData6.txt") - testSet10 := loadTestData("testData10.txt") - testSet20 := loadTestData("testData20.txt") - testSet50 := loadTestData("testData50.txt") - testSet100 := loadTestData("testData100.txt") - testSet200 := loadTestData("testData200.txt") - testSet500 := loadTestData("testData500.txt") - testSet1000 := loadTestData("testData1000.txt") - - domainCount = 0 - dbtest.TruncateAllTablesWithoutTestObject() - - csvFile, err := os.Create("result.csv") - csvPathFile, err := os.Create("pathResult.csv") - - //domainInfoFile, err := os.Create("domainInfo.csv") - - if err != nil { - panic(err) - } - - csvwriter := csv.NewWriter(csvFile) - csvpathwriter := csv.NewWriter(csvPathFile) - //domaincsvwriter := csv.NewWriter(domainInfoFile) - - // new updater - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } - ctx, cancelF := context.WithTimeout(context.Background(), 200*time.Minute) - defer cancelF() - - // collect 100K certs - mapUpdater.Fetcher.BatchSize = 10000 - 
const baseCTSize = 2*1000 + 1600000 - const count = 1000 * 1000 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", - baseCTSize, baseCTSize+count-1) - - updateStart := time.Now() - - for i := 0; ; i++ { - fmt.Println() - fmt.Println() - fmt.Println(" ---------------------- batch ", i, " ---------------------------") - - n, timeList, _, err, _, _, _ := mapUpdater.UpdateNextBatchReturnTimeList(ctx) - if err != nil { - panic(err) - } - fmt.Println("number of certs: ", n) - if n == 0 { - break - } - - start := time.Now() - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - fmt.Println("time to commit the changes: ", time.Since(start)) - timeToUpdateSMT := time.Since(start) - - domainCount = dbtest.GetDomainCountWithoutTestObject() - fmt.Println("total domains: ", domainCount) - - err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), strconv.Itoa(domainCount))) - if err != nil { - panic(err) - } - - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - responder, err := responder.NewMapResponder(ctx, mapUpdater.GetRoot(), 233, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - totalPath := 0.0 - - for i := 0; i < 10000; i++ { - name := testSet6[rand.Intn(len(testSet6))] - proofs, err := responder.GetProof(ctx, name) - if err != nil { - fmt.Println(err) - } - - totalPath = totalPath + countPathSize(proofs) - } - - fmt.Println(" size !!!!!!!!!!", totalPath/10000) - - responder.Close() - - csvwriter.Flush() - csvpathwriter.Write([]string{fmt.Sprintf("%f", totalPath/10000.0), strconv.Itoa(domainCount)}) - csvpathwriter.Flush() - } - fmt.Println("************************ Update finished ******************************") - fmt.Printf("time to get and update %d certs: %s\n", count, time.Since(updateStart)) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - depth := 
getTreeDepth() - fmt.Println("tree depth:", depth) - - responder, err := responder.NewMapResponder(ctx, root, depth, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - fetchProof(testSet6, "testSet6.csv", responder) - fetchProof(testSet10, "testSet10.csv", responder) - fetchProof(testSet20, "testSet20.csv", responder) - fetchProof(testSet50, "testSet50.csv", responder) - fetchProof(testSet100, "testSet100.csv", responder) - fetchProof(testSet200, "testSet200.csv", responder) - fetchProof(testSet500, "testSet500.csv", responder) - fetchProof(testSet1000, "testSet1000.csv", responder) - - anaylseProofOverhead(testSet6, responder) -} - -func anaylseProofOverhead(names []string, responder *responder.MapResponder) { - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - totalSize := 0 - proofSize := 0 - totalCertificateSize := 0 - - totalPath := 0.0 - - for i := 0; i < 10000; i++ { - name := names[rand.Intn(len(names))] - proofs, err := responder.GetProof(ctx, name) - if err != nil { - fmt.Println(err) - } - - newProofSize, newPOISize := countProofsSize(proofs) - totalSize = totalSize + newProofSize - proofSize = proofSize + newPOISize - - for _, proof := range proofs { - if proof.PoI.ProofType == common.PoP { - entry, err := common.DeserializeDomainEntry(proof.DomainEntryBytes) - if err != nil { - panic(err) - } - totalCertificateSize = totalCertificateSize + countCertSize(entry) - } - } - - totalPath = totalPath + countPathSize(proofs) - } - - fmt.Println(totalSize, proofSize, totalCertificateSize, totalPath/10000.0) -} - -func countPathSize(proofs []common.MapServerResponse) float64 { - total := 0.0 - for _, proof := range proofs { - total = total + float64(len(proof.PoI.Proof)) - } - return total / float64(len(proofs)) -} - -func countCertSize(entry *common.DomainEntry) int { - size := 0 - - for _, caList := range entry.Entries { - for _, certRaw := range caList.DomainCerts { - size = size + 
len(certRaw) - } - } - return size -} - -func countPOISize(poi common.PoI) int { - size := 0 - for _, proof := range poi.Proof { - size = size + len(proof) - } - size = size + len(poi.Root) - size = size + len(poi.ProofKey) - size = size + len(poi.ProofValue) - - return size -} - -func countProofsSize(proofs []common.MapServerResponse) (int, int) { - size := 0 - proofSize := 0 - - for _, proof := range proofs { - size = size + countPOISize(proof.PoI) + len(proof.DomainEntryBytes) - proofSize = proofSize + countPOISize(proof.PoI) - } - - return size, proofSize -} - -func fetchProof(names []string, resultFileName string, responder *responder.MapResponder) { - respondeCSVFile, err := os.Create(resultFileName) - if err != nil { - panic(err) - } - - responder_csvwriter := csv.NewWriter(respondeCSVFile) - - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - const numOfWorkers = 1000 - const totalQueries = 1 * 100 * 1000 - - responderStartTime := time.Now() - - var wg sync.WaitGroup - for w := 0; w < numOfWorkers; w++ { - wg.Add(1) - go func(queryCount int) { - defer wg.Done() - - for i := 0; i < queryCount; i++ { - name := names[rand.Intn(len(names))] - _, err := responder.GetProof(ctx, name) - if err != nil && err != domain.ErrInvalidDomainName { - fmt.Println(err) - continue - } - - } - }(totalQueries / numOfWorkers) - } - wg.Wait() - - responderDuration := time.Since(responderStartTime) - - fmt.Println(resultFileName, " ", responderDuration, "for ", totalQueries, " queries") - - responder_csvwriter.Write([]string{ - responderDuration.String(), - }) - responder_csvwriter.Flush() - -} - -func loadTestData(fileName string) []string { - testSet := []string{} - - f, err := os.Open(fileName) - if err != nil { - panic(err) - } - // remember to close the file at the end of the program - defer f.Close() - - // read the file line by line using scanner - scanner := bufio.NewScanner(f) - - for scanner.Scan() { - // do something with a 
line - testSet = append(testSet, scanner.Text()) - } - - if err := scanner.Err(); err != nil { - panic(err) - } - - return testSet -} - -func getTreeDepth() int { - treeDepth := int(math.Log2(float64(domainCount))) - fmt.Println("tree depth before: ", treeDepth) - - return 255 - treeDepth -} - -func getUniqueName(names []string) []string { - uniqueSet := make(map[string]struct{}) - for _, name := range names { - uniqueSet[name] = struct{}{} - } - - result := []string{} - - for k := range uniqueSet { - result = append(result, k) - } - return result -} - -func checkPoP(input []*common.MapServerResponse) bool { - for _, pair := range input { - if pair.PoI.ProofType == common.PoP { - if len(pair.DomainEntryBytes) == 0 { - panic("result error") - } - return true - } - } - return false -} diff --git a/tests/benchmark/smt_benchmark/main.go b/tests/benchmark/smt_benchmark/main.go index 5e8e84a0..ed6de447 100644 --- a/tests/benchmark/smt_benchmark/main.go +++ b/tests/benchmark/smt_benchmark/main.go @@ -12,15 +12,16 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" - "github.com/netsec-ethz/fpki/tests/pkg/db" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" ) var wg sync.WaitGroup // benchmark for sparse merkle tree func main() { - db.TruncateAllTablesWithoutTestObject() + testdb.TruncateAllTablesWithoutTestObject() conn, err := db.Connect(nil) if err != nil { From 81cc1cfb73f1990e3ab0fb0550bcef733977b764 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Mon, 22 May 2023 14:54:31 +0200 Subject: [PATCH 116/187] Cleanup pkg/tests/testdb --- pkg/db/mysql/mysql_test.go | 5 ++- pkg/logverifier/logverifier_test.go | 16 ++++---- pkg/mapserver/responder/deleteme.go | 48 ----------------------- pkg/mapserver/responder/responder_test.go | 12 ++++-- pkg/tests/random.go | 32 +++++++++++++++ pkg/tests/testdb/certificates.go | 29 ++++---------- pkg/tests/testdb/policies.go | 12 +++--- pkg/util/random.go | 15 ------- 8 files changed, 65 insertions(+), 104 deletions(-) delete mode 100644 pkg/mapserver/responder/deleteme.go create mode 100644 pkg/tests/random.go delete mode 100644 pkg/util/random.go diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 8ca58de6..9889b17b 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -20,6 +20,9 @@ import ( ) func TestCoalesceForDirtyDomains(t *testing.T) { + // Because we are using "random" bytes deterministically here, set a fixed seed. + rand.Seed(1) + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() @@ -49,7 +52,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { var certNames [][]string for _, leaf := range leafCerts { // Create two mock x509 chains on top of leaf: - certs2, certIDs2, parentCertIDs2, certNames2 := testdb.BuildTestCertHierarchy(t, leaf) + certs2, certIDs2, parentCertIDs2, certNames2 := testdb.BuildTestRandomCertHierarchy(t, leaf) certs = append(certs, certs2...) certIDs = append(certIDs, certIDs2...) parentCertIDs = append(parentCertIDs, parentCertIDs2...) 
diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index b54d8a0f..8daf92a4 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -40,12 +40,12 @@ func TestVerifyInclusionByHash(t *testing.T) { }, SerialNumber: 2, Version: 1, - PublicKey: util.RandomBytesForTest(t, 32), + PublicKey: tests.RandomBytesForTest(t, 32), NotBefore: util.TimeFromSecs(42), NotAfter: util.TimeFromSecs(142), CAName: "pca", TimeStamp: util.TimeFromSecs(100), - CASignature: util.RandomBytesForTest(t, 32), + CASignature: tests.RandomBytesForTest(t, 32), } // Serialize it without SPTs. @@ -107,7 +107,7 @@ func TestCheckRPC(t *testing.T) { poi := []*trillian.Proof{ { LeafIndex: 1, - Hashes: [][]byte{util.RandomBytesForTest(t, 32)}, + Hashes: [][]byte{tests.RandomBytesForTest(t, 32)}, }, } serializedPoI, err := common.ToJSON(poi) @@ -120,12 +120,12 @@ func TestCheckRPC(t *testing.T) { }, SerialNumber: 2, Version: 1, - PublicKey: util.RandomBytesForTest(t, 32), + PublicKey: tests.RandomBytesForTest(t, 32), NotBefore: util.TimeFromSecs(42), NotAfter: util.TimeFromSecs(142), CAName: "pca", TimeStamp: util.TimeFromSecs(100), - CASignature: util.RandomBytesForTest(t, 32), + CASignature: tests.RandomBytesForTest(t, 32), SPTs: []common.SPT{ { AddedTS: util.TimeFromSecs(99), @@ -160,7 +160,7 @@ func TestCheckSP(t *testing.T) { poi := []*trillian.Proof{ { LeafIndex: 1, - Hashes: [][]byte{util.RandomBytesForTest(t, 32)}, + Hashes: [][]byte{tests.RandomBytesForTest(t, 32)}, }, } serializedPoI, err := common.ToJSON(poi) @@ -177,8 +177,8 @@ func TestCheckSP(t *testing.T) { TimeStamp: util.TimeFromSecs(444), CAName: "pca", SerialNumber: 4, - CASignature: util.RandomBytesForTest(t, 32), - RootCertSignature: util.RandomBytesForTest(t, 32), + CASignature: tests.RandomBytesForTest(t, 32), + RootCertSignature: tests.RandomBytesForTest(t, 32), SPTs: []common.SPT{ { AddedTS: util.TimeFromSecs(444), diff --git 
a/pkg/mapserver/responder/deleteme.go b/pkg/mapserver/responder/deleteme.go deleted file mode 100644 index 12fe583a..00000000 --- a/pkg/mapserver/responder/deleteme.go +++ /dev/null @@ -1,48 +0,0 @@ -package responder - -import ( - "context" - "fmt" - "time" - - "github.com/netsec-ethz/fpki/pkg/db" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - -// GetDomainProofsTest deleteme! only used to print extra info in benchmarks. -func (mapResponder *OldMapResponder) GetDomainProofsTest(ctx context.Context, domainNames []string) (map[string][]*mapCommon.MapServerResponse, int, error) { - start := time.Now() - domainResultMap, domainProofMap, err := getMapping(domainNames, mapResponder.GetSignTreeHead()) - if err != nil { - return nil, 0, fmt.Errorf("GetDomainProofs | getMapping | %w", err) - } - end := time.Now() - - start1 := time.Now() - domainToFetch, err := mapResponder.getProofFromSMT(ctx, domainProofMap) - if err != nil { - return nil, 0, fmt.Errorf("GetDomainProofs | getProofFromSMT | %w", err) - } - end1 := time.Now() - start2 := time.Now() - result, err := mapResponder.conn.RetrieveDomainEntries(ctx, domainToFetch) - if err != nil { - return nil, 0, fmt.Errorf("GetDomainProofs | RetrieveKeyValuePairMultiThread | %w", err) - } - end2 := time.Now() - for _, keyValuePair := range result { - // domainProofMap[keyValuePair.Key].DomainEntryBytes = keyValuePair.Value - domainProofMap[keyValuePair.Key].DomainEntry.CertIDs = keyValuePair.Value - } - - fmt.Println(len(domainResultMap), end.Sub(start), " ", end1.Sub(start1), " ", end2.Sub(start2)) - return domainResultMap, countReadSize(result), nil -} - -func countReadSize(input []*db.KeyValuePair) int { - size := 0 - for _, pair := range input { - size = size + len(pair.Value) - } - return size -} diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 708d3c78..6ba7e515 100644 --- a/pkg/mapserver/responder/responder_test.go +++ 
b/pkg/mapserver/responder/responder_test.go @@ -3,6 +3,7 @@ package responder import ( "context" "encoding/hex" + "math/rand" "testing" "time" @@ -69,6 +70,9 @@ func TestNewResponder(t *testing.T) { // that the proofs of presence work correctly, by ingesting all the material, updating the DB, // creating a responder, and checking those domains. func TestProof(t *testing.T) { + // Because we are using "random" bytes deterministically here, set a fixed seed. + rand.Seed(1) + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() @@ -90,21 +94,21 @@ func TestProof(t *testing.T) { defer conn.Close() // a.com - certs, certIDs, parentCertIDs, names := testdb.BuildTestCertHierarchy(t, "a.com") + certs, certIDs, parentCertIDs, names := testdb.BuildTestRandomCertHierarchy(t, "a.com") err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, util.ExtractExpirations(certs), nil) require.NoError(t, err) certsA := certs // b.com - policies := testdb.BuildTestPolicyHierarchy(t, "b.com") + policies := testdb.BuildTestRandomPolicyHierarchy(t, "b.com") err = updater.UpdateWithKeepExisting(ctx, conn, nil, nil, nil, nil, nil, policies) require.NoError(t, err) policiesB := policies // c.com - certs, certIDs, parentCertIDs, names = testdb.BuildTestCertHierarchy(t, "c.com") - policies = testdb.BuildTestPolicyHierarchy(t, "c.com") + certs, certIDs, parentCertIDs, names = testdb.BuildTestRandomCertHierarchy(t, "c.com") + policies = testdb.BuildTestRandomPolicyHierarchy(t, "c.com") err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, util.ExtractExpirations(certs), policies) require.NoError(t, err) diff --git a/pkg/tests/random.go b/pkg/tests/random.go new file mode 100644 index 00000000..6212d37e --- /dev/null +++ b/pkg/tests/random.go @@ -0,0 +1,32 @@ +package tests + +import ( + "math/rand" + "time" + + ctx509 "github.com/google/certificate-transparency-go/x509" + 
"github.com/google/certificate-transparency-go/x509/pkix" + + "github.com/netsec-ethz/fpki/pkg/util" + "github.com/stretchr/testify/require" +) + +func RandomBytesForTest(t require.TestingT, size int) []byte { + buff := make([]byte, size) + n, err := rand.Read(buff) + require.NoError(t, err) + require.Equal(t, size, n) + return buff +} + +func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { + return &ctx509.Certificate{ + DNSNames: []string{domain}, + Subject: pkix.Name{ + CommonName: domain, + }, + NotBefore: util.TimeFromSecs(0), + NotAfter: time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), + Raw: RandomBytesForTest(t, 10), + } +} diff --git a/pkg/tests/testdb/certificates.go b/pkg/tests/testdb/certificates.go index 38957185..3464e44a 100644 --- a/pkg/tests/testdb/certificates.go +++ b/pkg/tests/testdb/certificates.go @@ -1,28 +1,25 @@ package testdb import ( - "time" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/google/certificate-transparency-go/x509/pkix" "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/util" + "github.com/netsec-ethz/fpki/pkg/tests" ) -// BuildTestCertHierarchy returns the certificates, chains, and names for two mock certificate +// BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate // chains: the first chain is domainName->c1.com->c0.com , and the second chain is // domainName->c0.com . -func BuildTestCertHierarchy(t require.TestingT, domainName string) ( +func BuildTestRandomCertHierarchy(t require.TestingT, domainName string) ( certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { // Create all certificates. 
certs = make([]*ctx509.Certificate, 4) - certs[0] = RandomX509Cert(t, "c0.com") - certs[1] = RandomX509Cert(t, "c1.com") - certs[2] = RandomX509Cert(t, domainName) - certs[3] = RandomX509Cert(t, domainName) + certs[0] = tests.RandomX509Cert(t, "c0.com") + certs[1] = tests.RandomX509Cert(t, "c1.com") + certs[2] = tests.RandomX509Cert(t, domainName) + certs[3] = tests.RandomX509Cert(t, domainName) // IDs: IDs = make([]*common.SHA256Output, len(certs)) @@ -46,15 +43,3 @@ func BuildTestCertHierarchy(t require.TestingT, domainName string) ( return } - -func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { - return &ctx509.Certificate{ - DNSNames: []string{domain}, - Subject: pkix.Name{ - CommonName: domain, - }, - NotBefore: util.TimeFromSecs(0), - NotAfter: time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), - Raw: util.RandomBytesForTest(t, 10), - } -} diff --git a/pkg/tests/testdb/policies.go b/pkg/tests/testdb/policies.go index e8269c94..9287e7ae 100644 --- a/pkg/tests/testdb/policies.go +++ b/pkg/tests/testdb/policies.go @@ -2,11 +2,11 @@ package testdb import ( "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/util" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/stretchr/testify/require" ) -func BuildTestPolicyHierarchy(t require.TestingT, domainName string) []common.PolicyObject { +func BuildTestRandomPolicyHierarchy(t require.TestingT, domainName string) []common.PolicyObject { // Create one RPC and one SP for that name. 
rpc := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ @@ -14,17 +14,17 @@ func BuildTestPolicyHierarchy(t require.TestingT, domainName string) []common.Po }, SerialNumber: 1, Version: 1, - PublicKey: util.RandomBytesForTest(t, 32), + PublicKey: tests.RandomBytesForTest(t, 32), CAName: "c0.com", - CASignature: util.RandomBytesForTest(t, 100), + CASignature: tests.RandomBytesForTest(t, 100), } sp := &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ Subject: domainName, }, CAName: "c0.com", - CASignature: util.RandomBytesForTest(t, 100), - RootCertSignature: util.RandomBytesForTest(t, 100), + CASignature: tests.RandomBytesForTest(t, 100), + RootCertSignature: tests.RandomBytesForTest(t, 100), } return []common.PolicyObject{rpc, sp} } diff --git a/pkg/util/random.go b/pkg/util/random.go deleted file mode 100644 index b35dd6fd..00000000 --- a/pkg/util/random.go +++ /dev/null @@ -1,15 +0,0 @@ -package util - -import ( - "math/rand" - - "github.com/stretchr/testify/require" -) - -func RandomBytesForTest(t require.TestingT, size int) []byte { - buff := make([]byte, size) - n, err := rand.Read(buff) - require.NoError(t, err) - require.Equal(t, size, n) - return buff -} From a4a045963672cbb4cd600577810580989b1eed11 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 22 May 2023 15:05:04 +0200 Subject: [PATCH 117/187] Cleanup pkg/tests/testdb and pkg/tests/random . 
--- pkg/db/mysql/mysql_test.go | 3 +- pkg/logverifier/logverifier_test.go | 17 +++-- pkg/mapserver/responder/responder_test.go | 9 ++- pkg/tests/random.go | 32 -------- pkg/tests/random/random.go | 92 +++++++++++++++++++++++ pkg/tests/testdb/certificates.go | 45 ----------- pkg/tests/testdb/policies.go | 30 -------- 7 files changed, 108 insertions(+), 120 deletions(-) delete mode 100644 pkg/tests/random.go create mode 100644 pkg/tests/random/random.go delete mode 100644 pkg/tests/testdb/certificates.go delete mode 100644 pkg/tests/testdb/policies.go diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 9889b17b..ec426865 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -15,6 +15,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" ) @@ -52,7 +53,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { var certNames [][]string for _, leaf := range leafCerts { // Create two mock x509 chains on top of leaf: - certs2, certIDs2, parentCertIDs2, certNames2 := testdb.BuildTestRandomCertHierarchy(t, leaf) + certs2, certIDs2, parentCertIDs2, certNames2 := random.BuildTestRandomCertHierarchy(t, leaf) certs = append(certs, certs2...) certIDs = append(certIDs, certIDs2...) parentCertIDs = append(parentCertIDs, parentCertIDs2...) 
diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 8daf92a4..86f9c9a5 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -8,6 +8,7 @@ import ( "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" @@ -40,12 +41,12 @@ func TestVerifyInclusionByHash(t *testing.T) { }, SerialNumber: 2, Version: 1, - PublicKey: tests.RandomBytesForTest(t, 32), + PublicKey: random.RandomBytesForTest(t, 32), NotBefore: util.TimeFromSecs(42), NotAfter: util.TimeFromSecs(142), CAName: "pca", TimeStamp: util.TimeFromSecs(100), - CASignature: tests.RandomBytesForTest(t, 32), + CASignature: random.RandomBytesForTest(t, 32), } // Serialize it without SPTs. @@ -107,7 +108,7 @@ func TestCheckRPC(t *testing.T) { poi := []*trillian.Proof{ { LeafIndex: 1, - Hashes: [][]byte{tests.RandomBytesForTest(t, 32)}, + Hashes: [][]byte{random.RandomBytesForTest(t, 32)}, }, } serializedPoI, err := common.ToJSON(poi) @@ -120,12 +121,12 @@ func TestCheckRPC(t *testing.T) { }, SerialNumber: 2, Version: 1, - PublicKey: tests.RandomBytesForTest(t, 32), + PublicKey: random.RandomBytesForTest(t, 32), NotBefore: util.TimeFromSecs(42), NotAfter: util.TimeFromSecs(142), CAName: "pca", TimeStamp: util.TimeFromSecs(100), - CASignature: tests.RandomBytesForTest(t, 32), + CASignature: random.RandomBytesForTest(t, 32), SPTs: []common.SPT{ { AddedTS: util.TimeFromSecs(99), @@ -160,7 +161,7 @@ func TestCheckSP(t *testing.T) { poi := []*trillian.Proof{ { LeafIndex: 1, - Hashes: [][]byte{tests.RandomBytesForTest(t, 32)}, + Hashes: [][]byte{random.RandomBytesForTest(t, 32)}, }, } serializedPoI, err := common.ToJSON(poi) @@ -177,8 +178,8 @@ func TestCheckSP(t *testing.T) { TimeStamp: util.TimeFromSecs(444), CAName: 
"pca", SerialNumber: 4, - CASignature: tests.RandomBytesForTest(t, 32), - RootCertSignature: tests.RandomBytesForTest(t, 32), + CASignature: random.RandomBytesForTest(t, 32), + RootCertSignature: random.RandomBytesForTest(t, 32), SPTs: []common.SPT{ { AddedTS: util.TimeFromSecs(444), diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index 6ba7e515..f7e8efba 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -15,6 +15,7 @@ import ( mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" ) @@ -94,21 +95,21 @@ func TestProof(t *testing.T) { defer conn.Close() // a.com - certs, certIDs, parentCertIDs, names := testdb.BuildTestRandomCertHierarchy(t, "a.com") + certs, certIDs, parentCertIDs, names := random.BuildTestRandomCertHierarchy(t, "a.com") err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, util.ExtractExpirations(certs), nil) require.NoError(t, err) certsA := certs // b.com - policies := testdb.BuildTestRandomPolicyHierarchy(t, "b.com") + policies := random.BuildTestRandomPolicyHierarchy(t, "b.com") err = updater.UpdateWithKeepExisting(ctx, conn, nil, nil, nil, nil, nil, policies) require.NoError(t, err) policiesB := policies // c.com - certs, certIDs, parentCertIDs, names = testdb.BuildTestRandomCertHierarchy(t, "c.com") - policies = testdb.BuildTestRandomPolicyHierarchy(t, "c.com") + certs, certIDs, parentCertIDs, names = random.BuildTestRandomCertHierarchy(t, "c.com") + policies = random.BuildTestRandomPolicyHierarchy(t, "c.com") err = updater.UpdateWithKeepExisting(ctx, conn, names, certIDs, parentCertIDs, certs, 
util.ExtractExpirations(certs), policies) require.NoError(t, err) diff --git a/pkg/tests/random.go b/pkg/tests/random.go deleted file mode 100644 index 6212d37e..00000000 --- a/pkg/tests/random.go +++ /dev/null @@ -1,32 +0,0 @@ -package tests - -import ( - "math/rand" - "time" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/google/certificate-transparency-go/x509/pkix" - - "github.com/netsec-ethz/fpki/pkg/util" - "github.com/stretchr/testify/require" -) - -func RandomBytesForTest(t require.TestingT, size int) []byte { - buff := make([]byte, size) - n, err := rand.Read(buff) - require.NoError(t, err) - require.Equal(t, size, n) - return buff -} - -func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { - return &ctx509.Certificate{ - DNSNames: []string{domain}, - Subject: pkix.Name{ - CommonName: domain, - }, - NotBefore: util.TimeFromSecs(0), - NotAfter: time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), - Raw: RandomBytesForTest(t, 10), - } -} diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go new file mode 100644 index 00000000..3f8afa04 --- /dev/null +++ b/pkg/tests/random/random.go @@ -0,0 +1,92 @@ +package random + +import ( + "math/rand" + "time" + + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/google/certificate-transparency-go/x509/pkix" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/util" + "github.com/stretchr/testify/require" +) + +func RandomBytesForTest(t require.TestingT, size int) []byte { + buff := make([]byte, size) + n, err := rand.Read(buff) + require.NoError(t, err) + require.Equal(t, size, n) + return buff +} + +func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { + return &ctx509.Certificate{ + DNSNames: []string{domain}, + Subject: pkix.Name{ + CommonName: domain, + }, + NotBefore: util.TimeFromSecs(0), + NotAfter: 
time.Date(3000, 1, 1, 0, 0, 0, 0, time.UTC), + Raw: RandomBytesForTest(t, 10), + } +} + +func BuildTestRandomPolicyHierarchy(t require.TestingT, domainName string) []common.PolicyObject { + // Create one RPC and one SP for that name. + rpc := &common.RPC{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, + SerialNumber: 1, + Version: 1, + PublicKey: RandomBytesForTest(t, 32), + CAName: "c0.com", + CASignature: RandomBytesForTest(t, 100), + } + sp := &common.SP{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, + CAName: "c0.com", + CASignature: RandomBytesForTest(t, 100), + RootCertSignature: RandomBytesForTest(t, 100), + } + return []common.PolicyObject{rpc, sp} +} + +// BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate +// chains: the first chain is domainName->c1.com->c0.com , and the second chain is +// domainName->c0.com . +func BuildTestRandomCertHierarchy(t require.TestingT, domainName string) ( + certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { + + // Create all certificates. + certs = make([]*ctx509.Certificate, 4) + certs[0] = RandomX509Cert(t, "c0.com") + certs[1] = RandomX509Cert(t, "c1.com") + certs[2] = RandomX509Cert(t, domainName) + certs[3] = RandomX509Cert(t, domainName) + + // IDs: + IDs = make([]*common.SHA256Output, len(certs)) + for i, c := range certs { + id := common.SHA256Hash32Bytes(c.Raw) + IDs[i] = &id + } + + // Names: only c2 and c3 are leaves, the rest should be nil. + names = make([][]string, len(certs)) + names[2] = certs[2].DNSNames + names[3] = certs[3].DNSNames + + // Parent IDs. 
+ parentIDs = make([]*common.SHA256Output, len(certs)) + // First chain: + parentIDs[1] = IDs[0] + parentIDs[2] = IDs[1] + // Second chain: + parentIDs[3] = IDs[0] + + return +} diff --git a/pkg/tests/testdb/certificates.go b/pkg/tests/testdb/certificates.go deleted file mode 100644 index 3464e44a..00000000 --- a/pkg/tests/testdb/certificates.go +++ /dev/null @@ -1,45 +0,0 @@ -package testdb - -import ( - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/stretchr/testify/require" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/tests" -) - -// BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate -// chains: the first chain is domainName->c1.com->c0.com , and the second chain is -// domainName->c0.com . -func BuildTestRandomCertHierarchy(t require.TestingT, domainName string) ( - certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { - - // Create all certificates. - certs = make([]*ctx509.Certificate, 4) - certs[0] = tests.RandomX509Cert(t, "c0.com") - certs[1] = tests.RandomX509Cert(t, "c1.com") - certs[2] = tests.RandomX509Cert(t, domainName) - certs[3] = tests.RandomX509Cert(t, domainName) - - // IDs: - IDs = make([]*common.SHA256Output, len(certs)) - for i, c := range certs { - id := common.SHA256Hash32Bytes(c.Raw) - IDs[i] = &id - } - - // Names: only c2 and c3 are leaves, the rest should be nil. - names = make([][]string, len(certs)) - names[2] = certs[2].DNSNames - names[3] = certs[3].DNSNames - - // Parent IDs. 
- parentIDs = make([]*common.SHA256Output, len(certs)) - // First chain: - parentIDs[1] = IDs[0] - parentIDs[2] = IDs[1] - // Second chain: - parentIDs[3] = IDs[0] - - return -} diff --git a/pkg/tests/testdb/policies.go b/pkg/tests/testdb/policies.go deleted file mode 100644 index 9287e7ae..00000000 --- a/pkg/tests/testdb/policies.go +++ /dev/null @@ -1,30 +0,0 @@ -package testdb - -import ( - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/tests" - "github.com/stretchr/testify/require" -) - -func BuildTestRandomPolicyHierarchy(t require.TestingT, domainName string) []common.PolicyObject { - // Create one RPC and one SP for that name. - rpc := &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, - }, - SerialNumber: 1, - Version: 1, - PublicKey: tests.RandomBytesForTest(t, 32), - CAName: "c0.com", - CASignature: tests.RandomBytesForTest(t, 100), - } - sp := &common.SP{ - PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, - }, - CAName: "c0.com", - CASignature: tests.RandomBytesForTest(t, 100), - RootCertSignature: tests.RandomBytesForTest(t, 100), - } - return []common.PolicyObject{rpc, sp} -} From 9c05f921a50896762cb11092209aac8b40f40d33 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 22 May 2023 16:49:38 +0200 Subject: [PATCH 118/187] Fix pkg/pca build. --- pkg/pca/pca.go | 57 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 34 insertions(+), 23 deletions(-) diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 41eb5a11..fdbf49b9 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -6,8 +6,11 @@ import ( "log" "os" + "github.com/google/trillian" + "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/logverifier" + "github.com/netsec-ethz/fpki/pkg/util" ) // CRITICAL: The funcs are not thread-safe for now. 
DO NOT use them for multi-thread program. @@ -79,8 +82,7 @@ func NewPCA(configPath string) (*PCA, error) { func (pca *PCA) ReceiveSPTFromPolicyLog() error { for k, v := range pca.preRPCByDomains { // read the corresponding spt - spt := &common.SPT{} - err := common.JsonFileToSPT(spt, pca.policyLogExgPath+"/spt/"+k) + spt, err := common.JsonFileToSPT(pca.policyLogExgPath + "/spt/" + k) if err != nil { return fmt.Errorf("ReceiveSPTFromPolicyLog | JsonFileToSPT | %w", err) } @@ -102,8 +104,7 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { for k, v := range pca.preSPByDomains { // read the corresponding spt - spt := &common.SPT{} - err := common.JsonFileToSPT(spt, pca.policyLogExgPath+"/spt/"+k) + spt, err := common.JsonFileToSPT(pca.policyLogExgPath + "/spt/" + k) if err != nil { return fmt.Errorf("ReceiveSPTFromPolicyLog | JsonFileToSPT | %w", err) } @@ -145,11 +146,9 @@ func (pca *PCA) OutputRPCAndSP() error { // verify the SPT of the RPC. func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { - // construct proofs - - proofs, err := common.JSONToPoI(spt.PoI) + proofs, logRoot, err := getProofsAndLogRoot(spt) if err != nil { - return fmt.Errorf("verifySPT | JsonBytesToPoI | %w", err) + return fmt.Errorf("verifySPTWithRPC | parsePoIAndSTH | %w", err) } // get leaf hash @@ -159,12 +158,6 @@ func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { } leafHash := pca.logVerifier.HashLeaf(rpcBytes) - // get LogRootV1 - logRoot, err := common.JSONToLogRoot(spt.STH) - if err != nil { - return fmt.Errorf("verifySPT | JsonBytesToLogRoot | %w", err) - } - // verify the PoI err = pca.logVerifier.VerifyInclusionByHash(logRoot, leafHash, proofs) if err != nil { @@ -176,10 +169,9 @@ func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { // verify the SPT of the RPC. 
func (pca *PCA) verifySPTWithSP(spt *common.SPT, sp *common.SP) error { - // construct proofs - proofs, err := common.JSONToPoI(spt.PoI) + proofs, logRoot, err := getProofsAndLogRoot(spt) if err != nil { - return fmt.Errorf("verifySPT | JsonBytesToPoI | %w", err) + return fmt.Errorf("verifySPTWithSP | parsePoIAndSTH | %w", err) } // get leaf hash @@ -189,12 +181,6 @@ func (pca *PCA) verifySPTWithSP(spt *common.SPT, sp *common.SP) error { } leafHash := pca.logVerifier.HashLeaf(spBytes) - // get LogRootV1 - logRoot, err := common.JSONToLogRoot(spt.STH) - if err != nil { - return fmt.Errorf("verifySPT | JsonBytesToLogRoot | %w", err) - } - // verify the PoI err = pca.logVerifier.VerifyInclusionByHash(logRoot, leafHash, proofs) if err != nil { @@ -213,6 +199,31 @@ func (pca *PCA) ReturnValidRPC() map[string]*common.RPC { return pca.validRPCsByDomains } +// getProofsAndLogRoot return the proofs and root parsed from the PoI and STH in JSON. +func getProofsAndLogRoot(spt *common.SPT) ([]*trillian.Proof, *types.LogRootV1, error) { + // Parse the PoI into []*trillian.Proof. + serializedProofs, err := common.FromJSON(spt.PoI) + if err != nil { + return nil, nil, err + } + proofs, err := util.ToTypedSlice[*trillian.Proof](serializedProofs) + if err != nil { + return nil, nil, err + } + + // Parse the STH into a *types.LogRootV1. + serializedRoot, err := common.FromJSON(spt.STH) + if err != nil { + return nil, nil, err + } + root, err := util.ToType[*types.LogRootV1](serializedRoot) + if err != nil { + return nil, nil, err + } + + return proofs, root, nil +} + /* // check whether the RPC signature is correct func (pca *PCA) checkRPCSignature(rcsr *common.RCSR) bool { From 4e37351e714a8106a45132d522532aee928b3108 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 22 May 2023 17:15:35 +0200 Subject: [PATCH 119/187] Cleanup pkg/mapserver/common and tests. 
--- pkg/mapserver/common/structure_test.go | 227 ----------------------- pkg/mapserver/common/tools.go | 1 + pkg/mapserver/common/tools_test.go | 1 - pkg/mapserver/updater/certs_updater.go | 6 +- pkg/mapserver/updater/deleteme.go | 246 ------------------------- pkg/mapserver/updater/updater.go | 2 +- 6 files changed, 7 insertions(+), 476 deletions(-) delete mode 100644 pkg/mapserver/common/structure_test.go delete mode 100644 pkg/mapserver/common/tools_test.go delete mode 100644 pkg/mapserver/updater/deleteme.go diff --git a/pkg/mapserver/common/structure_test.go b/pkg/mapserver/common/structure_test.go deleted file mode 100644 index 9bf5044f..00000000 --- a/pkg/mapserver/common/structure_test.go +++ /dev/null @@ -1,227 +0,0 @@ -package common - -import ( - "crypto/rand" - "fmt" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/util" - "github.com/stretchr/testify/require" -) - -// TestSerializingDomainEntry: Serializing and deserializing of DomainEntry -func TestSerializeDomainEntry(t *testing.T) { - cert, err := common.X509CertFromFile("./testdata/cert.pem") - require.NoError(t, err, "X509CertFromFile error") - - testDomainEntry := &DomainEntry{ - DomainName: "test.com", - DomainID: common.SHA256Hash32Bytes([]byte("test.com")), - RPCs: []common.RPC{ - { - PublicKey: []byte{1, 4, 7, 3, 2}, - PublicKeyAlgorithm: common.RSA, - Version: 1, - }, - { - PublicKey: []byte{2, 4, 7, 3, 2}, - PublicKeyAlgorithm: common.RSA, - Version: 2, - }, - }, - PCs: []common.SP{ - { - Policies: common.Policy{ - TrustedCA: []string{"ca1", "ca2"}, - AllowedSubdomains: []string{"flowers.com"}, - }, - // TimeStamp: time.Now(), - TimeStamp: util.TimeFromSecs(42), - SerialNumber: 1, - SPTs: []common.SPT{ - { - Version: 1, - Subject: "spt subject", - STH: []byte{0, 1, 2, 3}, - STHSerialNumber: 12345, - PoI: []byte{0, 1, 2, 3, 4, 5, 6, 7}, - }, - }, - }, - }, - DomainCerts: cert.Raw, - } 
- - domainBytes, err := DeletemeSerializeDomainEntry(testDomainEntry) - require.NoError(t, err, "SerializedDomainEntry error") - - fmt.Println(string(domainBytes)) - testDomainEntryDeserialized, err := DeletemeDeserializeDomainEntry(domainBytes) - require.NoError(t, err, "DeserializeDomainEntry error") - - require.True(t, cmp.Equal(testDomainEntry, testDomainEntryDeserialized)) - // assert.EqualValues(t, testDomainEntry, testDomainEntryDeserialized) - // assert.Equal(t, reflect.DeepEqual(testDomainEntry, testDomainEntryDeserialized), true, "structure not equal") -} - -// TestAddCert: test AddCert() -// update with new cert -> AddCert() should return true -// update with old cert -> AddCert() should return false -// then check if all the certs are correctly added -func TestAddCert(t *testing.T) { - // cert1, err := common.CTX509CertFromFile("./testdata/cert1.cer") - // require.NoError(t, err) - - // cert2, err := common.CTX509CertFromFile("./testdata/cert2.cer") - // require.NoError(t, err) - - // emptyChain := []*ctx509.Certificate{} - - // domainEntry := &DomainEntry{} - - // isUpdated := domainEntry.AddCert(cert1, emptyChain) - // assert.True(t, isUpdated) - - // isUpdated = domainEntry.AddCert(cert1, emptyChain) - // assert.False(t, isUpdated) - - // isUpdated = domainEntry.AddCert(cert2, emptyChain) - // assert.True(t, isUpdated) - - // isUpdated = domainEntry.AddCert(cert2, emptyChain) - // assert.False(t, isUpdated) - - // assert.Equal(t, 2, len(domainEntry.Entries)) - - // isFound := false - // issuerRepresentation := cert1.Issuer.String() - // for _, caEntry := range domainEntry.Entries { - // if caEntry.CAName == issuerRepresentation { - // assert.True(t, bytes.Equal(caEntry.DomainCerts[0], cert1.Raw)) - // isFound = true - // } - // } - // assert.True(t, isFound) - - // isFound = false - // issuerRepresentation = cert2.Issuer.String() - // for _, caEntry := range domainEntry.Entries { - // if caEntry.CAName == issuerRepresentation { - // assert.True(t, 
bytes.Equal(caEntry.DomainCerts[0], cert2.Raw)) - // isFound = true - // } - // } - // assert.True(t, isFound) -} - -// TestAddPC: test AddPC -// update with new PC -> AddPC() should return true -// update with old PC -> AddPC() should return false -// then check if all the PC are correctly added -func TestAddPC(t *testing.T) { - // pc1 := common.SP{ - // CAName: "ca1", - // Subject: "before", - // } - - // pc2 := common.SP{ - // CAName: "ca1", - // Subject: "after", - // } - - // pc3 := common.SP{ - // CAName: "ca2", - // Subject: "after", - // } - - // domainEntry := &DomainEntry{} - - // isUpdated := domainEntry.AddPC(&pc1) - // assert.True(t, isUpdated) - - // isUpdated = domainEntry.AddPC(&pc3) - // assert.True(t, isUpdated) - - // isUpdated = domainEntry.AddPC(&pc1) - // assert.False(t, isUpdated) - - // isUpdated = domainEntry.AddPC(&pc3) - // assert.False(t, isUpdated) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName == "ca1" { - // assert.True(t, caList.PCs.Subject == "before") - // } - // } - - // isUpdated = domainEntry.AddPC(&pc2) - // assert.True(t, isUpdated) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName == "ca1" { - // assert.True(t, caList.PCs.Subject == "after") - // } - // } -} - -// TestAddRPC: test AddRPC -// update with new RPC -> AddRPC() should return true -// update with old RPC -> AddRPC() should return false -// then check if all the RPC are correctly added -func TestAddRPC(t *testing.T) { - // rpc1 := common.RPC{ - // CAName: "ca1", - // Subject: "before", - // } - - // rpc2 := common.RPC{ - // CAName: "ca1", - // Subject: "after", - // } - - // rpc3 := common.RPC{ - // CAName: "ca2", - // Subject: "after", - // } - - // domainEntry := &DomainEntry{} - - // isUpdated := domainEntry.AddRPC(&rpc1) - // assert.True(t, isUpdated) - - // isUpdated = domainEntry.AddRPC(&rpc3) - // assert.True(t, isUpdated) - - // isUpdated = domainEntry.AddRPC(&rpc1) - // assert.False(t, isUpdated) - - // 
isUpdated = domainEntry.AddRPC(&rpc3) - // assert.False(t, isUpdated) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName == "ca1" { - // assert.True(t, caList.RPCs.Subject == "before") - // } - // } - - // isUpdated = domainEntry.AddRPC(&rpc2) - // assert.True(t, isUpdated) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName == "ca1" { - // assert.True(t, caList.RPCs.Subject == "after") - // } - // } -} - -func generateRandomBytes() []byte { - token := make([]byte, 40) - rand.Read(token) - return token -} - -func generateRandomBytesArray() [][]byte { - return [][]byte{generateRandomBytes()} -} diff --git a/pkg/mapserver/common/tools.go b/pkg/mapserver/common/tools.go index f9d51f84..e8390267 100644 --- a/pkg/mapserver/common/tools.go +++ b/pkg/mapserver/common/tools.go @@ -34,6 +34,7 @@ func getRootCertificateSubject(cert *x509.Certificate, certChain []*x509.Certifi // AddCert: add a x509 cert to one domain entry. Return whether the domain entry is updated. 
func (domainEntry *DomainEntry) AddCert(cert *x509.Certificate, certChain []*x509.Certificate) bool { + panic("deprecated: should never be called") // caName := getRootCertificateSubject(cert, certChain) // isFound := false diff --git a/pkg/mapserver/common/tools_test.go b/pkg/mapserver/common/tools_test.go deleted file mode 100644 index 805d0c79..00000000 --- a/pkg/mapserver/common/tools_test.go +++ /dev/null @@ -1 +0,0 @@ -package common diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index 2abb6e63..b2abf58f 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -28,7 +28,7 @@ func (mapUpdater *MapUpdater) DeletemeUpdateDomainEntriesTableUsingCerts( int, error, ) { - + panic("deprecated: should never be called") if len(certs) == 0 { return nil, 0, nil } @@ -139,6 +139,7 @@ func UpdateDomainEntries( certChainDomainMap map[string][][]*ctx509.Certificate, ) (uniqueSet, error) { + panic("deprecated: should never be called") updatedDomainHash := make(uniqueSet) // read from previous map // the map records: domain - certs pair @@ -175,6 +176,7 @@ func UpdateDomainEntries( // updateDomainEntry: insert certificate into correct CAEntry // return: if this domain entry is updated func updateDomainEntry(domainEntry *mcommon.DomainEntry, cert *ctx509.Certificate, certChain []*ctx509.Certificate) bool { + panic("deprecated: should never be called") return domainEntry.AddCert(cert, certChain) } @@ -198,6 +200,8 @@ func GetDomainEntriesToWrite(updatedDomain uniqueSet, func DeletemeSerializeUpdatedDomainEntries(domains map[common.SHA256Output]*mcommon.DomainEntry) ( []*db.KeyValuePair, error) { + panic("this function is deprecated and should never be called") + result := make([]*db.KeyValuePair, 0, len(domains)) for domainNameHash, domainEntry := range domains { diff --git a/pkg/mapserver/updater/deleteme.go b/pkg/mapserver/updater/deleteme.go deleted file mode 100644 index 
eb4cb03f..00000000 --- a/pkg/mapserver/updater/deleteme.go +++ /dev/null @@ -1,246 +0,0 @@ -package updater - -import ( - "context" - "fmt" - "time" - - "github.com/google/certificate-transparency-go/x509" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/domain" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - -// deleteme! only used to print extra info in benchmarks: -// functions for measuring the bottleneck - -func (u *MapUpdater) UpdateNextBatchReturnTimeList(ctx context.Context) ( - int, []string, []string, error, []*db.KeyValuePair, []*db.KeyValuePair, int) { - - certs, err := u.Fetcher.NextBatch(ctx) - if err != nil { - return 0, nil, nil, fmt.Errorf("CollectCerts | GetCertMultiThread | %w", err), nil, nil, 0 - } - - certSize := 0.0 - for _, cert := range certs { - certSize = certSize + float64(len(cert.Raw)) - } - fmt.Println("certs size: ", certSize/1024/1024, " MB") - - names := parseCertDomainName(certs) - timeList, err, writePair, readPair, smtSize := u.updateCertsReturnTime(ctx, certs) - return len(certs), timeList, names, err, writePair, readPair, smtSize -} - -func (mapUpdater *MapUpdater) updateCertsReturnTime(ctx context.Context, certs []*ctx509.Certificate) ( - []string, error, []*db.KeyValuePair, []*db.KeyValuePair, int) { - - timeList := []string{} - totalStart := time.Now() - start := time.Now() - keyValuePairs, _, times, err, writePairs, readPairs := - mapUpdater.UpdateDomainEntriesTableUsingCertsReturnTime(ctx, certs) - if err != nil { - return nil, fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err), nil, nil, 0 - } - end := time.Now() - fmt.Println() - fmt.Println("============================================") - fmt.Println("(db and memory) time to update domain entries: ", end.Sub(start)) - timeList = append(timeList, 
end.Sub(start).String()) - - if len(keyValuePairs) == 0 { - return nil, nil, []*db.KeyValuePair{}, []*db.KeyValuePair{}, 0 - } - - _, _, err = keyValuePairToSMTInput(keyValuePairs) - if err != nil { - return nil, fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err), nil, nil, 0 - } - - totalEnd := time.Now() - - timeList = append(timeList, totalEnd.Sub(totalStart).String()) - timeList = append(timeList, times...) - - return timeList, nil, writePairs, readPairs, len(keyValuePairs) -} - -// UpdateDomainEntriesTableUsingCerts: Update the domain entries using the domain certificates -func (mapUpdater *MapUpdater) UpdateDomainEntriesTableUsingCertsReturnTime(ctx context.Context, - certs []*x509.Certificate) ([]*db.KeyValuePair, int, []string, error, []*db.KeyValuePair, []*db.KeyValuePair) { - - timeList := []string{} - if len(certs) == 0 { - return nil, 0, nil, nil, nil, nil - } - - start := time.Now() - // get the unique list of affected domains - emptyCertChains := make([][]*x509.Certificate, len(certs)) - affectedDomainsMap, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap(certs, emptyCertChains) - end := time.Now() - fmt.Println("(memory) time to process certs: ", end.Sub(start)) - timeList = append(timeList, end.Sub(start).String()) - - // if no domain to update - if len(affectedDomainsMap) == 0 { - return nil, 0, nil, nil, nil, nil - } - - start = time.Now() - // retrieve (possibly)affected domain entries from db - // It's possible that no records will be changed, because the certs are already recorded. 
- domainEntriesMap, readData, err := mapUpdater.retrieveAffectedDomainFromDBReturnReadDomains(ctx, affectedDomainsMap) - if err != nil { - return nil, 0, nil, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | retrieveAffectedDomainFromDB | %w", err), nil, nil - } - end = time.Now() - - timeList = append(timeList, end.Sub(start).String()) - fmt.Println("(db) time to retrieve domain entries: ", end.Sub(start)) - - //readSize := 0.0 - //for _, v := range domainEntriesMap { - // readSize = readSize + float64(countDomainEntriesSize(v)) - //} - //fmt.Println("(db) time to retrieve domain entries: ", end.Sub(start), " ", readSize/1024/1024, " MB") - - start = time.Now() - // update the domain entries - updatedDomains, err := UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) - if err != nil { - return nil, 0, nil, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | updateDomainEntries | %w", err), nil, nil - } - end = time.Now() - fmt.Println("(db) time to update domain entries: ", end.Sub(start)) - timeList = append(timeList, end.Sub(start).String()) - - // if during this updates, no cert is added, directly return - if len(updatedDomains) == 0 { - return nil, 0, nil, nil, nil, nil - } - - start = time.Now() - // get the domain entries only if they are updated, from DB - domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) - if err != nil { - return nil, 0, nil, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | getDomainEntriesToWrite | %w", err), nil, nil - } - //readSize = 0.0 - //for _, v := range domainEntriesToWrite { - // readSize = readSize + float64(countDomainEntriesSize(v)) - //} - //fmt.Println(" domain entries size: ", readSize/1024/1024, " MB") - - // serialized the domainEntry -> key-value pair - keyValuePairs, err := DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) - if err != nil { - return nil, 0, nil, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | serializeUpdatedDomainEntries | %w", err), 
nil, nil - } - end = time.Now() - fmt.Println("(memory) time to process updated domains: ", end.Sub(start)) - timeList = append(timeList, end.Sub(start).String()) - - start = time.Now() - // commit changes to db - num, err := mapUpdater.writeChangesToDB(ctx, keyValuePairs) - if err != nil { - return nil, 0, nil, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | writeChangesToDB | %w", err), nil, nil - } - end = time.Now() - fmt.Println("(db) time to write updated domain entries: ", end.Sub(start)) - fmt.Println("*******************") - fmt.Println("num of writes: ", len(keyValuePairs)) - size := 0.0 - for _, pair := range keyValuePairs { - size = size + float64(len(pair.Value)) - } - fmt.Println("write size: ", size/1024/1024, " MB") - fmt.Println("*******************") - timeList = append(timeList, end.Sub(start).String()) - - return keyValuePairs, num, timeList, nil, keyValuePairs, readData -} - -func countDBWriteSize(keyValuePairs []*db.KeyValuePair) int { - totalSize := 0 - for _, pair := range keyValuePairs { - totalSize = totalSize + len(pair.Value) - totalSize = totalSize + len(pair.Key) - } - return totalSize -} - -func parseCertDomainName(certs []*ctx509.Certificate) []string { - result := []string{} - for _, cert := range certs { - _, err := domain.ParseDomainName(cert.Subject.CommonName) - if err == nil { - result = append(result, cert.Subject.CommonName) - } - } - return result -} - -// retrieveAffectedDomainFromDB: get affected domain entries from db -func (mapUpdater *MapUpdater) retrieveAffectedDomainFromDBReturnReadDomains(ctx context.Context, - affectedDomainsMap uniqueSet) (map[common.SHA256Output]*mapCommon.DomainEntry, []*db.KeyValuePair, error) { - - // XXX(juagargi) review why passing a set (we need to convert it to a slice) - // list of domain hashes to fetch the domain entries from db - affectedDomainHashes := make([]common.SHA256Output, 0, len(affectedDomainsMap)) - for k := range affectedDomainsMap { - affectedDomainHashes = 
append(affectedDomainHashes, k) - } - - // work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) { - // domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) - // resultChan <- dbResult{pairs: domainEntries, err: err} - // } - work := func(domainHashes []common.SHA256Output, resultChan chan dbResult) {} - - resultChan := make(chan dbResult) - - totalNum := len(affectedDomainHashes) - numOfWorker := totalNum / batchSize - remaining := totalNum % batchSize - - workerCounter := 0 - for i := 0; i < numOfWorker; i++ { - workerCounter++ - go work(affectedDomainHashes[i*batchSize:i*batchSize+batchSize], resultChan) - } - if remaining != 0 { - workerCounter++ - go work(affectedDomainHashes[numOfWorker*batchSize:], resultChan) - } - - domainEntries := []*db.KeyValuePair{} - - for workerCounter > 0 { - newResult := <-resultChan - if newResult.err != nil { - return nil, nil, newResult.err - } - //fmt.Println("pair length ", len(newResult.pairs)) - domainEntries = append(domainEntries, newResult.pairs...) 
- workerCounter-- - } - - start := time.Now() - - //fmt.Println(len(domainEntries)) - // parse the key-value pair -> domain map - domainEntriesMap, err := deletemeParseDomainBytes(domainEntries) - if err != nil { - return nil, nil, fmt.Errorf("retrieveAffectedDomainFromDB | %w", err) - } - end := time.Now() - fmt.Println("time to parse domain entries", end.Sub(start)) - //fmt.Println(len(domainEntriesMap)) - return domainEntriesMap, domainEntries, nil -} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 2090e1ca..c1a021ba 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -97,7 +97,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // updateCerts: update the tables and SMT (in memory) using certificates func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { - + panic("deprecated: should never be called") keyValuePairs, numOfUpdates, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) if err != nil { return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err) From dbf7f911f7b8ef268dd97adccddcbdf00ab1d93e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 22 May 2023 17:16:02 +0200 Subject: [PATCH 120/187] Tests in pkg/mapserver/trie pass again. 
--- pkg/mapserver/trie/trie_test.go | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/pkg/mapserver/trie/trie_test.go b/pkg/mapserver/trie/trie_test.go index 372a91b8..9336a499 100644 --- a/pkg/mapserver/trie/trie_test.go +++ b/pkg/mapserver/trie/trie_test.go @@ -13,13 +13,13 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/stretchr/testify/require" ) // TestTrieEmpty: test empty SMT func TestTrieEmpty(t *testing.T) { - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -32,7 +32,7 @@ func TestTrieUpdateAndGet(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -72,7 +72,7 @@ func TestTrieAtomicUpdate(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -102,7 +102,7 @@ func TestTriePublicUpdateAndGet(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -139,7 +139,7 @@ func TestTrieUpdateAndDelete(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -177,7 +177,7 @@ func TestTrieMerkleProof(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - db := 
tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -209,7 +209,7 @@ func TestTrieMerkleProofCompressed(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) @@ -239,7 +239,7 @@ func TestHeight0LeafShortcut(t *testing.T) { defer cancelF() keySize := 32 - db := tests.NewMockDB() + db := testdb.NewMockDB() smt, err := NewTrie(nil, common.SHA256Hash, db) require.NoError(t, err) From ac79f624e1062f05a276ce4b8c53ede9c412ae7c Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 23 May 2023 09:21:41 +0200 Subject: [PATCH 121/187] WIP temporarily disable some pkg/mapserver/updater tests. --- pkg/mapserver/updater/certs_updater_test.go | 3 +++ pkg/mapserver/updater/rpc_updater_test.go | 2 ++ pkg/mapserver/updater/updater_test.go | 20 ++++++++++++-------- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index 488e9a06..94ada91d 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -18,6 +18,8 @@ import ( // TestUpdateDomainEntriesUsingCerts: test UpdateDomainEntriesUsingCerts // This test tests the individual functions of the UpdateDomainEntriesUsingCerts() func TestUpdateDomainEntriesUsingCerts(t *testing.T) { + t.Skip() // deleteme + certs := []*ctx509.Certificate{} // load test certs @@ -130,6 +132,7 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { // TestUpdateSameCertTwice: update the same certs twice, number of updates should be zero func TestUpdateSameCertTwice(t *testing.T) { + t.Skip() // deleteme certs := []*ctx509.Certificate{} // check if files, err := ioutil.ReadDir("./testdata/certs/") diff --git a/pkg/mapserver/updater/rpc_updater_test.go 
b/pkg/mapserver/updater/rpc_updater_test.go index 921e945f..35331103 100644 --- a/pkg/mapserver/updater/rpc_updater_test.go +++ b/pkg/mapserver/updater/rpc_updater_test.go @@ -144,6 +144,8 @@ func TestUpdateDomainEntriesWithRPCAndPC(t *testing.T) { // TestUpdateSameRPCTwice: update the same RPC twice, number of updates should be zero func TestUpdateSameRPCTwice(t *testing.T) { + t.Skip() // deleteme + pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 0) require.NoError(t, err, "GetPCAndRPC error") diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 01da1f37..411efad8 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -11,7 +11,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" - "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,11 +19,13 @@ import ( // TestUpdateCerts: test updateCerts() func TestUpdateCerts(t *testing.T) { - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, tests.NewMockDB()) + t.Skip() // deleteme + + smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, testdb.NewMockDB()) require.NoError(t, err) smt.CacheHeightLimit = 233 - updaterDB := tests.NewMockDB() + updaterDB := testdb.NewMockDB() updater, err := getMockUpdater(smt, updaterDB) require.NoError(t, err) @@ -79,14 +81,16 @@ func TestUpdateCerts(t *testing.T) { // TestUpdateRPCAndPC: test updateRPCAndPC() func TestUpdateRPCAndPC(t *testing.T) { + t.Skip() // deleteme + pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 20) require.NoError(t, err) - smt, err := trie.NewTrie(nil, 
projectCommon.SHA256Hash, tests.NewMockDB()) + smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, testdb.NewMockDB()) require.NoError(t, err) smt.CacheHeightLimit = 233 - updaterDB := tests.NewMockDB() + updaterDB := testdb.NewMockDB() updater, err := getMockUpdater(smt, updaterDB) require.NoError(t, err) @@ -145,11 +149,11 @@ func TestUpdateRPCAndPC(t *testing.T) { // TestFetchUpdatedDomainHash: test fetchUpdatedDomainHash() func TestFetchUpdatedDomainHash(t *testing.T) { - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, tests.NewMockDB()) + smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, testdb.NewMockDB()) require.NoError(t, err) smt.CacheHeightLimit = 233 - updaterDB := tests.NewMockDB() + updaterDB := testdb.NewMockDB() updater, err := getMockUpdater(smt, updaterDB) require.NoError(t, err) @@ -218,7 +222,7 @@ func getRandomHash() projectCommon.SHA256Output { } // get a updater using mock db -func getMockUpdater(smt *trie.Trie, updaterDB *tests.MockDB) (*MapUpdater, error) { +func getMockUpdater(smt *trie.Trie, updaterDB *testdb.MockDB) (*MapUpdater, error) { return &MapUpdater{ smt: smt, dbConn: updaterDB, From b773cd308b311ce513a340f8f669535a086c7759 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 23 May 2023 10:04:27 +0200 Subject: [PATCH 122/187] Replace CTX509CertFromFile with CertReader. Use a new function in pkg/util. Test that CertReader can also read plain certificates. 
--- pkg/common/cert.go | 30 ------ pkg/mapserver/updater/certs_updater_test.go | 8 +- pkg/mapserver/updater/tools_test.go | 105 -------------------- pkg/mapserver/updater/updater_test.go | 11 +- pkg/mapserver/util/proof.go | 61 ------------ pkg/util/certReader.go | 8 +- pkg/util/certReader_test.go | 12 +++ pkg/util/io.go | 17 ++++ pkg/util/io_test.go | 9 ++ tests/testdata/1-regular-cert.pem | 38 +++++++ 10 files changed, 92 insertions(+), 207 deletions(-) delete mode 100644 pkg/mapserver/updater/tools_test.go delete mode 100644 pkg/mapserver/util/proof.go create mode 100644 tests/testdata/1-regular-cert.pem diff --git a/pkg/common/cert.go b/pkg/common/cert.go index 85eb452a..f5da66a9 100644 --- a/pkg/common/cert.go +++ b/pkg/common/cert.go @@ -7,8 +7,6 @@ import ( "errors" "fmt" "io/ioutil" - - ctX509 "github.com/google/certificate-transparency-go/x509" ) // RsaPublicKeyToPemBytes: marshall public key to bytes @@ -85,31 +83,3 @@ func LoadRSAPrivateKeyFromFile(keyPath string) (*rsa.PrivateKey, error) { } return keyPair, nil } - -// X509CertFromFile: read x509 cert from files -func CTX509CertFromFile(fileName string) (*ctX509.Certificate, error) { - content, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, fmt.Errorf("X509CertFromFile | failed to read %s: %w", fileName, err) - } - - var block *pem.Block - block, _ = pem.Decode(content) - - switch { - case block == nil: - return nil, fmt.Errorf("X509CertFromFile | no pem block in %s", fileName) - case block.Type != "CERTIFICATE": - return nil, fmt.Errorf("X509CertFromFile | %s contains data other than certificate", fileName) - } - - cert, err := ctX509.ParseCertificate(block.Bytes) - if err != nil { - cert, err = ctX509.ParseTBSCertificate(block.Bytes) - if err != nil { - return nil, fmt.Errorf("X509CertFromFile | ParseTBSCertificate | %w", err) - } - } - - return cert, nil -} diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go index 
94ada91d..6fe54bb0 100644 --- a/pkg/mapserver/updater/certs_updater_test.go +++ b/pkg/mapserver/updater/certs_updater_test.go @@ -27,8 +27,8 @@ func TestUpdateDomainEntriesUsingCerts(t *testing.T) { require.NoError(t, err, "ioutil.ReadDir") certChains := make([][]*ctx509.Certificate, len(files)) for _, file := range files { - cert, err := common.CTX509CertFromFile("./testdata/certs/" + file.Name()) - require.NoError(t, err, "projectCommon.CTX509CertFromFile") + cert, err := util.CertificateFromPEMFile("./testdata/certs/" + file.Name()) + require.NoError(t, err) certs = append(certs, cert) } @@ -139,8 +139,8 @@ func TestUpdateSameCertTwice(t *testing.T) { require.NoError(t, err, "ioutil.ReadDir") certChains := make([][]*ctx509.Certificate, len(files)) for _, file := range files { - cert, err := common.CTX509CertFromFile("./testdata/certs/" + file.Name()) - require.NoError(t, err, "projectCommon.CTX509CertFromFile") + cert, err := util.CertificateFromPEMFile("./testdata/certs/" + file.Name()) + require.NoError(t, err) certs = append(certs, cert) } diff --git a/pkg/mapserver/updater/tools_test.go b/pkg/mapserver/updater/tools_test.go deleted file mode 100644 index d6dce281..00000000 --- a/pkg/mapserver/updater/tools_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package updater - -import ( - "crypto/rand" - "testing" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/util" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestExtractCertDomains: test extractCertDomains() -func TestExtractCertDomains(t *testing.T) { - - cert, err := common.CTX509CertFromFile("./testdata/certs/adiq.com.br144.cer") - require.NoError(t, err, "projectCommon.CTX509CertFromFile") - - result := util.ExtractCertDomains(cert) - assert.Equal(t, 2, len(result)) - assert.Contains(t, result, "*.adiq.com.br") - assert.Contains(t, result, "adiq.com.br") -} - -// TestSortDomainEntry: test SortDomainEntry() 
-func TestSortDomainEntry(t *testing.T) { - // // prepare test data - // cert1 := generateRandomBytes(100) - // cert2 := generateRandomBytes(100) - // cert3 := generateRandomBytes(103) - // cert4 := generateRandomBytes(10) - - // caEntry1 := mapCommon.Entry{ - // CAName: "ca1", - // CAHash: common.SHA256Hash([]byte("ca1")), - // DomainCerts: [][]byte{cert1, cert2, cert3, cert4}, - // } - - // caEntry1_ := mapCommon.Entry{ - // CAName: "ca1", - // CAHash: common.SHA256Hash([]byte("ca1")), - // DomainCerts: [][]byte{cert2, cert4, cert3, cert1}, - // } - - // caEntry2 := mapCommon.Entry{ - // CAName: "ca2", - // CAHash: common.SHA256Hash([]byte("ca2")), - // DomainCerts: [][]byte{cert1, cert2, cert3, cert4}, - // } - - // caEntry2_ := mapCommon.Entry{ - // CAName: "ca2", - // CAHash: common.SHA256Hash([]byte("ca2")), - // DomainCerts: [][]byte{cert2, cert4, cert1, cert3}, - // } - - // caEntry3 := mapCommon.Entry{ - // CAName: "ca3", - // CAHash: common.SHA256Hash([]byte("ca3")), - // DomainCerts: [][]byte{cert1, cert3, cert2, cert4}, - // } - - // caEntry3_ := mapCommon.Entry{ - // CAName: "ca3", - // CAHash: common.SHA256Hash([]byte("ca3")), - // DomainCerts: [][]byte{cert2, cert1, cert3, cert4}, - // } - - // // add the same cert and CA entries in different orders - // domainEntry1 := &mapCommon.DomainEntry{ - // Entries: []mapCommon.Entry{caEntry1, caEntry2, caEntry3_}, - // } - - // domainEntry2 := &mapCommon.DomainEntry{ - // Entries: []mapCommon.Entry{caEntry1_, caEntry3, caEntry2_}, - // } - - // domainEntry3 := &mapCommon.DomainEntry{ - // Entries: []mapCommon.Entry{caEntry3, caEntry2_, caEntry1_}, - // } - - // sortDomainEntry(domainEntry1) - // sortDomainEntry(domainEntry2) - // sortDomainEntry(domainEntry3) - - // for i := 0; i < 3; i++ { - // // check ca entry order is correct - // assert.Equal(t, domainEntry1.Entries[i].CAName, domainEntry2.Entries[i].CAName, domainEntry3.Entries[i].CAName) - // for j := 0; j < 4; j++ { - // assert.Equal(t, 
domainEntry1.Entries[i].DomainCerts[j], domainEntry2.Entries[i].DomainCerts[j], - // domainEntry3.Entries[i].DomainCerts[j]) - // } - // } -} - -// ------------------------------------------------------------- -// -// funcs for testing -// -// ------------------------------------------------------------- -func generateRandomBytes(size int) []byte { - token := make([]byte, size) - rand.Read(token) - return token -} diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 411efad8..11313319 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -11,6 +11,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" + "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/assert" @@ -35,8 +36,8 @@ func TestUpdateCerts(t *testing.T) { require.NoError(t, err, "ioutil.ReadDir") for _, file := range files { - cert, err := projectCommon.CTX509CertFromFile("./testdata/certs/" + file.Name()) - require.NoError(t, err, "projectCommon.CTX509CertFromFile") + cert, err := util.CertificateFromPEMFile("./testdata/certs/" + file.Name()) + require.NoError(t, err) certs = append(certs, cert) } @@ -162,7 +163,7 @@ func TestFetchUpdatedDomainHash(t *testing.T) { randomKeys := []projectCommon.SHA256Output{} for i := 0; i < 15; i++ { - newRandomKey := getRandomHash() + newRandomKey := getRandomHash(t) updaterDB.UpdatesTable[newRandomKey] = struct{}{} randomKeys = append(randomKeys, newRandomKey) } @@ -217,8 +218,8 @@ func TestRunWhenFalse(t *testing.T) { } } -func getRandomHash() projectCommon.SHA256Output { - return projectCommon.SHA256Hash32Bytes(generateRandomBytes(50)) +func getRandomHash(t *testing.T) projectCommon.SHA256Output { + return 
projectCommon.SHA256Hash32Bytes(random.RandomBytesForTest(t, 50)) } // get a updater using mock db diff --git a/pkg/mapserver/util/proof.go b/pkg/mapserver/util/proof.go deleted file mode 100644 index c6924d4d..00000000 --- a/pkg/mapserver/util/proof.go +++ /dev/null @@ -1,61 +0,0 @@ -package util - -import ( - "bytes" - "fmt" - "strings" - - ctx509 "github.com/google/certificate-transparency-go/x509" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/prover" -) - -// CheckProof checks the validity of the proof. The CA from the certificate is checked for -// those subdomains where entries are found in the mapserver. -func CheckProof( - proof []*mapCommon.MapServerResponse, - name string, - cert *ctx509.Certificate, -) error { - - caName := cert.Issuer.String() - foundPoP := false - for i, proof := range proof { - if !strings.Contains(name, proof.Domain) { - return fmt.Errorf("proof step %d of %s: subdomain %s not in name %s", - i, name, proof.Domain, name) - } - proofType, correct, err := prover.VerifyProofByDomain(proof) - if err != nil { - return fmt.Errorf("proof step %d of %s: verifying proof: %w", - i, name, err) - } - if !correct { - return fmt.Errorf("proof step %d of %s: incorrect proof", i, name) - } - if proofType == mapCommon.PoP { - foundPoP = true - domainEntry, err := mapCommon.DeserializeDomainEntry(proof.DomainEntryBytes) - if err != nil { - return fmt.Errorf("proof step %d of %s: deserializing payload: %w", - i, name, err) - } - // Find the CA entry that corresponds to the CA in this certificate. - for _, ca := range domainEntry.Entries { - if ca.CAName == caName { - for _, raw := range ca.DomainCerts { - if bytes.Equal(raw, cert.Raw) { - return nil - } - } - } - } - } else { - if len(proof.DomainEntryBytes) != 0 { - return fmt.Errorf("payload for a absence step (%s)", name) - } - } - } - return fmt.Errorf("certificate/CA not found; all proof steps are PoA? 
%v", - !foundPoP) -} diff --git a/pkg/util/certReader.go b/pkg/util/certReader.go index df71bd0c..87a62808 100644 --- a/pkg/util/certReader.go +++ b/pkg/util/certReader.go @@ -63,10 +63,14 @@ func (r *CertReader) Read(certs []*ctx509.Certificate) (int, error) { // Wrong PEM block, try to find another one. continue } - // It must be a certificate. Complain if parsing fails. + // It must be a To Be Signed Certificate. c, err := ctx509.ParseTBSCertificate(block.Bytes) if err != nil { - return 0, err + // If it wasn't a TBS Cert, try with a regular one. + c, err = ctx509.ParseCertificate(block.Bytes) + if err != nil { + return 0, err + } } certPointers[0] = c certPointers = certPointers[1:] diff --git a/pkg/util/certReader_test.go b/pkg/util/certReader_test.go index 80ec22f2..bed0a730 100644 --- a/pkg/util/certReader_test.go +++ b/pkg/util/certReader_test.go @@ -111,3 +111,15 @@ func TestCertReaderReadAll(t *testing.T) { require.NoError(t, err) require.ElementsMatch(t, certs, threeCerts) } + +func TestCertReaderLoadsRegularCertificate(t *testing.T) { + // The file contains a regular (not TBS) x509 certificate. + f, err := os.Open("../../tests/testdata/1-regular-cert.pem") + require.NoError(t, err) + + r := NewCertReader(f) + N := 1 + certs, err := r.ReadAll() + require.NoError(t, err) + require.Len(t, certs, N) +} diff --git a/pkg/util/io.go b/pkg/util/io.go index 41b9d124..232dabdc 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -61,6 +61,23 @@ func ReadAllGzippedFile(filename string) ([]byte, error) { return buff, r.Close() } +// CertificateFromFile uses a CertReader to read just one certificate. It returns nil if no +// certificate was found. The file must encode the certificate in PEM format. 
+func CertificateFromPEMFile(filename string) (*ctx509.Certificate, error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + r := NewCertReader(f) + certs := make([]*ctx509.Certificate, 1) + _, err = r.Read(certs) + if err != nil { + return nil, err + } + // If no certificate was found, certs[0] will be nil. + return certs[0], nil +} + // LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of the leaf certificate // payload, its ID, its parent ID, and its names, for each certificate and its ancestry chain. // The returned names contains nil unless the corresponding certificate is a leaf certificate. diff --git a/pkg/util/io_test.go b/pkg/util/io_test.go index c9382063..4dea1a69 100644 --- a/pkg/util/io_test.go +++ b/pkg/util/io_test.go @@ -19,3 +19,12 @@ func TestNewGzipReader(t *testing.T) { err = r.Close() require.NoError(t, err) } + +// TestCertificateFromPEMFile checks that the CertificateFromPEMFile loads a regular x509 Cert. +func TestCertificateFromPEMFile(t *testing.T) { + cert, err := CertificateFromPEMFile("../../tests/testdata/1-regular-cert.pem") + require.NoError(t, err) + + result := ExtractCertDomains(cert) + require.ElementsMatch(t, result, []string{"*.adiq.com.br", "adiq.com.br"}) +} diff --git a/tests/testdata/1-regular-cert.pem b/tests/testdata/1-regular-cert.pem new file mode 100644 index 00000000..09d2bc35 --- /dev/null +++ b/tests/testdata/1-regular-cert.pem @@ -0,0 +1,38 @@ +-----BEGIN CERTIFICATE----- +MIIGrjCCBZagAwIBAgIIE/08vT21V3owDQYJKoZIhvcNAQELBQAwgbQxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRow +GAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjEtMCsGA1UECxMkaHR0cDovL2NlcnRz +LmdvZGFkZHkuY29tL3JlcG9zaXRvcnkvMTMwMQYDVQQDEypHbyBEYWRkeSBTZWN1 +cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwHhcNMTkwNTI0MTUwMjM0WhcN +MjEwNTI0MTUwMjM0WjA7MSEwHwYDVQQLExhEb21haW4gQ29udHJvbCBWYWxpZGF0 +ZWQxFjAUBgNVBAMMDSouYWRpcS5jb20uYnIwggEiMA0GCSqGSIb3DQEBAQUAA4IB 
+DwAwggEKAoIBAQDQFlWfzZmTZICFcUzaglr7A+fH6lwtfyBPQgHVKxGv/drit1W9 +8NUBlWwkn5PUmEWYDF3gP/HDdN2oGplWdt6HI1Y9IKB3xGFWMrKfY5Z317+omfKC +oCPpUNYVty96wT3m53CB6mglZWywvMYCUuch0BYPh/L9PrGKPOFDVBGCUPA6Dh1z +0r5akhy3iUfT2OvYvpMBsWoUIo8sqD6VnSKAVixpBdRyBukJ7Qf47G1cOGFbK1Uy +D29b3WDUBKGNhGsC2kMG1fH6W1aJgE+4+M5Nqbgzrgf+f5CbsVhM4Q9I3KzZDokC +eMsn1jT8UJf0pThUVW7m/6OqKWCceyFRdx+pAgMBAAGjggM6MIIDNjAMBgNVHRMB +Af8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAOBgNVHQ8BAf8E +BAMCBaAwOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL2NybC5nb2RhZGR5LmNvbS9n +ZGlnMnMxLTExMzEuY3JsMF0GA1UdIARWMFQwSAYLYIZIAYb9bQEHFwEwOTA3Bggr +BgEFBQcCARYraHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNvbS9yZXBvc2l0 +b3J5LzAIBgZngQwBAgEwdgYIKwYBBQUHAQEEajBoMCQGCCsGAQUFBzABhhhodHRw +Oi8vb2NzcC5nb2RhZGR5LmNvbS8wQAYIKwYBBQUHMAKGNGh0dHA6Ly9jZXJ0aWZp +Y2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeS9nZGlnMi5jcnQwHwYDVR0jBBgw +FoAUQMK9J47MNIMwojPX+2yz8LQsgM4wJQYDVR0RBB4wHIINKi5hZGlxLmNvbS5i +coILYWRpcS5jb20uYnIwHQYDVR0OBBYEFAMSUloRqH1nuPQxJgWs3YHxqw/cMIIB +fQYKKwYBBAHWeQIEAgSCAW0EggFpAWcAdgCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb +37jjd80OyA3cEAAAAWrqXXn4AAAEAwBHMEUCIQDdUUQAL8sTLTDpJhajvhRr8H5G +sx8pQECKqRpFYNNe7AIgeGUzC57nXHrZ59GFIy6Oeahnw3C1p89+abNCVh2bbHUA +dQDuS723dc5guuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWrqXX8RAAAEAwBG +MEQCIF5ik8ZpFpuMTbXOEm2PmfwFWRdvDQwyh2cGiCwguOBlAiAr2POWVLFD7uz/ +0cFEuxmg3putQDfQPpkPV0Mz16ulOgB2AESUZS6w7s6vxEAH2Kj+KMDa5oK+2Msx +tT/TM5a1toGoAAABaupdg/0AAAQDAEcwRQIgRPyx0eYTHDAcI0gf4XiRJGAq3EDK +phC8YHe3myjX+acCIQDoa/zVhG7XUsNbV0FPitBnVSNIpuPGFhYfC8wHd1Q92jAN +BgkqhkiG9w0BAQsFAAOCAQEACHuaEL9h0Vp0oYmM+UA2uxLSls+MbCtOjoXfLxvA +6dj1TH1LxoD3WR6sdNJPQZBaAdli3saJdeGfDnCD/oCFyCszLvDlKXrfDjL30rX1 +DpZcAkILIMui8TtOxPy1Uvr6Tw01gcaMXH/XX00DqZbqMGT2eyY4/jmy2U6cd7mW +9Xl//47GGbNtKdTGY79SsKBRiuHJjG0LUmaPlonywc545w0VHTnCrU8maZDlSGtI +RkdjANUKhxmm7sU7VNUiZjTKptwImDuHrsPodnPMx3SnSO7yr9dgqS9QUTd0LzKp +f3rgrxlFbIeE68TEN2BTbTGgzPED7kknhBuRBtVu6C2mmg== +-----END CERTIFICATE----- From 0327babe729e6366eb28a5ca116e05a1aa06e46a Mon Sep 17 00:00:00 2001 
From: "Juan A. Garcia Pardo" Date: Tue, 23 May 2023 10:37:07 +0200 Subject: [PATCH 123/187] Replace X509CertFromFile with CertificateFromPEMFile. Cleanup common tests. --- pkg/common/cert.go | 25 ---- pkg/common/crypto.go | 7 +- pkg/common/crypto_test.go | 104 ++++++++-------- pkg/common/json_test.go | 97 +++++++-------- pkg/common/structure_test.go | 223 +++++++++++++++++++---------------- pkg/tests/random/random.go | 9 +- pkg/util/io.go | 1 + 7 files changed, 218 insertions(+), 248 deletions(-) diff --git a/pkg/common/cert.go b/pkg/common/cert.go index f5da66a9..20e06388 100644 --- a/pkg/common/cert.go +++ b/pkg/common/cert.go @@ -43,31 +43,6 @@ func PemBytesToRsaPublicKey(pubkey []byte) (*rsa.PublicKey, error) { return nil, errors.New("PemBytesToRsaPublicKey | ParsePKIXPublicKey | Key type is not RSA") } -// X509CertFromFile: read x509 cert from files -func X509CertFromFile(fileName string) (*x509.Certificate, error) { - content, err := ioutil.ReadFile(fileName) - if err != nil { - return nil, fmt.Errorf("X509CertFromFile | failed to read %s: %w", fileName, err) - } - - var block *pem.Block - block, _ = pem.Decode(content) - - switch { - case block == nil: - return nil, fmt.Errorf("X509CertFromFile | no pem block in %s", fileName) - case block.Type != "CERTIFICATE": - return nil, fmt.Errorf("X509CertFromFile | %s contains data other than certificate", fileName) - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - return nil, fmt.Errorf("X509CertFromFile | ParseCertificate | %w", err) - } - - return cert, nil -} - // LoadRSAPrivateKeyFromFile loads a RSA private key from file func LoadRSAPrivateKeyFromFile(keyPath string) (*rsa.PrivateKey, error) { bytes, err := ioutil.ReadFile(keyPath) diff --git a/pkg/common/crypto.go b/pkg/common/crypto.go index ffca5b22..d71d2e2e 100644 --- a/pkg/common/crypto.go +++ b/pkg/common/crypto.go @@ -5,9 +5,10 @@ import ( "crypto/rand" "crypto/rsa" "crypto/sha256" - "crypto/x509" "fmt" "time" + + ctx509 
"github.com/google/certificate-transparency-go/x509" ) // SignatureAlgorithm: Enum of supported signature algorithm; Currently only SHA256 @@ -152,7 +153,7 @@ func RCSRGenerateRPC(rcsr *RCSR, notBefore time.Time, serialNumber int, caPrivKe // ---------------------------------------------------------------------------------- // RPCVerifyCASignature: used by domain owner, check whether CA signature is correct -func RPCVerifyCASignature(caCert *x509.Certificate, rpc *RPC) error { +func RPCVerifyCASignature(caCert *ctx509.Certificate, rpc *RPC) error { pubKey := caCert.PublicKey.(*rsa.PublicKey) // Serialize without CA signature or SPTs: @@ -234,7 +235,7 @@ func CASignSP(psr *PSR, caPrivKey *rsa.PrivateKey, caName string, serialNum int) } // VerifyCASigInSP: verify CA's signature -func VerifyCASigInSP(caCert *x509.Certificate, sp *SP) error { +func VerifyCASigInSP(caCert *ctx509.Certificate, sp *SP) error { if len(sp.CASignature) == 0 { return fmt.Errorf("VerifyCASigInPC | no valid CA signature") } diff --git a/pkg/common/crypto_test.go b/pkg/common/crypto_test.go index 7487ce55..d611a758 100644 --- a/pkg/common/crypto_test.go +++ b/pkg/common/crypto_test.go @@ -1,40 +1,42 @@ -package common +package common_test import ( - "crypto/rand" "testing" "time" + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/tests/random" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // TestSignatureOfRCSR: Generate RCSR -> generate signature for RCSR -> verify signature func TestSignatureOfRCSR(t *testing.T) { - privKey, err := LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") + privKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") require.NoError(t, err, "load RSA key error") - test := &RCSR{ - PolicyObjectBase: PolicyObjectBase{ + test := &common.RCSR{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "this is a 
test", }, Version: 44, TimeStamp: time.Now(), - PublicKeyAlgorithm: RSA, - SignatureAlgorithm: SHA256, - PRCSignature: generateRandomBytes(), - Signature: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + SignatureAlgorithm: common.SHA256, + PRCSignature: random.RandomBytesForTest(t, 32), + Signature: random.RandomBytesForTest(t, 32), } - pubKeyBytes, err := RsaPublicKeyToPemBytes(&privKey.PublicKey) + pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&privKey.PublicKey) require.NoError(t, err, "RSA key to bytes error") test.PublicKey = pubKeyBytes - err = RCSRCreateSignature(privKey, test) + err = common.RCSRCreateSignature(privKey, test) require.NoError(t, err, "RCSR sign signature error") - err = RCSRVerifySignature(test) + err = common.RCSRVerifySignature(test) require.NoError(t, err, "RCSR verify signature error") } @@ -43,40 +45,40 @@ func TestIssuanceOfRPC(t *testing.T) { // ------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") + privKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") - rcsr := &RCSR{ - PolicyObjectBase: PolicyObjectBase{ + rcsr := &common.RCSR{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "this is a test", }, Version: 44, TimeStamp: time.Now(), - PublicKeyAlgorithm: RSA, - SignatureAlgorithm: SHA256, - PRCSignature: generateRandomBytes(), - Signature: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + SignatureAlgorithm: common.SHA256, + PRCSignature: random.RandomBytesForTest(t, 32), + Signature: random.RandomBytesForTest(t, 32), } // add public key - pubKeyBytes, err := RsaPublicKeyToPemBytes(&privKey.PublicKey) + pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") rcsr.PublicKey = pubKeyBytes // generate signature for 
rcsr - err = RCSRCreateSignature(privKey, rcsr) + err = common.RCSRCreateSignature(privKey, rcsr) require.NoError(t, err, "RCSR Create Signature error") // ------------------------------------- // phase 2: pca issue rpc // ------------------------------------- // validate the signature in rcsr - err = RCSRVerifySignature(rcsr) + err = common.RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") - pcaPrivKey, err := LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") - rpc, err := RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") + pcaPrivKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") + rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") @@ -85,10 +87,10 @@ func TestIssuanceOfRPC(t *testing.T) { // phase 3: domain owner check rpc // ------------------------------------- - caCert, err := X509CertFromFile("./testdata/servercert.pem") + caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") require.NoError(t, err, "X509 Cert From File error") - err = RPCVerifyCASignature(caCert, rpc) + err = common.RPCVerifyCASignature(caCert, rpc) require.NoError(t, err, "RPC Verify CA Signature error") } @@ -97,39 +99,40 @@ func TestIssuanceOfSP(t *testing.T) { // ------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") + privKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") - rcsr := &RCSR{ - PolicyObjectBase: PolicyObjectBase{ + rcsr := &common.RCSR{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "this is a test", }, Version: 44, TimeStamp: time.Now(), - PublicKeyAlgorithm: RSA, - SignatureAlgorithm: SHA256, - PRCSignature: 
generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + SignatureAlgorithm: common.SHA256, + PRCSignature: random.RandomBytesForTest(t, 32), } // add public key - pubKeyBytes, err := RsaPublicKeyToPemBytes(&privKey.PublicKey) + pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") rcsr.PublicKey = pubKeyBytes // generate signature for rcsr - err = RCSRCreateSignature(privKey, rcsr) + err = common.RCSRCreateSignature(privKey, rcsr) require.NoError(t, err, "RCSR Create Signature error") // ------------------------------------- // phase 2: pca issue rpc // ------------------------------------- // validate the signature in rcsr - err = RCSRVerifySignature(rcsr) + err = common.RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") - pcaPrivKey, err := LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") - rpc, err := RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") + pcaPrivKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") + require.NoError(t, err) + rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") @@ -137,44 +140,29 @@ func TestIssuanceOfSP(t *testing.T) { // ------------------------------------- // phase 3: domain owner generate SP // ------------------------------------- - psr := &PSR{ + psr := &common.PSR{ TimeStamp: time.Now(), DomainName: "test_SP", } - err = DomainOwnerSignPSR(privKey, psr) + err = common.DomainOwnerSignPSR(privKey, psr) require.NoError(t, err, "DomainOwnerSignPSR error") // ------------------------------------- // phase 4: pca check psr // ------------------------------------- - err = VerifyPSRUsingRPC(psr, rpc) + err = common.VerifyPSRUsingRPC(psr, rpc) require.NoError(t, err, "VerifyPSRUsingRPC error") - sp, err := CASignSP(psr, pcaPrivKey, "test ca", 22) + sp, err 
:= common.CASignSP(psr, pcaPrivKey, "test ca", 22) require.NoError(t, err, "CASignSP error") // ------------------------------------- // phase 5: domain owner check sp // ------------------------------------- - caCert, err := X509CertFromFile("./testdata/servercert.pem") + caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") require.NoError(t, err, "X509CertFromFile error") - err = VerifyCASigInSP(caCert, sp) + err = common.VerifyCASigInSP(caCert, sp) require.NoError(t, err, "VerifyCASigInSP error") } - -// ------------------------------------------------------------- -// -// funcs for testing -// -// ------------------------------------------------------------- -func generateRandomBytes() []byte { - token := make([]byte, 40) - rand.Read(token) - return token -} - -func generateRandomBytesArray() [][]byte { - return [][]byte{generateRandomBytes()} -} diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 594d3964..3e88af87 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -1,13 +1,13 @@ -package common +package common_test import ( "bytes" "strings" "testing" - "github.com/google/trillian" - trilliantypes "github.com/google/trillian/types" "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/common" ) // TestPolicyObjects checks that the structure types in the test cases can be converted to JSON and @@ -18,54 +18,54 @@ func TestPolicyObjects(t *testing.T) { data any }{ "rpcPtr": { - data: randomRPC(), + data: randomRPC(t), }, "rpcValue": { - data: *randomRPC(), + data: *randomRPC(t), }, "rcsr": { - data: randomRCSR(), + data: randomRCSR(t), }, "sp": { - data: randomSP(), + data: randomSP(t), }, "spt": { - data: *randomSPT(), + data: *randomSPT(t), }, "list": { data: []any{ - randomRPC(), - randomRCSR(), - randomSP(), - randomSPRT(), - randomPSR(), - randomTrillianProof(), - randomLogRootV1(), + randomRPC(t), + randomRCSR(t), + randomSP(t), + randomSPRT(t), + 
randomPSR(t), + randomTrillianProof(t), + randomLogRootV1(t), }, }, "list_embedded": { data: []any{ - randomRPC(), + randomRPC(t), []any{ - randomSP(), - randomSPT(), + randomSP(t), + randomSPT(t), }, []any{ - randomTrillianProof(), - randomTrillianProof(), + randomTrillianProof(t), + randomTrillianProof(t), }, }, }, "multiListPtr": { data: &[]any{ - randomRPC(), - *randomRPC(), + randomRPC(t), + *randomRPC(t), []any{ - randomSP(), - *randomSP(), + randomSP(t), + *randomSP(t), &[]any{ - randomSPT(), - *randomSPT(), + randomSPT(t), + *randomSPT(t), }, }, }, @@ -76,10 +76,10 @@ func TestPolicyObjects(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() // Serialize. - data, err := ToJSON(tc.data) + data, err := common.ToJSON(tc.data) require.NoError(t, err) // Deserialize. - deserialized, err := FromJSON(data, WithSkipCopyJSONIntoPolicyObjects) + deserialized, err := common.FromJSON(data, common.WithSkipCopyJSONIntoPolicyObjects) require.NoError(t, err) // Compare. require.Equal(t, tc.data, deserialized) @@ -97,40 +97,40 @@ func TestPolicyObjectBaseRaw(t *testing.T) { getRawElemsFcn func(obj any) [][]byte // Return the Raw components of this thing. 
}{ "rpc": { - obj: randomRPC(), + obj: randomRPC(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { - rpc := obj.(*RPC) + rpc := obj.(*common.RPC) return [][]byte{rpc.RawJSON} }, }, "spPtr": { - obj: randomSP(), + obj: randomSP(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { - sp := obj.(*SP) + sp := obj.(*common.SP) return [][]byte{sp.RawJSON} }, }, "spValue": { - obj: *randomSP(), + obj: *randomSP(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { - sp := obj.(SP) + sp := obj.(common.SP) return [][]byte{sp.RawJSON} }, }, "list": { obj: []any{ - randomSP(), - randomRPC(), + randomSP(t), + randomRPC(t), }, rawElemsCount: 2, getRawElemsFcn: func(obj any) [][]byte { l := obj.([]any) return [][]byte{ - l[0].(*SP).RawJSON, - l[1].(*RPC).RawJSON, + l[0].(*common.SP).RawJSON, + l[1].(*common.RPC).RawJSON, } }, }, @@ -140,10 +140,10 @@ func TestPolicyObjectBaseRaw(t *testing.T) { t.Run(name, func(t *testing.T) { t.Parallel() // Serialize. - data, err := ToJSON(tc.obj) + data, err := common.ToJSON(tc.obj) require.NoError(t, err) // Deserialize. 
- obj, err := FromJSON(data) + obj, err := common.FromJSON(data) require.NoError(t, err) t.Logf("This object is of type %T", obj) raws := tc.getRawElemsFcn(obj) @@ -168,20 +168,3 @@ func TestPolicyObjectBaseRaw(t *testing.T) { }) } } - -func randomTrillianProof() *trillian.Proof { - return &trillian.Proof{ - LeafIndex: 1, - Hashes: generateRandomBytesArray(), - } -} - -func randomLogRootV1() *trilliantypes.LogRootV1 { - return &trilliantypes.LogRootV1{ - TreeSize: 1, - RootHash: generateRandomBytes(), - TimestampNanos: 11, - Revision: 3, - Metadata: generateRandomBytes(), - } -} diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index b8fbadc1..cef1807b 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -1,4 +1,4 @@ -package common +package common_test import ( "math/rand" @@ -7,9 +7,13 @@ import ( "testing" "time" - "github.com/netsec-ethz/fpki/pkg/tests" - "github.com/stretchr/testify/assert" + "github.com/google/trillian" + trilliantypes "github.com/google/trillian/types" "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/tests/random" ) var update = tests.UpdateGoldenFiles() @@ -17,222 +21,222 @@ var update = tests.UpdateGoldenFiles() func TestGenerateGoldenFiles(t *testing.T) { // Update the JSON files in tests/testdata if *update { - obj := []any{randomSP(), randomSP()} - err := ToJSONFile(obj, "../../tests/testdata/2-SPs.json") + obj := []any{randomSP(t), randomSP(t)} + err := common.ToJSONFile(obj, "../../tests/testdata/2-SPs.json") require.NoError(t, err) } } // TestEqual: Equal funcs for every structure func TestEqual(t *testing.T) { - rcsr := &RCSR{ - PolicyObjectBase: PolicyObjectBase{ + rcsr := &common.RCSR{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "bandqhvdbdlwnd", }, Version: 6789, TimeStamp: time.Now(), - 
PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), - SignatureAlgorithm: SHA256, - PRCSignature: generateRandomBytes(), - Signature: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + PublicKey: random.RandomBytesForTest(t, 32), + SignatureAlgorithm: common.SHA256, + PRCSignature: random.RandomBytesForTest(t, 32), + Signature: random.RandomBytesForTest(t, 32), } - assert.True(t, rcsr.Equal(rcsr), "RCSR Equal() error") + require.True(t, rcsr.Equal(rcsr), "RCSR Equal() error") - spt1 := SPT{ + spt1 := common.SPT{ Version: 12313, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "hihihihihhi", }, CAName: "I'm honest CA, nice to meet you", LogID: 1231323, CertType: 0x11, AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), + STH: random.RandomBytesForTest(t, 32), + PoI: random.RandomBytesForTest(t, 32), STHSerialNumber: 131678, - Signature: generateRandomBytes(), + Signature: random.RandomBytesForTest(t, 32), } - spt2 := SPT{ + spt2 := common.SPT{ Version: 12368713, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "hohohoho", }, CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, CertType: 0x21, AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), + STH: random.RandomBytesForTest(t, 32), + PoI: random.RandomBytesForTest(t, 32), STHSerialNumber: 114378, - Signature: generateRandomBytes(), + Signature: random.RandomBytesForTest(t, 32), } - assert.True(t, spt1.Equal(spt1) && spt2.Equal(spt2) && !spt1.Equal(spt2) && !spt2.Equal(spt1), "SPT Equal() error") + require.True(t, spt1.Equal(spt1) && spt2.Equal(spt2) && !spt1.Equal(spt2) && !spt2.Equal(spt1), "SPT Equal() error") - sprt := &SPRT{ - SPT: SPT{ + sprt := &common.SPRT{ + SPT: common.SPT{ Version: 12314, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "bad domain", }, CAName: "I'm malicious CA, nice to meet 
you", LogID: 1729381, CertType: 0x21, AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), + STH: random.RandomBytesForTest(t, 32), + PoI: random.RandomBytesForTest(t, 32), STHSerialNumber: 1729381, - Signature: generateRandomBytes(), + Signature: random.RandomBytesForTest(t, 32), }, Reason: 1729381, } - assert.True(t, sprt.Equal(sprt), "SPRT Equal() error") + require.True(t, sprt.Equal(sprt), "SPRT Equal() error") - rpc := &RPC{ + rpc := &common.RPC{ SerialNumber: 1729381, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "bad domain", }, Version: 1729381, - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + PublicKey: random.RandomBytesForTest(t, 32), NotBefore: time.Now(), NotAfter: time.Now(), CAName: "bad domain", - SignatureAlgorithm: SHA256, + SignatureAlgorithm: common.SHA256, TimeStamp: time.Now(), - PRCSignature: generateRandomBytes(), - CASignature: generateRandomBytes(), - SPTs: []SPT{spt1, spt2}, + PRCSignature: random.RandomBytesForTest(t, 32), + CASignature: random.RandomBytesForTest(t, 32), + SPTs: []common.SPT{spt1, spt2}, } - assert.True(t, rpc.Equal(rpc), "RPC Equal() error") + require.True(t, rpc.Equal(rpc), "RPC Equal() error") } // TestJsonReadWrite: RPC -> file -> RPC, then RPC.Equal(RPC) func TestJsonReadWrite(t *testing.T) { - spt1 := &SPT{ + spt1 := &common.SPT{ Version: 12313, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "hihihihihhi", }, CAName: "I'm honest CA, nice to meet you", LogID: 1231323, CertType: 0x11, AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), + STH: random.RandomBytesForTest(t, 32), + PoI: random.RandomBytesForTest(t, 32), STHSerialNumber: 131678, - Signature: generateRandomBytes(), + Signature: random.RandomBytesForTest(t, 32), } - spt2 := &SPT{ + spt2 := &common.SPT{ Version: 12368713, - PolicyObjectBase: PolicyObjectBase{ + 
PolicyObjectBase: common.PolicyObjectBase{ Subject: "hohohoho", }, CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, CertType: 0x21, AddedTS: time.Now(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), + STH: random.RandomBytesForTest(t, 32), + PoI: random.RandomBytesForTest(t, 32), STHSerialNumber: 114378, - Signature: generateRandomBytes(), + Signature: random.RandomBytesForTest(t, 32), } - rpc := &RPC{ + rpc := &common.RPC{ SerialNumber: 1729381, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "bad domain", }, Version: 1729381, - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + PublicKey: random.RandomBytesForTest(t, 32), NotBefore: time.Now(), NotAfter: time.Now(), CAName: "bad domain", - SignatureAlgorithm: SHA256, + SignatureAlgorithm: common.SHA256, TimeStamp: time.Now(), - PRCSignature: generateRandomBytes(), - CASignature: generateRandomBytes(), - SPTs: []SPT{*spt1, *spt2}, + PRCSignature: random.RandomBytesForTest(t, 32), + CASignature: random.RandomBytesForTest(t, 32), + SPTs: []common.SPT{*spt1, *spt2}, } tempFile := path.Join(os.TempDir(), "rpctest.json") defer os.Remove(tempFile) - err := ToJSONFile(rpc, tempFile) + err := common.ToJSONFile(rpc, tempFile) require.NoError(t, err, "Json Struct To File error") - rpc1, err := JsonFileToRPC(tempFile) + rpc1, err := common.JsonFileToRPC(tempFile) require.NoError(t, err, "Json File To RPC error") - assert.True(t, rpc.Equal(rpc1), "Json error") + require.True(t, rpc.Equal(rpc1), "Json error") } -func randomRPC() *RPC { - return &RPC{ +func randomRPC(t tests.T) *common.RPC { + return &common.RPC{ SerialNumber: 1729381, - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "RPC CA", }, Version: 1729381, - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + PublicKey: random.RandomBytesForTest(t, 32), NotBefore: 
nowWithoutMonotonic(), NotAfter: nowWithoutMonotonic(), CAName: "RPC CA", - SignatureAlgorithm: SHA256, + SignatureAlgorithm: common.SHA256, TimeStamp: nowWithoutMonotonic(), - PRCSignature: generateRandomBytes(), - CASignature: generateRandomBytes(), - SPTs: []SPT{*randomSPT(), *randomSPT()}, + PRCSignature: random.RandomBytesForTest(t, 32), + CASignature: random.RandomBytesForTest(t, 32), + SPTs: []common.SPT{*randomSPT(t), *randomSPT(t)}, } } -func randomRCSR() *RCSR { - return &RCSR{ - PolicyObjectBase: PolicyObjectBase{ +func randomRCSR(t tests.T) *common.RCSR { + return &common.RCSR{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "subject", }, Version: 6789, TimeStamp: nowWithoutMonotonic(), - PublicKeyAlgorithm: RSA, - PublicKey: generateRandomBytes(), - SignatureAlgorithm: SHA256, - PRCSignature: generateRandomBytes(), - Signature: generateRandomBytes(), + PublicKeyAlgorithm: common.RSA, + PublicKey: random.RandomBytesForTest(t, 32), + SignatureAlgorithm: common.SHA256, + PRCSignature: random.RandomBytesForTest(t, 32), + Signature: random.RandomBytesForTest(t, 32), } } -func randomSP() *SP { - return &SP{ - Policies: Policy{ +func randomSP(t tests.T) *common.SP { + return &common.SP{ + Policies: common.Policy{ TrustedCA: []string{"ca1", "ca2"}, }, TimeStamp: nowWithoutMonotonic(), - PolicyObjectBase: PolicyObjectBase{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: "domainname.com", }, CAName: "ca1", SerialNumber: rand.Int(), - CASignature: generateRandomBytes(), - RootCertSignature: generateRandomBytes(), - SPTs: []SPT{ - *randomSPT(), - *randomSPT(), - *randomSPT(), + CASignature: random.RandomBytesForTest(t, 32), + RootCertSignature: random.RandomBytesForTest(t, 32), + SPTs: []common.SPT{ + *randomSPT(t), + *randomSPT(t), + *randomSPT(t), }, } } -func randomSPT() *SPT { - return &SPT{ - PolicyObjectBase: PolicyObjectBase{ +func randomSPT(t tests.T) *common.SPT { + return &common.SPT{ + PolicyObjectBase: common.PolicyObjectBase{ Subject: 
"hohohoho", }, Version: 12368713, @@ -240,29 +244,46 @@ func randomSPT() *SPT { LogID: 1324123, CertType: 0x21, AddedTS: nowWithoutMonotonic(), - STH: generateRandomBytes(), - PoI: generateRandomBytes(), + STH: random.RandomBytesForTest(t, 32), + PoI: random.RandomBytesForTest(t, 32), STHSerialNumber: 114378, - Signature: generateRandomBytes(), + Signature: random.RandomBytesForTest(t, 32), } } -func randomSPRT() *SPRT { - return &SPRT{ - SPT: *randomSPT(), +func randomSPRT(t tests.T) *common.SPRT { + return &common.SPRT{ + SPT: *randomSPT(t), Reason: 1729381, } } -func randomPSR() *PSR { - return &PSR{ - Policies: Policy{ +func randomPSR(t tests.T) *common.PSR { + return &common.PSR{ + Policies: common.Policy{ TrustedCA: []string{"one CA", "another CA"}, AllowedSubdomains: []string{"sub1.com", "sub2.com"}, }, TimeStamp: nowWithoutMonotonic(), DomainName: "domain_name.com", - RootCertSignature: generateRandomBytes(), + RootCertSignature: random.RandomBytesForTest(t, 32), + } +} + +func randomTrillianProof(t tests.T) *trillian.Proof { + return &trillian.Proof{ + LeafIndex: 1, + Hashes: [][]byte{random.RandomBytesForTest(t, 32)}, + } +} + +func randomLogRootV1(t tests.T) *trilliantypes.LogRootV1 { + return &trilliantypes.LogRootV1{ + TreeSize: 1, + RootHash: random.RandomBytesForTest(t, 32), + TimestampNanos: 11, + Revision: 3, + Metadata: random.RandomBytesForTest(t, 40), } } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 3f8afa04..ac7c7bcf 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -8,11 +8,12 @@ import ( "github.com/google/certificate-transparency-go/x509/pkix" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" ) -func RandomBytesForTest(t require.TestingT, size int) []byte { +func RandomBytesForTest(t tests.T, size int) []byte { buff := make([]byte, size) 
n, err := rand.Read(buff) require.NoError(t, err) @@ -20,7 +21,7 @@ func RandomBytesForTest(t require.TestingT, size int) []byte { return buff } -func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { +func RandomX509Cert(t tests.T, domain string) *ctx509.Certificate { return &ctx509.Certificate{ DNSNames: []string{domain}, Subject: pkix.Name{ @@ -32,7 +33,7 @@ func RandomX509Cert(t require.TestingT, domain string) *ctx509.Certificate { } } -func BuildTestRandomPolicyHierarchy(t require.TestingT, domainName string) []common.PolicyObject { +func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyObject { // Create one RPC and one SP for that name. rpc := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ @@ -58,7 +59,7 @@ func BuildTestRandomPolicyHierarchy(t require.TestingT, domainName string) []com // BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate // chains: the first chain is domainName->c1.com->c0.com , and the second chain is // domainName->c0.com . -func BuildTestRandomCertHierarchy(t require.TestingT, domainName string) ( +func BuildTestRandomCertHierarchy(t tests.T, domainName string) ( certs []*ctx509.Certificate, IDs, parentIDs []*common.SHA256Output, names [][]string) { // Create all certificates. diff --git a/pkg/util/io.go b/pkg/util/io.go index 232dabdc..362e270f 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -10,6 +10,7 @@ import ( "strings" ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" ) From a803f006bcf4ec0445dda9ebf72eace535e37244 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 23 May 2023 11:27:48 +0200 Subject: [PATCH 124/187] WIP refactoring crypto and tests. 
--- pkg/common/cert.go | 45 +++++++++------------------- pkg/common/cert_test.go | 23 -------------- pkg/common/crypto_test.go | 16 +++++----- pkg/mapserver/responder/responder.go | 3 +- pkg/pca/pca.go | 2 +- pkg/util/io.go | 24 +++++++++++++++ pkg/util/pem.go | 42 ++++++++++++++++++++++++++ pkg/util/pem_test.go | 22 ++++++++++++++ 8 files changed, 113 insertions(+), 64 deletions(-) delete mode 100644 pkg/common/cert_test.go create mode 100644 pkg/util/pem.go create mode 100644 pkg/util/pem_test.go diff --git a/pkg/common/cert.go b/pkg/common/cert.go index 20e06388..593faa44 100644 --- a/pkg/common/cert.go +++ b/pkg/common/cert.go @@ -6,23 +6,22 @@ import ( "encoding/pem" "errors" "fmt" - "io/ioutil" ) -// RsaPublicKeyToPemBytes: marshall public key to bytes -func RsaPublicKeyToPemBytes(pubkey *rsa.PublicKey) ([]byte, error) { - pubkey_bytes, err := x509.MarshalPKIXPublicKey(pubkey) - if err != nil { - return nil, fmt.Errorf("RsaPublicKeyToPemBytes | MarshalPKIXPublicKey | %w", err) - } - - return pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PUBLIC KEY", - Bytes: pubkey_bytes, - }, - ), nil -} +// // RsaPublicKeyToPemBytes: marshall public key to bytes +// func RsaPublicKeyToPemBytes(pubkey *rsa.PublicKey) ([]byte, error) { +// pubkey_bytes, err := x509.MarshalPKIXPublicKey(pubkey) +// if err != nil { +// return nil, fmt.Errorf("RsaPublicKeyToPemBytes | MarshalPKIXPublicKey | %w", err) +// } + +// return pem.EncodeToMemory( +// &pem.Block{ +// Type: "RSA PUBLIC KEY", +// Bytes: pubkey_bytes, +// }, +// ), nil +// } // PemBytesToRsaPublicKey: unmarshal bytes to public key func PemBytesToRsaPublicKey(pubkey []byte) (*rsa.PublicKey, error) { @@ -42,19 +41,3 @@ func PemBytesToRsaPublicKey(pubkey []byte) (*rsa.PublicKey, error) { } return nil, errors.New("PemBytesToRsaPublicKey | ParsePKIXPublicKey | Key type is not RSA") } - -// LoadRSAPrivateKeyFromFile loads a RSA private key from file -func LoadRSAPrivateKeyFromFile(keyPath string) (*rsa.PrivateKey, error) { - 
bytes, err := ioutil.ReadFile(keyPath) - if err != nil { - return nil, fmt.Errorf("LoadRSAPrivateKeyFromFile | read file | %w", err) - } - - block, _ := pem.Decode(bytes) - - keyPair, err := x509.ParsePKCS1PrivateKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("LoadRSAPrivateKeyFromFile | ParsePKCS1PrivateKey | %w", err) - } - return keyPair, nil -} diff --git a/pkg/common/cert_test.go b/pkg/common/cert_test.go deleted file mode 100644 index aaf94ca9..00000000 --- a/pkg/common/cert_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package common - -import ( - "crypto/rand" - "crypto/rsa" - "testing" - - "github.com/stretchr/testify/require" -) - -// TestEncAndDecOfPubKey public key -> bytes -> public key -func TestEncAndDecOfPubKey(t *testing.T) { - privateKeyPair, err := rsa.GenerateKey(rand.Reader, 2048) - require.NoError(t, err) - - bytes, err := RsaPublicKeyToPemBytes(&privateKeyPair.PublicKey) - require.NoError(t, err, "encoding error") - - pubKey, err := PemBytesToRsaPublicKey(bytes) - require.NoError(t, err, "decoding error") - - require.Equal(t, privateKeyPair.PublicKey, *pubKey, "parsing error") -} diff --git a/pkg/common/crypto_test.go b/pkg/common/crypto_test.go index d611a758..59d8d5e5 100644 --- a/pkg/common/crypto_test.go +++ b/pkg/common/crypto_test.go @@ -13,7 +13,7 @@ import ( // TestSignatureOfRCSR: Generate RCSR -> generate signature for RCSR -> verify signature func TestSignatureOfRCSR(t *testing.T) { - privKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") + privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "load RSA key error") test := &common.RCSR{ @@ -28,7 +28,7 @@ func TestSignatureOfRCSR(t *testing.T) { Signature: random.RandomBytesForTest(t, 32), } - pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&privKey.PublicKey) + pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "RSA key to bytes error") test.PublicKey = pubKeyBytes @@ -45,7 
+45,7 @@ func TestIssuanceOfRPC(t *testing.T) { // ------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") + privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") rcsr := &common.RCSR{ @@ -61,7 +61,7 @@ func TestIssuanceOfRPC(t *testing.T) { } // add public key - pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&privKey.PublicKey) + pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") rcsr.PublicKey = pubKeyBytes @@ -77,7 +77,7 @@ func TestIssuanceOfRPC(t *testing.T) { err = common.RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") - pcaPrivKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") + pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") @@ -99,7 +99,7 @@ func TestIssuanceOfSP(t *testing.T) { // ------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/clientkey.pem") + privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") rcsr := &common.RCSR{ @@ -114,7 +114,7 @@ func TestIssuanceOfSP(t *testing.T) { } // add public key - pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&privKey.PublicKey) + pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") rcsr.PublicKey = pubKeyBytes @@ -130,7 +130,7 @@ func TestIssuanceOfSP(t *testing.T) { err = common.RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") - 
pcaPrivKey, err := common.LoadRSAPrivateKeyFromFile("./testdata/serverkey.pem") + pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") require.NoError(t, err) rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index 3285e4b8..a47cdd7c 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -9,6 +9,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/domain" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" + "github.com/netsec-ethz/fpki/pkg/util" ) type MapResponder struct { @@ -116,7 +117,7 @@ func (r *MapResponder) signTreeHead(configFile string) error { } // Load private key from configuration. - privateKey, err := common.LoadRSAPrivateKeyFromFile(config.KeyPath) + privateKey, err := util.RSAKeyFromPEMFile(config.KeyPath) if err != nil { return fmt.Errorf("LoadRSAKeyPairFromFile | %w", err) } diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index fdbf49b9..a9be36e9 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -60,7 +60,7 @@ func NewPCA(configPath string) (*PCA, error) { return nil, fmt.Errorf("NewPCA | ReadConfigFromFile | %w", err) } // load rsa key pair - keyPair, err := common.LoadRSAPrivateKeyFromFile(config.KeyPath) + keyPair, err := util.RSAKeyFromPEMFile(config.KeyPath) if err != nil { return nil, fmt.Errorf("NewPCA | LoadRSAKeyPairFromFile | %w", err) } diff --git a/pkg/util/io.go b/pkg/util/io.go index 362e270f..8f659e85 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -3,8 +3,11 @@ package util import ( "bytes" "compress/gzip" + "crypto/rsa" "encoding/base64" "encoding/csv" + "encoding/pem" + "fmt" "io/ioutil" "os" "strings" @@ -79,6 +82,27 @@ func CertificateFromPEMFile(filename string) (*ctx509.Certificate, error) { return certs[0], nil } +// 
RSAKeyFromFile loads an RSA private key from file in PEM format. +func RSAKeyFromPEMFile(keyPath string) (*rsa.PrivateKey, error) { + bytes, err := ioutil.ReadFile(keyPath) + if err != nil { + return nil, err + } + + block, _ := pem.Decode(bytes) + if block.Type != "RSA PRIVATE KEY" { + // wrong type. + return nil, fmt.Errorf("wrong type. Got '%s' expected '%s'", + block.Type, "RSA PRIVATE KEY") + } + + keyPair, err := ctx509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, err + } + return keyPair, nil +} + // LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of the leaf certificate // payload, its ID, its parent ID, and its names, for each certificate and its ancestry chain. // The returned names contains nil unless the corresponding certificate is a leaf certificate. diff --git a/pkg/util/pem.go b/pkg/util/pem.go new file mode 100644 index 00000000..5e025c37 --- /dev/null +++ b/pkg/util/pem.go @@ -0,0 +1,42 @@ +package util + +import ( + "crypto/rsa" + "encoding/pem" + "errors" + "fmt" + + ctx509 "github.com/google/certificate-transparency-go/x509" +) + +func RSAPublicToPEM(pubkey *rsa.PublicKey) ([]byte, error) { + pubkey_bytes, err := ctx509.MarshalPKIXPublicKey(pubkey) + if err != nil { + return nil, err + } + + return pem.EncodeToMemory( + &pem.Block{ + Type: "RSA PUBLIC KEY", + Bytes: pubkey_bytes, + }, + ), nil +} + +func PEMToRSAPublic(pubkey []byte) (*rsa.PublicKey, error) { + block, _ := pem.Decode(pubkey) + if block == nil { + return nil, fmt.Errorf("PemBytesToRsaPublicKey | Decode | block empty") + } + + pub, err := ctx509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("PemBytesToRsaPublicKey | ParsePKIXPublicKey | %w", err) + } + + pubKeyResult, ok := pub.(*rsa.PublicKey) + if ok { + return pubKeyResult, nil + } + return nil, errors.New("PemBytesToRsaPublicKey | ParsePKIXPublicKey | Key type is not RSA") +} diff --git a/pkg/util/pem_test.go b/pkg/util/pem_test.go new file mode 
100644 index 00000000..9a21aa1a --- /dev/null +++ b/pkg/util/pem_test.go @@ -0,0 +1,22 @@ +package util + +import ( + "crypto/rsa" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRSAPublicToPEMAndBack(t *testing.T) { + privateKeyPair, err := rsa.GenerateKey(rand.New(rand.NewSource(0)), 2048) + require.NoError(t, err) + + bytes, err := RSAPublicToPEM(&privateKeyPair.PublicKey) + require.NoError(t, err) + + pubKey, err := PEMToRSAPublic(bytes) + require.NoError(t, err) + + require.Equal(t, privateKeyPair.PublicKey, *pubKey) +} From 8c36b85ba3c544241dd826aed3018e0520348a23 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 23 May 2023 11:40:19 +0200 Subject: [PATCH 125/187] Created pkg/common/crypto. --- pkg/common/cert.go | 43 --- pkg/common/crypto.go | 251 ----------------- pkg/common/crypto/crypto.go | 259 ++++++++++++++++++ pkg/common/{ => crypto}/crypto_test.go | 34 +-- .../{ => crypto}/testdata/clientcert.pem | 0 .../{ => crypto}/testdata/clientkey.pem | 0 .../{ => crypto}/testdata/servercert.pem | 0 .../{ => crypto}/testdata/serverkey.pem | 0 pkg/mapserver/responder/responder.go | 3 +- pkg/pca/sign_and_log.go | 9 +- 10 files changed, 284 insertions(+), 315 deletions(-) delete mode 100644 pkg/common/cert.go create mode 100644 pkg/common/crypto/crypto.go rename pkg/common/{ => crypto}/crypto_test.go (87%) rename pkg/common/{ => crypto}/testdata/clientcert.pem (100%) rename pkg/common/{ => crypto}/testdata/clientkey.pem (100%) rename pkg/common/{ => crypto}/testdata/servercert.pem (100%) rename pkg/common/{ => crypto}/testdata/serverkey.pem (100%) diff --git a/pkg/common/cert.go b/pkg/common/cert.go deleted file mode 100644 index 593faa44..00000000 --- a/pkg/common/cert.go +++ /dev/null @@ -1,43 +0,0 @@ -package common - -import ( - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "errors" - "fmt" -) - -// // RsaPublicKeyToPemBytes: marshall public key to bytes -// func RsaPublicKeyToPemBytes(pubkey 
*rsa.PublicKey) ([]byte, error) { -// pubkey_bytes, err := x509.MarshalPKIXPublicKey(pubkey) -// if err != nil { -// return nil, fmt.Errorf("RsaPublicKeyToPemBytes | MarshalPKIXPublicKey | %w", err) -// } - -// return pem.EncodeToMemory( -// &pem.Block{ -// Type: "RSA PUBLIC KEY", -// Bytes: pubkey_bytes, -// }, -// ), nil -// } - -// PemBytesToRsaPublicKey: unmarshal bytes to public key -func PemBytesToRsaPublicKey(pubkey []byte) (*rsa.PublicKey, error) { - block, _ := pem.Decode(pubkey) - if block == nil { - return nil, fmt.Errorf("PemBytesToRsaPublicKey | Decode | block empty") - } - - pub, err := x509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("PemBytesToRsaPublicKey | ParsePKIXPublicKey | %w", err) - } - - pubKeyResult, ok := pub.(*rsa.PublicKey) - if ok { - return pubKeyResult, nil - } - return nil, errors.New("PemBytesToRsaPublicKey | ParsePKIXPublicKey | Key type is not RSA") -} diff --git a/pkg/common/crypto.go b/pkg/common/crypto.go index d71d2e2e..4dd0126f 100644 --- a/pkg/common/crypto.go +++ b/pkg/common/crypto.go @@ -1,16 +1,5 @@ package common -import ( - "crypto" - "crypto/rand" - "crypto/rsa" - "crypto/sha256" - "fmt" - "time" - - ctx509 "github.com/google/certificate-transparency-go/x509" -) - // SignatureAlgorithm: Enum of supported signature algorithm; Currently only SHA256 // currently only SHA256 and RSA is supported type SignatureAlgorithm int @@ -25,243 +14,3 @@ type PublicKeyAlgorithm int const ( RSA PublicKeyAlgorithm = iota ) - -func SignBytes(b []byte, key *rsa.PrivateKey) ([]byte, error) { - hashOutput := sha256.Sum256(b) - signature, err := rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, hashOutput[:]) - if err != nil { - return nil, fmt.Errorf("SignBytes | SignPKCS1v15 | %w", err) - } - return signature, nil -} - -// ---------------------------------------------------------------------------------- -// functions on RCSR -// 
---------------------------------------------------------------------------------- - -// RCSRCreateSignature: Generate a signature, and fill the signature in the RCSR -func RCSRCreateSignature(domainOwnerPrivKey *rsa.PrivateKey, rcsr *RCSR) error { - // clear signature; normally should be empty - rcsr.Signature = []byte{} - - signature, err := signStructRSASHA256(rcsr, domainOwnerPrivKey) - if err != nil { - return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) - } - - rcsr.Signature = signature - return nil -} - -// RCSRGenerateRPCSignature: Generate RPC signature and fill it in the RCSR; -// -// (in paper, if new rcsr has the signature from previous rpc, the cool-off can be bypassed) -func RCSRGenerateRPCSignature(rcsr *RCSR, prevPrivKeyOfPRC *rsa.PrivateKey) error { - // clear the co-responding fields - rcsr.Signature = []byte{} - rcsr.PRCSignature = []byte{} - - rpcSignature, err := signStructRSASHA256(rcsr, prevPrivKeyOfPRC) - if err != nil { - return fmt.Errorf("RCSRGenerateRPCSignature | SignStructRSASHA256 | %w", err) - } - - rcsr.PRCSignature = rpcSignature - return nil -} - -// RCSRVerifySignature: verify the signature using the public key in hash -func RCSRVerifySignature(rcsr *RCSR) error { - // Serialize without signature: - sig := rcsr.Signature - rcsr.Signature = nil - serializedStruct, err := ToJSON(rcsr) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - rcsr.Signature = sig - - // Get the pub key: - pubKey, err := PemBytesToRsaPublicKey(rcsr.PublicKey) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | PemBytesToRsaPublicKey | %w", err) - } - - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rcsr.Signature) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | VerifyPKCS1v15 | %w", err) - } - return nil -} - -// RCSRVerifyRPCSignature: verify the RCSR using RPC; verify the RPC signature -func 
RCSRVerifyRPCSignature(rcsr *RCSR, rpc *RPC) error { - // Serialize without signature: - sig := rcsr.Signature - rcsr.Signature = nil - serializedStruct, err := ToJSON(rcsr) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - rcsr.Signature = sig - - pubKey, err := PemBytesToRsaPublicKey(rpc.PublicKey) - if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) - } - - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rcsr.PRCSignature) - if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) - } - return nil -} - -// RCSRGenerateRPC: called by PCA. Sign the RCSR and generate RPC; SPT field is (should be) empty -func RCSRGenerateRPC(rcsr *RCSR, notBefore time.Time, serialNumber int, caPrivKey *rsa.PrivateKey, caName string) (*RPC, error) { - rpc := &RPC{ - PolicyObjectBase: PolicyObjectBase{ - Subject: rcsr.Subject, - }, - Version: rcsr.Version, - PublicKeyAlgorithm: rcsr.PublicKeyAlgorithm, - PublicKey: rcsr.PublicKey, - CAName: caName, - SignatureAlgorithm: SHA256, - TimeStamp: time.Now(), - PRCSignature: rcsr.PRCSignature, - NotBefore: notBefore, - NotAfter: time.Now().AddDate(0, 0, 90), - SerialNumber: serialNumber, - CASignature: []byte{}, - SPTs: []SPT{}, - } - - signature, err := signStructRSASHA256(rpc, caPrivKey) - if err != nil { - return nil, fmt.Errorf("RCSRGenerateRPC | SignStructRSASHA256 | %w", err) - } - - rpc.CASignature = signature - return rpc, nil -} - -// ---------------------------------------------------------------------------------- -// functions on RPC -// ---------------------------------------------------------------------------------- - -// RPCVerifyCASignature: used by domain owner, check whether CA signature is correct -func RPCVerifyCASignature(caCert *ctx509.Certificate, rpc *RPC) error { - pubKey := caCert.PublicKey.(*rsa.PublicKey) - - // Serialize without CA 
signature or SPTs: - caSig, SPTs := rpc.CASignature, rpc.SPTs - rpc.CASignature, rpc.SPTs = nil, nil - bytes, err := ToJSON(rpc) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - rpc.CASignature, rpc.SPTs = caSig, SPTs - - hashOutput := sha256.Sum256(bytes) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rpc.CASignature) - if err != nil { - return fmt.Errorf("RPCVerifyCASignature | VerifyPKCS1v15 | %w", err) - } - return nil -} - -// ---------------------------------------------------------------------------------- -// functions on SP -// ---------------------------------------------------------------------------------- - -// DomainOwnerSignSP: Used by domain owner to sign the PC -func DomainOwnerSignPSR(domainOwnerPrivKey *rsa.PrivateKey, psr *PSR) error { - signature, err := signStructRSASHA256(psr, domainOwnerPrivKey) - if err != nil { - return fmt.Errorf("DomainOwnerSignPC | SignStructRSASHA256 | %w", err) - } - - psr.RootCertSignature = signature - return nil -} - -func VerifyPSRUsingRPC(psr *PSR, rpc *RPC) error { - // Serialize without signature: - sig := psr.RootCertSignature - psr.RootCertSignature = nil - serializedStruct, err := ToJSON(psr) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - psr.RootCertSignature = sig - - pubKey, err := PemBytesToRsaPublicKey(rpc.PublicKey) - if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) - } - - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], psr.RootCertSignature) - if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) - } - - return nil -} - -// CAVerifySPAndSign: verify the signature and sign the signature -func CASignSP(psr *PSR, caPrivKey *rsa.PrivateKey, caName string, serialNum int) (*SP, error) { - sp := &SP{ - PolicyObjectBase: PolicyObjectBase{ - Subject: psr.DomainName, - }, - 
Policies: psr.Policies, - RootCertSignature: psr.RootCertSignature, - TimeStamp: time.Now(), - CAName: caName, - SerialNumber: serialNum, - } - - caSignature, err := signStructRSASHA256(sp, caPrivKey) - if err != nil { - return &SP{}, fmt.Errorf("CASignSP | SignStructRSASHA256 | %w", err) - } - - sp.CASignature = caSignature - return sp, nil -} - -// VerifyCASigInSP: verify CA's signature -func VerifyCASigInSP(caCert *ctx509.Certificate, sp *SP) error { - if len(sp.CASignature) == 0 { - return fmt.Errorf("VerifyCASigInPC | no valid CA signature") - } - - // Serialize without CA signature or SPTs: - caSig, SPTs := sp.CASignature, sp.SPTs - sp.CASignature, sp.SPTs = nil, nil - serializedStruct, err := ToJSON(sp) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - sp.CASignature, sp.SPTs = caSig, SPTs - - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(caCert.PublicKey.(*rsa.PublicKey), crypto.SHA256, hashOutput[:], sp.CASignature) - if err != nil { - return fmt.Errorf("VerifyCASigInPC | VerifyPKCS1v15 | %w", err) - } - return nil -} - -// signStructRSASHA256: generate a signature using SHA256 and RSA -func signStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { - b, err := ToJSON(s) - if err != nil { - return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) - } - return SignBytes(b, key) -} diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go new file mode 100644 index 00000000..70e64b87 --- /dev/null +++ b/pkg/common/crypto/crypto.go @@ -0,0 +1,259 @@ +package crypto + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" + "crypto/sha256" + "fmt" + "time" + + ctx509 "github.com/google/certificate-transparency-go/x509" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/util" +) + +func SignBytes(b []byte, key *rsa.PrivateKey) ([]byte, error) { + hashOutput := sha256.Sum256(b) + signature, err := 
rsa.SignPKCS1v15(rand.Reader, key, crypto.SHA256, hashOutput[:]) + if err != nil { + return nil, fmt.Errorf("SignBytes | SignPKCS1v15 | %w", err) + } + return signature, nil +} + +// ---------------------------------------------------------------------------------- +// functions on RCSR +// ---------------------------------------------------------------------------------- + +// RCSRCreateSignature: Generate a signature, and fill the signature in the RCSR +func RCSRCreateSignature(domainOwnerPrivKey *rsa.PrivateKey, rcsr *common.RCSR) error { + // clear signature; normally should be empty + rcsr.Signature = []byte{} + + signature, err := signStructRSASHA256(rcsr, domainOwnerPrivKey) + if err != nil { + return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) + } + + rcsr.Signature = signature + return nil +} + +// RCSRGenerateRPCSignature: Generate RPC signature and fill it in the RCSR; +// +// (in paper, if new rcsr has the signature from previous rpc, the cool-off can be bypassed) +func RCSRGenerateRPCSignature(rcsr *common.RCSR, prevPrivKeyOfPRC *rsa.PrivateKey) error { + // clear the co-responding fields + rcsr.Signature = []byte{} + rcsr.PRCSignature = []byte{} + + rpcSignature, err := signStructRSASHA256(rcsr, prevPrivKeyOfPRC) + if err != nil { + return fmt.Errorf("RCSRGenerateRPCSignature | SignStructRSASHA256 | %w", err) + } + + rcsr.PRCSignature = rpcSignature + return nil +} + +// RCSRVerifySignature: verify the signature using the public key in hash +func RCSRVerifySignature(rcsr *common.RCSR) error { + // Serialize without signature: + sig := rcsr.Signature + rcsr.Signature = nil + serializedStruct, err := common.ToJSON(rcsr) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + } + rcsr.Signature = sig + + // Get the pub key: + pubKey, err := util.PEMToRSAPublic(rcsr.PublicKey) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | PemBytesToRsaPublicKey | %w", err) + } + + hashOutput := 
sha256.Sum256(serializedStruct) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rcsr.Signature) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | VerifyPKCS1v15 | %w", err) + } + return nil +} + +// RCSRVerifyRPCSignature: verify the RCSR using RPC; verify the RPC signature +func RCSRVerifyRPCSignature(rcsr *common.RCSR, rpc *common.RPC) error { + // Serialize without signature: + sig := rcsr.Signature + rcsr.Signature = nil + serializedStruct, err := common.ToJSON(rcsr) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + } + rcsr.Signature = sig + + pubKey, err := util.PEMToRSAPublic(rpc.PublicKey) + if err != nil { + return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) + } + + hashOutput := sha256.Sum256(serializedStruct) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rcsr.PRCSignature) + if err != nil { + return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) + } + return nil +} + +// RCSRGenerateRPC: called by PCA. 
Sign the RCSR and generate RPC; SPT field is (should be) empty +func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, + caPrivKey *rsa.PrivateKey, caName string) (*common.RPC, error) { + + rpc := &common.RPC{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: rcsr.Subject, + }, + Version: rcsr.Version, + PublicKeyAlgorithm: rcsr.PublicKeyAlgorithm, + PublicKey: rcsr.PublicKey, + CAName: caName, + SignatureAlgorithm: common.SHA256, + TimeStamp: time.Now(), + PRCSignature: rcsr.PRCSignature, + NotBefore: notBefore, + NotAfter: time.Now().AddDate(0, 0, 90), + SerialNumber: serialNumber, + CASignature: []byte{}, + SPTs: []common.SPT{}, + } + + signature, err := signStructRSASHA256(rpc, caPrivKey) + if err != nil { + return nil, fmt.Errorf("RCSRGenerateRPC | SignStructRSASHA256 | %w", err) + } + + rpc.CASignature = signature + return rpc, nil +} + +// ---------------------------------------------------------------------------------- +// functions on RPC +// ---------------------------------------------------------------------------------- + +// RPCVerifyCASignature: used by domain owner, check whether CA signature is correct +func RPCVerifyCASignature(caCert *ctx509.Certificate, rpc *common.RPC) error { + pubKey := caCert.PublicKey.(*rsa.PublicKey) + + // Serialize without CA signature or SPTs: + caSig, SPTs := rpc.CASignature, rpc.SPTs + rpc.CASignature, rpc.SPTs = nil, nil + bytes, err := common.ToJSON(rpc) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + } + rpc.CASignature, rpc.SPTs = caSig, SPTs + + hashOutput := sha256.Sum256(bytes) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rpc.CASignature) + if err != nil { + return fmt.Errorf("RPCVerifyCASignature | VerifyPKCS1v15 | %w", err) + } + return nil +} + +// ---------------------------------------------------------------------------------- +// functions on SP +// 
---------------------------------------------------------------------------------- + +// DomainOwnerSignSP: Used by domain owner to sign the PC +func DomainOwnerSignPSR(domainOwnerPrivKey *rsa.PrivateKey, psr *common.PSR) error { + signature, err := signStructRSASHA256(psr, domainOwnerPrivKey) + if err != nil { + return fmt.Errorf("DomainOwnerSignPC | SignStructRSASHA256 | %w", err) + } + + psr.RootCertSignature = signature + return nil +} + +func VerifyPSRUsingRPC(psr *common.PSR, rpc *common.RPC) error { + // Serialize without signature: + sig := psr.RootCertSignature + psr.RootCertSignature = nil + serializedStruct, err := common.ToJSON(psr) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + } + psr.RootCertSignature = sig + + pubKey, err := util.PEMToRSAPublic(rpc.PublicKey) + if err != nil { + return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) + } + + hashOutput := sha256.Sum256(serializedStruct) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], psr.RootCertSignature) + if err != nil { + return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) + } + + return nil +} + +// CAVerifySPAndSign: verify the signature and sign the signature +func CASignSP(psr *common.PSR, caPrivKey *rsa.PrivateKey, caName string, serialNum int) ( + *common.SP, error) { + + sp := &common.SP{ + PolicyObjectBase: common.PolicyObjectBase{ + Subject: psr.DomainName, + }, + Policies: psr.Policies, + RootCertSignature: psr.RootCertSignature, + TimeStamp: time.Now(), + CAName: caName, + SerialNumber: serialNum, + } + + caSignature, err := signStructRSASHA256(sp, caPrivKey) + if err != nil { + return &common.SP{}, fmt.Errorf("CASignSP | SignStructRSASHA256 | %w", err) + } + + sp.CASignature = caSignature + return sp, nil +} + +// VerifyCASigInSP: verify CA's signature +func VerifyCASigInSP(caCert *ctx509.Certificate, sp *common.SP) error { + if len(sp.CASignature) == 0 { + return fmt.Errorf("VerifyCASigInPC 
| no valid CA signature") + } + + // Serialize without CA signature or SPTs: + caSig, SPTs := sp.CASignature, sp.SPTs + sp.CASignature, sp.SPTs = nil, nil + serializedStruct, err := common.ToJSON(sp) + if err != nil { + return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + } + sp.CASignature, sp.SPTs = caSig, SPTs + + hashOutput := sha256.Sum256(serializedStruct) + err = rsa.VerifyPKCS1v15(caCert.PublicKey.(*rsa.PublicKey), crypto.SHA256, hashOutput[:], sp.CASignature) + if err != nil { + return fmt.Errorf("VerifyCASigInPC | VerifyPKCS1v15 | %w", err) + } + return nil +} + +// signStructRSASHA256: generate a signature using SHA256 and RSA +func signStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { + b, err := common.ToJSON(s) + if err != nil { + return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) + } + return SignBytes(b, key) +} diff --git a/pkg/common/crypto_test.go b/pkg/common/crypto/crypto_test.go similarity index 87% rename from pkg/common/crypto_test.go rename to pkg/common/crypto/crypto_test.go index 59d8d5e5..93cbb39b 100644 --- a/pkg/common/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -1,14 +1,16 @@ -package common_test +package crypto_test import ( "testing" "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/common/crypto" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/util" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" ) // TestSignatureOfRCSR: Generate RCSR -> generate signature for RCSR -> verify signature @@ -33,10 +35,10 @@ func TestSignatureOfRCSR(t *testing.T) { test.PublicKey = pubKeyBytes - err = common.RCSRCreateSignature(privKey, test) + err = crypto.RCSRCreateSignature(privKey, test) require.NoError(t, err, "RCSR sign signature error") - err = 
common.RCSRVerifySignature(test) + err = crypto.RCSRVerifySignature(test) require.NoError(t, err, "RCSR verify signature error") } @@ -67,18 +69,18 @@ func TestIssuanceOfRPC(t *testing.T) { rcsr.PublicKey = pubKeyBytes // generate signature for rcsr - err = common.RCSRCreateSignature(privKey, rcsr) + err = crypto.RCSRCreateSignature(privKey, rcsr) require.NoError(t, err, "RCSR Create Signature error") // ------------------------------------- // phase 2: pca issue rpc // ------------------------------------- // validate the signature in rcsr - err = common.RCSRVerifySignature(rcsr) + err = crypto.RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") - rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") + rpc, err := crypto.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") @@ -90,7 +92,7 @@ func TestIssuanceOfRPC(t *testing.T) { caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") require.NoError(t, err, "X509 Cert From File error") - err = common.RPCVerifyCASignature(caCert, rpc) + err = crypto.RPCVerifyCASignature(caCert, rpc) require.NoError(t, err, "RPC Verify CA Signature error") } @@ -120,19 +122,19 @@ func TestIssuanceOfSP(t *testing.T) { rcsr.PublicKey = pubKeyBytes // generate signature for rcsr - err = common.RCSRCreateSignature(privKey, rcsr) + err = crypto.RCSRCreateSignature(privKey, rcsr) require.NoError(t, err, "RCSR Create Signature error") // ------------------------------------- // phase 2: pca issue rpc // ------------------------------------- // validate the signature in rcsr - err = common.RCSRVerifySignature(rcsr) + err = crypto.RCSRVerifySignature(rcsr) require.NoError(t, err, "RCSR Verify Signature error") pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") 
require.NoError(t, err) - rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") + rpc, err := crypto.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") @@ -145,16 +147,16 @@ func TestIssuanceOfSP(t *testing.T) { DomainName: "test_SP", } - err = common.DomainOwnerSignPSR(privKey, psr) + err = crypto.DomainOwnerSignPSR(privKey, psr) require.NoError(t, err, "DomainOwnerSignPSR error") // ------------------------------------- // phase 4: pca check psr // ------------------------------------- - err = common.VerifyPSRUsingRPC(psr, rpc) + err = crypto.VerifyPSRUsingRPC(psr, rpc) require.NoError(t, err, "VerifyPSRUsingRPC error") - sp, err := common.CASignSP(psr, pcaPrivKey, "test ca", 22) + sp, err := crypto.CASignSP(psr, pcaPrivKey, "test ca", 22) require.NoError(t, err, "CASignSP error") // ------------------------------------- @@ -163,6 +165,6 @@ func TestIssuanceOfSP(t *testing.T) { caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") require.NoError(t, err, "X509CertFromFile error") - err = common.VerifyCASigInSP(caCert, sp) + err = crypto.VerifyCASigInSP(caCert, sp) require.NoError(t, err, "VerifyCASigInSP error") } diff --git a/pkg/common/testdata/clientcert.pem b/pkg/common/crypto/testdata/clientcert.pem similarity index 100% rename from pkg/common/testdata/clientcert.pem rename to pkg/common/crypto/testdata/clientcert.pem diff --git a/pkg/common/testdata/clientkey.pem b/pkg/common/crypto/testdata/clientkey.pem similarity index 100% rename from pkg/common/testdata/clientkey.pem rename to pkg/common/crypto/testdata/clientkey.pem diff --git a/pkg/common/testdata/servercert.pem b/pkg/common/crypto/testdata/servercert.pem similarity index 100% rename from pkg/common/testdata/servercert.pem rename to pkg/common/crypto/testdata/servercert.pem diff --git a/pkg/common/testdata/serverkey.pem 
b/pkg/common/crypto/testdata/serverkey.pem similarity index 100% rename from pkg/common/testdata/serverkey.pem rename to pkg/common/crypto/testdata/serverkey.pem diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index a47cdd7c..c3041549 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/common/crypto" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" @@ -123,7 +124,7 @@ func (r *MapResponder) signTreeHead(configFile string) error { } // Sign the tree head. - signature, err := common.SignBytes(r.smt.Root, privateKey) + signature, err := crypto.SignBytes(r.smt.Root, privateKey) if err != nil { return fmt.Errorf("SignStructRSASHA256 | %w", err) } diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index 31134c37..a0b460f1 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -7,12 +7,13 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/common/crypto" ) // SignAndLogRCSR: sign the rcsr and generate a rpc -> store the rpc to the "fileExchange" folder; policy log will fetch rpc from the folder func (pca *PCA) SignAndLogRCSR(rcsr *common.RCSR) error { // verify the signature in the rcsr; check if the domain's pub key is correct - err := common.RCSRVerifySignature(rcsr) + err := crypto.RCSRVerifySignature(rcsr) if err != nil { return fmt.Errorf("SignAndLogRCSR | RCSRVerifySignature | %w", err) } @@ -20,7 +21,7 @@ func (pca *PCA) SignAndLogRCSR(rcsr *common.RCSR) error { pca.increaseSerialNumber() // generate pre-RPC (without SPT) - rpc, err := common.RCSRGenerateRPC(rcsr, time.Now(), pca.serialNumber, pca.rsaKeyPair, pca.caName) + rpc, err := crypto.RCSRGenerateRPC(rcsr, 
time.Now(), pca.serialNumber, pca.rsaKeyPair, pca.caName) if err != nil { return fmt.Errorf("SignAndLogRCSR | RCSRGenerateRPC | %w", err) } @@ -51,7 +52,7 @@ func (pca *PCA) SignAndLogSP(psr *common.PSR) error { pca.increaseSerialNumber() - sp, err := common.CASignSP(psr, pca.rsaKeyPair, pca.caName, pca.serialNumber) + sp, err := crypto.CASignSP(psr, pca.rsaKeyPair, pca.caName, pca.serialNumber) if err != nil { return fmt.Errorf("SignAndLogPSR | CASignSP | %w", err) } @@ -77,7 +78,7 @@ func (pca *PCA) findRPCAndVerifyPSR(psr *common.PSR) error { return fmt.Errorf("findRPCAndVerifyPSR | validRPCsByDomains | no valid rpc at this moment") } - err := common.VerifyPSRUsingRPC(psr, rpc) + err := crypto.VerifyPSRUsingRPC(psr, rpc) if err != nil { return fmt.Errorf("findRPCAndVerifyPSR | VerifyPSRUsingRPC | %w", err) } From aeaab17ce1b5a2f934f8f0c5b54c06593af38d36 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 23 May 2023 14:21:36 +0200 Subject: [PATCH 126/187] Fix bug coalesce not always sorted IDs. 
--- pkg/mapserver/updater/dbutil_test.go | 42 ---------------------------- tools/create_schema.sh | 8 +++--- 2 files changed, 4 insertions(+), 46 deletions(-) delete mode 100644 pkg/mapserver/updater/dbutil_test.go diff --git a/pkg/mapserver/updater/dbutil_test.go b/pkg/mapserver/updater/dbutil_test.go deleted file mode 100644 index dac21e4e..00000000 --- a/pkg/mapserver/updater/dbutil_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package updater - -import ( - "testing" -) - -// TestParseDomainBytes: test ParseDomainBytes() -func TestParseDomainBytes(t *testing.T) { - // domainEntry := &common.DomainEntry{ - // DomainName: "test domain", - // Entries: []common.Entry{ - // { - // CAName: "ca1", - // DomainCerts: [][]byte{{1, 2, 3}}, - // }, - // { - // CAName: "ca2", - // DomainCerts: [][]byte{{2, 3, 4}}, - // }, - // }, - // } - - // serializedBytes, err := common.SerializeDomainEntry(domainEntry) - // require.NoError(t, err) - - // keyValuePairs := []*db.KeyValuePair{ - // { - // Key: [32]byte{1}, - // Value: serializedBytes, - // }, - // } - - // result, err := parseDomainBytes(keyValuePairs) - // require.NoError(t, err) - - // domainEntry_, ok := result[[32]byte{1}] - // assert.True(t, ok) - - // assert.Equal(t, domainEntry.DomainName, domainEntry_.DomainName) - // assert.Equal(t, domainEntry.Entries[0].CAName, domainEntry_.Entries[0].CAName) - // assert.Equal(t, domainEntry.Entries[1].CAName, domainEntry_.Entries[1].CAName) -} diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 3c42e85b..5a9afc1d 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -315,7 +315,7 @@ BEGIN SELECT domain_id, cert_ids, UNHEX(SHA2(cert_ids, 256)) FROM ( -- Subquery to compute the SHA256 in place. -- Select the concatenation of all cert IDs (sorted) of a domain. 
- SELECT domain_id, GROUP_CONCAT(cert_id SEPARATOR '') AS cert_ids FROM( + SELECT domain_id, GROUP_CONCAT(cert_id ORDER BY cert_id SEPARATOR '') AS cert_ids FROM( -- The CTE lists all certs that are reachable by the domain_id WITH RECURSIVE cte AS ( -- Base case: specify which leaf certs we choose: those that @@ -343,7 +343,7 @@ BEGIN FROM certs JOIN cte ON certs.cert_id = cte.parent_id ) - SELECT DISTINCT domain_id, cert_id FROM cte ORDER BY cert_id + SELECT DISTINCT domain_id, cert_id FROM cte ) AS collate_cert_ids_query GROUP BY domain_id ) AS hasher_query; @@ -372,7 +372,7 @@ BEGIN SELECT domain_id, policy_ids, UNHEX(SHA2(policy_ids, 256)) FROM ( -- Subquery to compute the SHA256 in place. -- Select the concatenation of all policy IDs (sorted) of a domain. - SELECT domain_id, GROUP_CONCAT(policy_id SEPARATOR '') AS policy_ids FROM( + SELECT domain_id, GROUP_CONCAT(policy_id ORDER BY policy_id SEPARATOR '') AS policy_ids FROM( -- The CTE lists all policies that are reachable by the domain_id WITH RECURSIVE cte AS ( -- Base case: specify which leaf policies we choose: those that @@ -400,7 +400,7 @@ BEGIN FROM policies JOIN cte ON policies.policy_id = cte.parent_id ) - SELECT DISTINCT domain_id, policy_id FROM cte ORDER BY policy_id + SELECT DISTINCT domain_id, policy_id FROM cte ) AS collate_policy_ids_query GROUP BY domain_id ) AS hasher_query; From 8b69d04991b61c1f7d30d8bf4aac75c15bbec0e5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 10:03:07 +0200 Subject: [PATCH 127/187] WIP refactoring the updater type. Also changing tests. 
--- pkg/db/mysql/export_test.go | 24 ++ pkg/db/mysql/mysql.go | 32 ++- pkg/db/mysql/mysql_test.go | 57 ++++ pkg/mapserver/updater/certs_updater.go | 64 ----- pkg/mapserver/updater/dbutil.go | 179 ------------- pkg/mapserver/updater/rpc_updater.go | 51 ---- pkg/mapserver/updater/updater.go | 80 +++--- pkg/mapserver/updater/updater_test.go | 247 +++++++----------- pkg/mapserver/updater/updater_test_adapter.go | 14 +- 9 files changed, 253 insertions(+), 495 deletions(-) create mode 100644 pkg/db/mysql/export_test.go delete mode 100644 pkg/mapserver/updater/dbutil.go diff --git a/pkg/db/mysql/export_test.go b/pkg/db/mysql/export_test.go new file mode 100644 index 00000000..8a4da6f2 --- /dev/null +++ b/pkg/db/mysql/export_test.go @@ -0,0 +1,24 @@ +package mysql + +import ( + "context" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" +) + +type MysqlDBForTests struct { + *mysqlDB +} + +func NewMysqlDBForTests(db db.Conn) *MysqlDBForTests { + return &MysqlDBForTests{ + mysqlDB: db.(*mysqlDB), + } +} + +func (c *MysqlDBForTests) DebugCheckCertsExist(ctx context.Context, ids []*common.SHA256Output, + present []bool) error { + + return c.checkCertsExist(ctx, ids, present) +} diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 23048e9d..3467d817 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -168,6 +168,32 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu // If empty, return empty. return nil, nil } + presence := make([]bool, len(ids)) + + // The query won't accept more than batchSize elements. Make batches. + for i := 0; i < len(ids)-batchSize; i += batchSize { + to := i + batchSize + if err := c.checkCertsExist(ctx, ids[i:to], presence[i:to]); err != nil { + return nil, err + } + } + // Do the last batch, if non empty. 
+ from := len(ids) / batchSize * batchSize + to := from + len(ids)%batchSize + var err error + if to > from { + err = c.checkCertsExist(ctx, ids[from:to], presence[from:to]) + } + return presence, err +} + +// checkCertsExist should not be called with larger than ~1000 elements, the query being used +// may fail with a message like: +// Error 1436 (HY000): Thread stack overrun: 1028624 bytes used of a 1048576 byte stack, +// and 20000 bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack. +func (c *mysqlDB) checkCertsExist(ctx context.Context, ids []*common.SHA256Output, + present []bool) error { + // Slice to be used in the SQL query: data := make([]interface{}, len(ids)) for i, id := range ids { @@ -190,11 +216,9 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu "certs.cert_id = request.cert_id) AS t" // Return slice of booleans: - present := make([]bool, len(ids)) - var value string if err := c.db.QueryRowContext(ctx, str, data...).Scan(&value); err != nil { - return nil, err + return err } for i, c := range value { if c == '1' { @@ -202,7 +226,7 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu } } - return present, nil + return nil } // CheckPoliciesExist returns a slice of true/false values. Each value indicates if diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index ec426865..c4deb304 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + mysqldriver "github.com/go-sql-driver/mysql" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/require" @@ -20,6 +21,62 @@ import ( "github.com/netsec-ethz/fpki/pkg/util" ) +func TestCheckCertsExist(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + defer cancelF() + + // DB will have the same name as the test function. 
+ dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) + + // Create a new DB with that name. On exiting the function, it will be removed. + err := testdb.CreateTestDB(ctx, dbName) + require.NoError(t, err) + defer func() { + err = testdb.RemoveTestDB(ctx, config) + require.NoError(t, err) + }() + + // Connect to the DB. + conn, err := mysql.Connect(config) + require.NoError(t, err) + defer conn.Close() + + // Obtain a convenient MysqlDBForTests object (only in tests). + c := mysql.NewMysqlDBForTests(conn) + createIDs := func(n int) []*common.SHA256Output { + ids := make([]*common.SHA256Output, n) + for i := range ids { + id := common.SHA256Output{} + ids[i] = &id + } + return ids + } + + // Check the function with 10 elements, it should work. + N := 10 + ids := createIDs(N) + presence := make([]bool, N) + err = c.DebugCheckCertsExist(ctx, ids, presence) + require.NoError(t, err) + + // Check now with 10000 elements, will fail. + N = 10000 + ids = createIDs(N) + presence = make([]bool, N) + err = c.DebugCheckCertsExist(ctx, ids, presence) + require.Error(t, err) + t.Logf("error type is: %T, message: %s", err, err) + require.IsType(t, &mysqldriver.MySQLError{}, err) + myErr := err.(*mysqldriver.MySQLError) + require.Equal(t, myErr.Number, uint16(1436)) // Thread stack overrun. + + // With 10000 elements but using the public function, it will work. + presence, err = c.CheckCertsExist(ctx, ids) + require.NoError(t, err) + require.Len(t, presence, len(ids)) +} + func TestCoalesceForDirtyDomains(t *testing.T) { // Because we are using "random" bytes deterministically here, set a fixed seed. 
rand.Seed(1) diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go index b2abf58f..2907bd2f 100644 --- a/pkg/mapserver/updater/certs_updater.go +++ b/pkg/mapserver/updater/certs_updater.go @@ -1,7 +1,6 @@ package updater import ( - "context" "fmt" ctx509 "github.com/google/certificate-transparency-go/x509" @@ -18,69 +17,6 @@ import ( type uniqueSet map[common.SHA256Output]struct{} type uniqueStringSet map[string]struct{} -// DeletemeUpdateDomainEntriesTableUsingCerts: Update the domain entries using the domain certificates -func (mapUpdater *MapUpdater) DeletemeUpdateDomainEntriesTableUsingCerts( - ctx context.Context, - certs []*ctx509.Certificate, - certChains [][]*ctx509.Certificate, -) ( - []*db.KeyValuePair, - int, - error, -) { - panic("deprecated: should never be called") - if len(certs) == 0 { - return nil, 0, nil - } - - // get the unique list of affected domains - affectedDomainsSet, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap( - certs, certChains) - - // if no domain to update - if len(affectedDomainsSet) == 0 { - return nil, 0, nil - } - - // retrieve (possibly)affected domain entries from db - // It's possible that no records will be changed, because the certs are already recorded. 
- domainEntriesMap, err := mapUpdater.deletemeRetrieveAffectedDomainFromDB(ctx, affectedDomainsSet) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | %w", err) - } - - // update the domain entries - updatedDomains, err := UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | updateDomainEntries | %w", err) - } - - // if during this updates, no cert is added, directly return - if len(updatedDomains) == 0 { - return nil, 0, nil - } - - // get the domain entries only if they are updated, from DB - domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | getDomainEntriesToWrite | %w", err) - } - - // serialized the domainEntry -> key-value pair - keyValuePairs, err := DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | serializeUpdatedDomainEntries | %w", err) - } - - // commit changes to db - num, err := mapUpdater.writeChangesToDB(ctx, keyValuePairs) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingCerts | writeChangesToDB | %w", err) - } - - return keyValuePairs, num, nil -} - // return affected domains. // First return value: map of hashes of updated domain name. TODO(yongzhe): change this to a list maybe // Second return value: "domain name" -> certs. 
So later, one can look through the map to decide which certs to diff --git a/pkg/mapserver/updater/dbutil.go b/pkg/mapserver/updater/dbutil.go deleted file mode 100644 index c3bcf77c..00000000 --- a/pkg/mapserver/updater/dbutil.go +++ /dev/null @@ -1,179 +0,0 @@ -package updater - -import ( - "context" - "fmt" - "time" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - -const batchSize = 1000 - -type dbResult struct { - pairs []*db.KeyValuePair - err error -} - -// deletemeRetrieveAffectedDomainFromDB: get affected domain entries from db -func (mapUpdater *MapUpdater) deletemeRetrieveAffectedDomainFromDB(ctx context.Context, - affectedDomainsSet uniqueSet) (map[common.SHA256Output]*mapCommon.DomainEntry, error) { - - // XXX(juagargi) review why passing a set (we need to convert it to a slice) - // list of domain hashes to fetch the domain entries from db - affectedDomainHashes := make([]*common.SHA256Output, 0, len(affectedDomainsSet)) - for k := range affectedDomainsSet { - affectedDomainHashes = append(affectedDomainHashes, &k) - } - - work := func(domainHashes []*common.SHA256Output, resultChan chan dbResult) { - domainEntries, err := mapUpdater.dbConn.RetrieveDomainEntries(ctx, domainHashes) - resultChan <- dbResult{pairs: domainEntries, err: err} - } - - resultChan := make(chan dbResult) - - totalNum := len(affectedDomainHashes) - numOfWorker := totalNum / batchSize - remaining := totalNum % batchSize - - workerCounter := 0 - for i := 0; i < numOfWorker; i++ { - workerCounter++ - go work(affectedDomainHashes[i*batchSize:i*batchSize+batchSize], resultChan) - } - if remaining != 0 { - workerCounter++ - go work(affectedDomainHashes[numOfWorker*batchSize:], resultChan) - } - - domainEntries := []*db.KeyValuePair{} - - for workerCounter > 0 { - newResult := <-resultChan - if newResult.err != nil { - return nil, newResult.err - } - 
//fmt.Println("pair length ", len(newResult.pairs)) - domainEntries = append(domainEntries, newResult.pairs...) - workerCounter-- - } - - start := time.Now() - - //fmt.Println(len(domainEntries)) - // parse the key-value pair -> domain map - domainEntriesMap, err := deletemeParseDomainBytes(domainEntries) - if err != nil { - return nil, fmt.Errorf("retrieveAffectedDomainFromDB | %w", err) - } - end := time.Now() - fmt.Println("time to parse domain entries", end.Sub(start)) - //fmt.Println(len(domainEntriesMap)) - return domainEntriesMap, nil -} - -// writeChangesToDB: commit changes to domain entries table and updates table -func (mapUpdater *MapUpdater) writeChangesToDB(ctx context.Context, - updatesToDomainEntriesTable []*db.KeyValuePair) (int, error) { - - _, err := mapUpdater.dbConn.UpdateDomainEntries(ctx, updatesToDomainEntriesTable) - if err != nil { - return 0, fmt.Errorf("writeChangesToDB | %w", err) - } - - return len(updatesToDomainEntriesTable), nil -} - -const domainParserWorker = 64 - -type parserResult struct { - keys [][32]byte - entries []*mapCommon.DomainEntry - err error -} - -// domain bytes -> domain entries -func deletemeParseDomainBytes(domainEntries []*db.KeyValuePair) ( - map[common.SHA256Output]*mapCommon.DomainEntry, error) { - /* - unique := make(map[[32]byte]byte) - for _, v := range domainEntries { - unique[v.Key] = 1 - } - fmt.Println(len(unique)) - */ - if len(domainEntries) == 0 { - return make(map[common.SHA256Output]*mapCommon.DomainEntry), nil - } - - workerNum := domainParserWorker - count := len(domainEntries) - - if count < 64 { - workerNum = 1 - } - - step := count / workerNum - remaining := count % workerNum - - resultChan := make(chan parserResult) - - work := func(domainBytes []*db.KeyValuePair) { - entries := []*mapCommon.DomainEntry{} - keys := [][32]byte{} - for _, entry := range domainBytes { - newPair, err := mapCommon.DeletemeDeserializeDomainEntry(entry.Value) - if err != nil { - resultChan <- parserResult{err: err} 
- } - entries = append(entries, newPair) - keys = append(keys, entry.Key) - } - resultChan <- parserResult{keys: keys, entries: entries} - } - - activeWorker := 0 - for i := 0; i < workerNum; i++ { - activeWorker++ - //fmt.Println(i*step, " ", i*step+step-1) - go work(domainEntries[i*step : i*step+step]) - } - if remaining != 0 { - activeWorker++ - //fmt.Println(workerNum * step) - go work(domainEntries[workerNum*step:]) - } - - //fmt.Println(activeWorker) - - entries := []*mapCommon.DomainEntry{} - keys := [][32]byte{} - - for activeWorker > 0 { - threadResult := <-resultChan - if threadResult.err != nil { - return nil, fmt.Errorf("parseDomainBytes | %w", threadResult.err) - } - entries = append(entries, threadResult.entries...) - keys = append(keys, threadResult.keys...) - activeWorker-- - //fmt.Println(activeWorker) - } - - result := make(map[common.SHA256Output]*mapCommon.DomainEntry) - //fmt.Println(len(entries)) - - for i, k := range entries { - result[keys[i]] = k - } - - if len(domainEntries) != len(result) { - fmt.Println(len(domainEntries), " ", len(result)) - return nil, fmt.Errorf("parseDomainBytes | incomplete parsing") - } - - return result, nil -} diff --git a/pkg/mapserver/updater/rpc_updater.go b/pkg/mapserver/updater/rpc_updater.go index 1ac48a46..06d1d4c1 100644 --- a/pkg/mapserver/updater/rpc_updater.go +++ b/pkg/mapserver/updater/rpc_updater.go @@ -1,11 +1,7 @@ package updater import ( - "context" - "fmt" - projectCommon "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/domain" "github.com/netsec-ethz/fpki/pkg/mapserver/common" ) @@ -16,53 +12,6 @@ type newUpdates struct { pc []*projectCommon.SP } -// DeletemeUpdateDomainEntriesTableUsingRPCAndPC: update the domain entries table, given RPC and PC -func (mapUpdater *MapUpdater) DeletemeUpdateDomainEntriesTableUsingRPCAndPC(ctx context.Context, - rpc []*projectCommon.RPC, pc []*projectCommon.SP, 
readerNum int) ( - []*db.KeyValuePair, int, error) { - - if len(rpc) == 0 && len(pc) == 0 { - return nil, 0, nil - } - - affectedDomainsMap, domainCertMap := getAffectedDomainAndCertMapPCAndRPC(rpc, pc) - if len(affectedDomainsMap) == 0 { - return nil, 0, nil - } - - // retrieve (possibly)affected domain entries from db - // It's possible that no records will be changed, because the certs are already recorded. - domainEntriesMap, err := mapUpdater.deletemeRetrieveAffectedDomainFromDB(ctx, affectedDomainsMap) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | retrieveAffectedDomainFromDB | %w", err) - } - - // update the domain entries - updatedDomains, err := updateDomainEntriesWithRPCAndPC(domainEntriesMap, domainCertMap) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | updateDomainEntriesWithRPCAndPC | %w", err) - } - - // get the domain entries only if they are updated - domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | getDomainEntriesToWrite | %w", err) - } - - // serialize the domainEntry -> key-value pair - keyValuePairs, err := DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | serializeUpdatedDomainEntries | %w", err) - } - - // commit changes to db - numOfWrites, err := mapUpdater.writeChangesToDB(ctx, keyValuePairs) - if err != nil { - return nil, 0, fmt.Errorf("UpdateDomainEntriesTableUsingRPCAndPC | serializeUpdatedDomainEntries | %w", err) - } - return keyValuePairs, numOfWrites, nil -} - // getAffectedDomainAndCertMapPCAndRPC: return a map of affected domains, and cert map func getAffectedDomainAndCertMapPCAndRPC(rpc []*projectCommon.RPC, pc []*projectCommon.SP) ( uniqueSet, map[string]*newUpdates) { diff --git a/pkg/mapserver/updater/updater.go 
b/pkg/mapserver/updater/updater.go index c1a021ba..6fa0b93a 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -98,26 +98,26 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // updateCerts: update the tables and SMT (in memory) using certificates func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { panic("deprecated: should never be called") - keyValuePairs, numOfUpdates, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) - if err != nil { - return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err) - } else if numOfUpdates == 0 { - return nil - } - - if len(keyValuePairs) == 0 { - return nil - } - - keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) - if err != nil { - return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) - } - - _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) - if err != nil { - return fmt.Errorf("CollectCerts | Update | %w", err) - } + // keyValuePairs, numOfUpdates, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) + // if err != nil { + // return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err) + // } else if numOfUpdates == 0 { + // return nil + // } + + // if len(keyValuePairs) == 0 { + // return nil + // } + + // keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) + // if err != nil { + // return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) + // } + + // _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) + // if err != nil { + // return fmt.Errorf("CollectCerts | Update | %w", err) + // } return nil } @@ -139,26 +139,28 @@ func (mapUpdater *MapUpdater) UpdateRPCAndPCLocally(ctx context.Context, spList // updateRPCAndPC: update the tables and SMT (in memory) using PC and RPC func (mapUpdater *MapUpdater) 
updateRPCAndPC(ctx context.Context, pcList []*common.SP, rpcList []*common.RPC) error { - // update the domain and - keyValuePairs, _, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingRPCAndPC(ctx, rpcList, pcList, 10) - if err != nil { - return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingRPCAndPC | %w", err) - } - - if len(keyValuePairs) == 0 { - return nil - } - - keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) - if err != nil { - return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) - } + panic("deprecated: should never be called") - // update Sparse Merkle Tree - _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) - if err != nil { - return fmt.Errorf("CollectCerts | Update | %w", err) - } + // // update the domain and + // keyValuePairs, _, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingRPCAndPC(ctx, rpcList, pcList, 10) + // if err != nil { + // return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingRPCAndPC | %w", err) + // } + + // if len(keyValuePairs) == 0 { + // return nil + // } + + // keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) + // if err != nil { + // return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) + // } + + // // update Sparse Merkle Tree + // _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) + // if err != nil { + // return fmt.Errorf("CollectCerts | Update | %w", err) + // } return nil } diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 11313319..6ffd335e 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -2,15 +2,17 @@ package updater import ( "context" - "io/ioutil" + "encoding/hex" + "fmt" + "math/rand" + "os" "testing" "time" - "github.com/google/certificate-transparency-go/x509" - projectCommon "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/domain" - 
"github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" + ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" + "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" @@ -18,162 +20,107 @@ import ( "github.com/stretchr/testify/require" ) -// TestUpdateCerts: test updateCerts() -func TestUpdateCerts(t *testing.T) { - t.Skip() // deleteme +// TestUpdateWithKeepExisting checks that the UpdateWithKeepExisting function can update a large +// number of certificates and policy objects. +func TestUpdateWithKeepExisting(t *testing.T) { + // Because we are using "random" bytes deterministically here, set a fixed seed. + rand.Seed(111) - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, testdb.NewMockDB()) - require.NoError(t, err) - smt.CacheHeightLimit = 233 - - updaterDB := testdb.NewMockDB() - updater, err := getMockUpdater(smt, updaterDB) - require.NoError(t, err) + // ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + ctx, cancelF := context.WithTimeout(context.Background(), time.Hour) //deleteme + defer cancelF() - certs := []*x509.Certificate{} - // load test certs - files, err := ioutil.ReadDir("./testdata/certs/") - require.NoError(t, err, "ioutil.ReadDir") + // DB will have the same name as the test function. + dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - for _, file := range files { - cert, err := util.CertificateFromPEMFile("./testdata/certs/" + file.Name()) + // Create a new DB with that name. On exiting the function, it will be removed. 
+ err := testdb.CreateTestDB(ctx, dbName) + require.NoError(t, err) + defer func() { + err = testdb.RemoveTestDB(ctx, config) require.NoError(t, err) - certs = append(certs, cert) - } - - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() + }() - // update the db using the certs - emptyCertChains := make([][]*x509.Certificate, len(certs)) - err = updater.updateCerts(ctx, certs, emptyCertChains) + // Connect to the DB. + conn, err := mysql.Connect(config) require.NoError(t, err) + defer conn.Close() - // update table should be empty - assert.Equal(t, 0, len(updaterDB.UpdatesTable)) - - // check whether certs are correctly added to the db - for _, cert := range certs { - domains := domain.ExtractAffectedDomains(util.ExtractCertDomains(cert)) - - for _, domain := range domains { - domainHash := projectCommon.SHA256Hash32Bytes([]byte(domain)) - assert.Contains(t, updaterDB.DomainEntriesTable, domainHash) - // domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] - - // domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) - // require.NoError(t, err) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName != cert.Issuer.String() { - // assert.NotContains(t, caList.DomainCerts, cert.Raw) - // } else { - // assert.Contains(t, caList.DomainCerts, cert.Raw) - // } - // } - - // test if SMT response is correct - _, isPoP, _, _, err := smt.MerkleProof(ctx, domainHash[:]) - assert.True(t, isPoP) - require.NoError(t, err) - } + // leafCerts contains the names of the leaf certificates we will test. + leafCerts := []string{ + "leaf.certs.com", + "example.certs.com", + } + // Add many more leaf certificates for the test. + for i := 0; i < 20000; i++ { + leafCerts = append(leafCerts, fmt.Sprintf("leaf-%d.auto.certs.com", i+1)) } -} -// TestUpdateRPCAndPC: test updateRPCAndPC() -func TestUpdateRPCAndPC(t *testing.T) { - t.Skip() // deleteme + // Create a random certificate test hierarchy for each leaf. 
+ var certs []*ctx509.Certificate + var certIDs, parentCertIDs []*common.SHA256Output + var certNames [][]string + for _, leaf := range leafCerts { + // Create two mock x509 chains on top of leaf: + certs2, certIDs2, parentCertIDs2, certNames2 := random.BuildTestRandomCertHierarchy(t, leaf) + certs = append(certs, certs2...) + certIDs = append(certIDs, certIDs2...) + parentCertIDs = append(parentCertIDs, parentCertIDs2...) + certNames = append(certNames, certNames2...) + } - pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 20) + // Ingest two mock policies. + data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") require.NoError(t, err) - - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, testdb.NewMockDB()) + pols, err := util.LoadPoliciesFromRaw(data) require.NoError(t, err) - smt.CacheHeightLimit = 233 - updaterDB := testdb.NewMockDB() - updater, err := getMockUpdater(smt, updaterDB) + // Update with certificates and policies. + t0 := time.Now() + err = UpdateWithKeepExisting(ctx, conn, certNames, certIDs, parentCertIDs, + certs, util.ExtractExpirations(certs), pols) require.NoError(t, err) + t.Logf("time needed to update %d certificates: %s", len(certIDs), time.Since(t0)) - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - err = updater.updateRPCAndPC(ctx, pcList, rpcList) + // Coalescing of payloads. 
+ err = CoalescePayloadsForDirtyDomains(ctx, conn) require.NoError(t, err) - // check pc list in memory - for _, pc := range pcList { - domainHash := projectCommon.SHA256Hash32Bytes([]byte(pc.Subject)) - assert.Contains(t, updaterDB.DomainEntriesTable, domainHash) - // domainEntryBytes := updaterDB.DomainEntriesTable[domainHash] - - // domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) - // require.NoError(t, err) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName != pc.CAName { - // assert.Equal(t, pc, caList.PCs) - // } else { - // assert.NotEqual(t, pc, caList.PCs) - // } - // } - - // test if SMT response is correct - _, isPoP, _, _, err := smt.MerkleProof(ctx, domainHash[:]) - assert.True(t, isPoP) + // Check the certificate coalescing: under leaf there must be 4 IDs, for the certs. + for i, leaf := range leafCerts { + domainID := common.SHA256Hash32Bytes([]byte(leaf)) + // t.Logf("%s: %s", leaf, hex.EncodeToString(domainID[:])) + gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesPayload(ctx, domainID) require.NoError(t, err) + expectedSize := common.SHA256Size * len(certs) / len(leafCerts) + require.Len(t, gotCertIDs, expectedSize, "bad length, should be %d but it's %d", + expectedSize, len(gotCertIDs)) + // From the certificate IDs, grab the IDs corresponding to this leaf: + N := len(certIDs) / len(leafCerts) // IDs per leaf = total / leaf_count + expectedCertIDs, expectedCertIDsID := glueSortedIDsAndComputeItsID(certIDs[i*N : (i+1)*N]) + // t.Logf("expectedCertIDs:\t%s\n", hex.EncodeToString(expectedCertIDs)) + // t.Logf("gotCertIDs: \t%s\n", hex.EncodeToString(gotCertIDs)) + require.Equal(t, expectedCertIDs, gotCertIDs) + require.Equal(t, expectedCertIDsID, gotCertIDsID) } - // check rpc list in memory - for _, rpc := range rpcList { - domainHash := projectCommon.SHA256Hash32Bytes([]byte(rpc.Subject)) - assert.Contains(t, updaterDB.DomainEntriesTable, domainHash) - // domainEntryBytes := 
updaterDB.DomainEntriesTable[domainHash] - - // domainEntry, err := common.DeserializeDomainEntry(domainEntryBytes) - // require.NoError(t, err) - - // for _, caList := range domainEntry.Entries { - // if caList.CAName != rpc.CAName { - // assert.Equal(t, rpc, caList.RPCs) - // } else { - // assert.NotEqual(t, rpc, caList.RPCs) - // } - // } - - // test if SMT response is correct - _, isPoP, _, _, err := smt.MerkleProof(ctx, domainHash[:]) - assert.True(t, isPoP) - require.NoError(t, err) + // Check policy coalescing. + policiesPerName := make(map[string][]common.PolicyObject, len(pols)) + for _, pol := range pols { + policiesPerName[pol.Domain()] = append(policiesPerName[pol.Domain()], pol) } -} - -// TestFetchUpdatedDomainHash: test fetchUpdatedDomainHash() -func TestFetchUpdatedDomainHash(t *testing.T) { - smt, err := trie.NewTrie(nil, projectCommon.SHA256Hash, testdb.NewMockDB()) - require.NoError(t, err) - smt.CacheHeightLimit = 233 - - updaterDB := testdb.NewMockDB() - updater, err := getMockUpdater(smt, updaterDB) - require.NoError(t, err) - - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - randomKeys := []projectCommon.SHA256Output{} - for i := 0; i < 15; i++ { - newRandomKey := getRandomHash(t) - updaterDB.UpdatesTable[newRandomKey] = struct{}{} - randomKeys = append(randomKeys, newRandomKey) + for name, policies := range policiesPerName { + id := common.SHA256Hash32Bytes([]byte(name)) + gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesPayload(ctx, id) + require.NoError(t, err) + // For each sequence of policies, compute the ID of their JSON. + polIDs := computeIDsOfPolicies(policies) + expectedPolIDs, expectedPolIDsID := glueSortedIDsAndComputeItsID(polIDs) + t.Logf("expectedPolIDs: %s\n", hex.EncodeToString(expectedPolIDs)) + require.Equal(t, expectedPolIDs, gotPolIDs) + require.Equal(t, expectedPolIDsID, gotPolIDsID) } - - // result is not important. 
- _, err = updater.fetchUpdatedDomainHash(ctx) - require.NoError(t, err) - - // make sure the db is cleaned. - assert.Equal(t, 0, len(updaterDB.UpdatesTable)) } func TestRunWhenFalse(t *testing.T) { @@ -218,14 +165,18 @@ func TestRunWhenFalse(t *testing.T) { } } -func getRandomHash(t *testing.T) projectCommon.SHA256Output { - return projectCommon.SHA256Hash32Bytes(random.RandomBytesForTest(t, 50)) +func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.SHA256Output) { + gluedIDs := common.SortIDsAndGlue(IDs) + // Compute the hash of the glued IDs. + id := common.SHA256Hash32Bytes(gluedIDs) + return gluedIDs, &id } -// get a updater using mock db -func getMockUpdater(smt *trie.Trie, updaterDB *testdb.MockDB) (*MapUpdater, error) { - return &MapUpdater{ - smt: smt, - dbConn: updaterDB, - }, nil +func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output { + IDs := make([]*common.SHA256Output, len(policies)) + for i, pol := range policies { + id := common.SHA256Hash32Bytes(pol.Raw()) + IDs[i] = &id + } + return IDs } diff --git a/pkg/mapserver/updater/updater_test_adapter.go b/pkg/mapserver/updater/updater_test_adapter.go index 2a70561c..829829aa 100644 --- a/pkg/mapserver/updater/updater_test_adapter.go +++ b/pkg/mapserver/updater/updater_test_adapter.go @@ -11,10 +11,10 @@ import ( type UpdaterTestAdapter MapUpdater -func NewMapTestUpdater(config *db.Configuration, root []byte, cacheHeight int) (*UpdaterTestAdapter, error) { - up, err := NewMapUpdater(config, root, cacheHeight) - return (*UpdaterTestAdapter)(up), err -} +// func NewMapTestUpdater(config *db.Configuration, root []byte, cacheHeight int) (*UpdaterTestAdapter, error) { +// up, err := NewMapUpdater(config, root, cacheHeight) +// return (*UpdaterTestAdapter)(up), err +// } func (a *UpdaterTestAdapter) Conn() db.Conn { return (*MapUpdater)(a).dbConn @@ -24,12 +24,6 @@ func (u *UpdaterTestAdapter) UpdateCerts(ctx context.Context, certs []*ctx509.Ce return 
(*MapUpdater)(u).updateCerts(ctx, certs, certChains) } -func (u *UpdaterTestAdapter) UpdateDomainEntriesUsingCerts(ctx context.Context, - certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate, readerNum int) ([]*db.KeyValuePair, int, error) { - - return (*MapUpdater)(u).DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) -} - func (a *UpdaterTestAdapter) FetchUpdatedDomainHash(ctx context.Context) ( []common.SHA256Output, error) { return (*MapUpdater)(a).fetchUpdatedDomainHash(ctx) From ad20a9f3a7097c834014130d4ba9696b5b6ccb3f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 10:16:24 +0200 Subject: [PATCH 128/187] Cleanup pkg/mapserver/updater --- pkg/mapserver/updater/certs_updater.go | 152 --- pkg/mapserver/updater/certs_updater_test.go | 164 --- pkg/mapserver/updater/const.go | 3 - pkg/mapserver/updater/hash.go | 30 - pkg/mapserver/updater/rpc_updater.go | 125 -- pkg/mapserver/updater/rpc_updater_test.go | 172 --- .../updater/testdata/certs/adiq.com.br144.cer | 38 - .../testdata/certs/angloeastern.com101.cer | 39 - .../testdata/certs/archedata.eu514.cer | 41 - .../updater/testdata/certs/bijhuis.be463.cer | 37 - .../testdata/certs/brandell.net972.cer | 23 - .../updater/testdata/certs/capsys.ca960.cer | 35 - .../testdata/certs/carproof.com961.cer | 38 - .../certs/dev2.mortgagebotlos.com82.cer | 25 - .../certs/dfs.core.windows.net859.cer | 58 - .../testdata/certs/efsrecrute.fr620.cer | 26 - .../updater/testdata/domain_list/domains.txt | 1049 ----------------- pkg/mapserver/updater/tools.go | 26 - pkg/mapserver/updater/updater.go | 2 + pkg/mapserver/updater/updater_test.go | 3 +- pkg/mapserver/updater/updater_test_adapter.go | 60 - 21 files changed, 3 insertions(+), 2143 deletions(-) delete mode 100644 pkg/mapserver/updater/certs_updater.go delete mode 100644 pkg/mapserver/updater/certs_updater_test.go delete mode 100644 pkg/mapserver/updater/const.go delete mode 100644 pkg/mapserver/updater/hash.go delete mode 
100644 pkg/mapserver/updater/rpc_updater.go delete mode 100644 pkg/mapserver/updater/rpc_updater_test.go delete mode 100644 pkg/mapserver/updater/testdata/certs/adiq.com.br144.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/angloeastern.com101.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/archedata.eu514.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/bijhuis.be463.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/brandell.net972.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/capsys.ca960.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/carproof.com961.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/dev2.mortgagebotlos.com82.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/dfs.core.windows.net859.cer delete mode 100644 pkg/mapserver/updater/testdata/certs/efsrecrute.fr620.cer delete mode 100644 pkg/mapserver/updater/testdata/domain_list/domains.txt delete mode 100644 pkg/mapserver/updater/tools.go delete mode 100644 pkg/mapserver/updater/updater_test_adapter.go diff --git a/pkg/mapserver/updater/certs_updater.go b/pkg/mapserver/updater/certs_updater.go deleted file mode 100644 index 2907bd2f..00000000 --- a/pkg/mapserver/updater/certs_updater.go +++ /dev/null @@ -1,152 +0,0 @@ -package updater - -import ( - "fmt" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/domain" - mcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/util" -) - -// TODO(yongzhe): make the list if size is already known. -// TODO(yongzhe): unit test for updateDomainEntryWithRPC and - -type uniqueSet map[common.SHA256Output]struct{} -type uniqueStringSet map[string]struct{} - -// return affected domains. -// First return value: map of hashes of updated domain name. 
TODO(yongzhe): change this to a list maybe -// Second return value: "domain name" -> certs. So later, one can look through the map to decide which certs to -// -// added to which domain. -func GetAffectedDomainAndCertMap(certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) (uniqueSet, - map[string][]*ctx509.Certificate, map[string][][]*ctx509.Certificate) { - // Set with the SHAs of the updated domains. - affectedDomainsMap := make(uniqueSet) - - // Map "domain name" -> cert list (certs to be added to this domain). - domainCertMap := make(map[string][]*ctx509.Certificate) - - // Analogous to the map above except that we map "domain name" -> cert chains. - domainCertChainMap := make(map[string][][]*ctx509.Certificate) - - // extract the affected domain of every certificates - for i, cert := range certs { - // get cert chain for cert - certChain := certChains[i] - - // get unique list of domain names - domains := util.ExtractCertDomains(cert) - if len(domains) == 0 { - continue - } - - // get affected domains - affectedDomains := domain.ExtractAffectedDomains(domains) - if len(affectedDomains) == 0 { - continue - } - - for _, domainName := range affectedDomains { - var domainNameHash common.SHA256Output - copy(domainNameHash[:], common.SHA256Hash([]byte(domainName))) - - affectedDomainsMap[domainNameHash] = struct{}{} - _, ok := domainCertMap[domainName] - if ok { - domainCertMap[domainName] = append(domainCertMap[domainName], cert) - domainCertChainMap[domainName] = append(domainCertChainMap[domainName], certChain) - } else { - domainCertMap[domainName] = []*ctx509.Certificate{cert} - domainCertChainMap[domainName] = [][]*ctx509.Certificate{certChain} - } - } - } - return affectedDomainsMap, domainCertMap, domainCertChainMap -} - -// update domain entries -func UpdateDomainEntries( - domainEntries map[common.SHA256Output]*mcommon.DomainEntry, - certDomainMap map[string][]*ctx509.Certificate, - certChainDomainMap map[string][][]*ctx509.Certificate, -) 
(uniqueSet, error) { - - panic("deprecated: should never be called") - updatedDomainHash := make(uniqueSet) - // read from previous map - // the map records: domain - certs pair - // Which domain will be affected by which certificates - for domainName, certs := range certDomainMap { - certChains := certChainDomainMap[domainName] - //iterStart := time.Now() - for i, cert := range certs { - certChain := certChains[i] - var domainNameHash common.SHA256Output - copy(domainNameHash[:], common.SHA256Hash([]byte(domainName))) - // get domain entries - domainEntry, ok := domainEntries[domainNameHash] - // if domain entry does not exist in the db - if !ok { - // create an empty domain entry - newDomainEntry := &mcommon.DomainEntry{DomainName: domainName} - domainEntries[domainNameHash] = newDomainEntry - domainEntry = newDomainEntry - } - - isUpdated := updateDomainEntry(domainEntry, cert, certChain) - if isUpdated { - // flag the updated domains - updatedDomainHash[domainNameHash] = struct{}{} - } - - } - } - - return updatedDomainHash, nil -} - -// updateDomainEntry: insert certificate into correct CAEntry -// return: if this domain entry is updated -func updateDomainEntry(domainEntry *mcommon.DomainEntry, cert *ctx509.Certificate, certChain []*ctx509.Certificate) bool { - panic("deprecated: should never be called") - return domainEntry.AddCert(cert, certChain) -} - -// GetDomainEntriesToWrite: get updated domains, and extract the domain bytes -func GetDomainEntriesToWrite(updatedDomain uniqueSet, - domainEntries map[common.SHA256Output]*mcommon.DomainEntry) (map[common.SHA256Output]*mcommon.DomainEntry, error) { - - result := make(map[common.SHA256Output]*mcommon.DomainEntry) - for k := range updatedDomain { - domainEntry, ok := domainEntries[k] - if !ok { - return nil, fmt.Errorf("getDomainEntriesToWrite | updated domain not recorded") - } - result[k] = domainEntry - sortDomainEntry(domainEntry) - } - return result, nil -} - -// DeletemeSerializeUpdatedDomainEntries: 
serialize the updated domains -func DeletemeSerializeUpdatedDomainEntries(domains map[common.SHA256Output]*mcommon.DomainEntry) ( - []*db.KeyValuePair, error) { - - panic("this function is deprecated and should never be called") - - result := make([]*db.KeyValuePair, 0, len(domains)) - - for domainNameHash, domainEntry := range domains { - domainBytes, err := mcommon.DeletemeSerializeDomainEntry(domainEntry) - if err != nil { - return nil, fmt.Errorf("serializeUpdatedDomainEntries | SerializedDomainEntry | %w", err) - } - - result = append(result, &db.KeyValuePair{Key: domainNameHash, Value: domainBytes}) - } - return result, nil -} diff --git a/pkg/mapserver/updater/certs_updater_test.go b/pkg/mapserver/updater/certs_updater_test.go deleted file mode 100644 index 6fe54bb0..00000000 --- a/pkg/mapserver/updater/certs_updater_test.go +++ /dev/null @@ -1,164 +0,0 @@ -package updater - -import ( - "bytes" - "io/ioutil" - "testing" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/domain" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/util" -) - -// TestUpdateDomainEntriesUsingCerts: test UpdateDomainEntriesUsingCerts -// This test tests the individual functions of the UpdateDomainEntriesUsingCerts() -func TestUpdateDomainEntriesUsingCerts(t *testing.T) { - t.Skip() // deleteme - - certs := []*ctx509.Certificate{} - - // load test certs - files, err := ioutil.ReadDir("./testdata/certs/") - require.NoError(t, err, "ioutil.ReadDir") - certChains := make([][]*ctx509.Certificate, len(files)) - for _, file := range files { - cert, err := util.CertificateFromPEMFile("./testdata/certs/" + file.Name()) - require.NoError(t, err) - certs = append(certs, cert) - } - - // get affected domain map and domain 
cert map - affectedDomainsMap, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap( - certs, certChains) - - // test if all the certs are correctly added to the affectedDomainsMap and domainCertMap - for _, cert := range certs { - // get common name and SAN of the certificate - domainNames := util.ExtractCertDomains(cert) - - // get the valid domain name from domainNames list - affectedDomains := domain.ExtractAffectedDomains(domainNames) - if len(affectedDomains) == 0 { - // if cert does not have a valid domain name - continue - } - - // check the affected domain is correctly added to the affectedDomains - for _, affectedDomain := range affectedDomains { - var affectedNameHash common.SHA256Output - copy(affectedNameHash[:], common.SHA256Hash([]byte(affectedDomain))) - - _, ok := affectedDomainsMap[affectedNameHash] - assert.True(t, ok, "domain not found in affectedDomainsMap") - } - - // check if the domainCertMap is correct - for domainName, newCerts := range domainCertMap { - if includedIn(affectedDomains, domainName) { - isFound := false - for _, newCert := range newCerts { - if bytes.Equal(newCert.Raw, cert.Raw) { - isFound = true - } - } - assert.True(t, isFound, "cert not found in domainCertMap") - } else { - for _, newCert := range newCerts { - assert.False(t, bytes.Equal(newCert.Raw, cert.Raw), "cert should not be here") - } - } - } - } - - // empty domainEntriesMap - domainEntriesMap := make(map[common.SHA256Output]*mapCommon.DomainEntry) - updatedDomains, err := UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) - require.NoError(t, err, "updateDomainEntries") - - assert.Equal(t, len(updatedDomains), len(affectedDomainsMap), "len(updatedDomains) should equals to len(affectedDomainsMap)") - - // check if domainEntriesMap is correctly updated - for _, cert := range certs { - domainNames := util.ExtractCertDomains(cert) - // caName := cert.Issuer.String() - - // check if this cert has valid name - affectedDomains := 
domain.ExtractAffectedDomains(domainNames) - if len(affectedDomains) == 0 { - continue - } - - // check domainEntriesMap - for _, domainName := range affectedDomains { - var domainHash common.SHA256Output - copy(domainHash[:], common.SHA256Hash([]byte(domainName))) - - domainEntry, ok := domainEntriesMap[domainHash] - assert.True(t, ok, "domainEntriesMap error") - - // check domain name is correct - assert.True(t, domainEntry.DomainName == domainName) - // for _, caList := range domainEntry.Entries { - // if caList.CAName == caName { - // isFound := false - // for _, newCert := range caList.DomainCerts { - // if bytes.Equal(newCert, cert.Raw) { - // isFound = true - // } - // } - // assert.True(t, isFound, "cert not found") - // } else { - // for _, newCert := range caList.DomainCerts { - // assert.False(t, bytes.Equal(newCert, cert.Raw), "cert should not be here") - // } - // } - // } - } - } - - // get the domain entries only if they are updated, from DB - domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) - require.NoError(t, err) - - // serialized the domainEntry -> key-value pair - _, err = DeletemeSerializeUpdatedDomainEntries(domainEntriesToWrite) - require.NoError(t, err) -} - -// TestUpdateSameCertTwice: update the same certs twice, number of updates should be zero -func TestUpdateSameCertTwice(t *testing.T) { - t.Skip() // deleteme - certs := []*ctx509.Certificate{} - // check if - files, err := ioutil.ReadDir("./testdata/certs/") - require.NoError(t, err, "ioutil.ReadDir") - certChains := make([][]*ctx509.Certificate, len(files)) - for _, file := range files { - cert, err := util.CertificateFromPEMFile("./testdata/certs/" + file.Name()) - require.NoError(t, err) - certs = append(certs, cert) - } - - _, domainCertMap, domainCertChainMap := GetAffectedDomainAndCertMap(certs, certChains) - - domainEntriesMap := make(map[common.SHA256Output]*mapCommon.DomainEntry) - - // update domain entry with certs - updatedDomains, err 
:= UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) - require.NoError(t, err, "updateDomainEntries") - - // Length of updatedDomains should be that of the affected domains: - assert.Equal(t, len(domainCertMap), len(updatedDomains), "updated domain should be 0") - - // update domain entry with same certs - updatedDomains, err = UpdateDomainEntries(domainEntriesMap, domainCertMap, domainCertChainMap) - require.NoError(t, err, "updateDomainEntries") - - // Now the length of updatedDomains should be zero. - assert.Equal(t, 0, len(updatedDomains), "updated domain should be 0") -} diff --git a/pkg/mapserver/updater/const.go b/pkg/mapserver/updater/const.go deleted file mode 100644 index 4f3485d9..00000000 --- a/pkg/mapserver/updater/const.go +++ /dev/null @@ -1,3 +0,0 @@ -package updater - -const readBatchSize = 100000 diff --git a/pkg/mapserver/updater/hash.go b/pkg/mapserver/updater/hash.go deleted file mode 100644 index 36c93cab..00000000 --- a/pkg/mapserver/updater/hash.go +++ /dev/null @@ -1,30 +0,0 @@ -package updater - -// UpdateInput: key-value pair for updating -// key: hash of domain name -// value: hash of serilised DomainEntry - -// // DeletemeHashDomainEntriesThenSort: hash the DomainEntry, then sort them according to key -// func DeletemeHashDomainEntriesThenSort(domainEntries []mapCommon.DomainEntry) ([]UpdateInput, error) { -// result := make([]UpdateInput, 0, len(domainEntries)) -// for _, v := range domainEntries { -// domainEntryBytes, err := mapCommon.SerializeDomainEntry(&v) -// if err != nil { -// return nil, fmt.Errorf("HashDomainEntriesThenSort | SerializedDomainEntry | %w", err) -// } -// var domainHash common.SHA256Output -// copy(domainHash[:], common.SHA256Hash([]byte(v.DomainName))) -// hashInput := UpdateInput{ -// Key: domainHash, -// Value: common.SHA256Hash(domainEntryBytes), -// } -// result = append(result, hashInput) -// } - -// // sort according to key -// sort.Slice(result, func(i, j int) bool { -// return 
bytes.Compare(result[i].Key[:], result[j].Key[:]) == -1 -// }) - -// return result, nil -// } diff --git a/pkg/mapserver/updater/rpc_updater.go b/pkg/mapserver/updater/rpc_updater.go deleted file mode 100644 index 06d1d4c1..00000000 --- a/pkg/mapserver/updater/rpc_updater.go +++ /dev/null @@ -1,125 +0,0 @@ -package updater - -import ( - projectCommon "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - -// newUpdates: structure for updates -type newUpdates struct { - rpc []*projectCommon.RPC - pc []*projectCommon.SP -} - -// getAffectedDomainAndCertMapPCAndRPC: return a map of affected domains, and cert map -func getAffectedDomainAndCertMapPCAndRPC(rpc []*projectCommon.RPC, pc []*projectCommon.SP) ( - uniqueSet, map[string]*newUpdates) { - - // unique list of the updated domains - affectedDomainsMap := make(uniqueSet) - domainCertMap := make(map[string]*newUpdates) - - // deal with RPC - for _, newRPC := range rpc { - domainName := newRPC.Subject - if !domain.IsValidDomain(domainName) { - continue - } - - var domainNameHash projectCommon.SHA256Output - copy(domainNameHash[:], projectCommon.SHA256Hash([]byte(domainName))) - - // attach domain hash to unique map - affectedDomainsMap[domainNameHash] = struct{}{} - certMapElement, ok := domainCertMap[domainName] - if ok { - certMapElement.rpc = append(certMapElement.rpc, newRPC) - } else { - domainCertMap[domainName] = &newUpdates{rpc: []*projectCommon.RPC{newRPC}} - } - } - - // deal with PC - for _, newPC := range pc { - domainName := newPC.Subject - if !domain.IsValidDomain(domainName) { - continue - } - - var domainNameHash projectCommon.SHA256Output - copy(domainNameHash[:], projectCommon.SHA256Hash([]byte(domainName))) - - affectedDomainsMap[domainNameHash] = struct{}{} - certMapElement, ok := domainCertMap[domainName] - if ok { - certMapElement.pc = append(certMapElement.pc, newPC) - } else { - 
domainCertMap[domainName] = &newUpdates{pc: []*projectCommon.SP{newPC}} - } - } - return affectedDomainsMap, domainCertMap -} - -// updateDomainEntriesWithRPCAndPC: update domain entries -func updateDomainEntriesWithRPCAndPC(domainEntries map[projectCommon.SHA256Output]*common.DomainEntry, - certDomainMap map[string]*newUpdates) (uniqueSet, error) { - updatedDomainHash := make(uniqueSet) - // read from previous map - // the map records: domain - certs pair - // Which domain will be affected by which certificates - for domainName, updates := range certDomainMap { - for _, rpc := range updates.rpc { - var domainNameHash projectCommon.SHA256Output - copy(domainNameHash[:], projectCommon.SHA256Hash([]byte(domainName))) - - // get domain entries - domainEntry, ok := domainEntries[domainNameHash] - // if domain entry exists in the db - if !ok { - // create an empty domain entry - newDomainEntry := &common.DomainEntry{DomainName: domainName} - domainEntries[domainNameHash] = newDomainEntry - domainEntry = newDomainEntry - } - - isUpdated := updateDomainEntryWithRPC(domainEntry, rpc) - if isUpdated { - // flag the updated domains - updatedDomainHash[domainNameHash] = struct{}{} - } - } - - for _, pc := range updates.pc { - var domainNameHash projectCommon.SHA256Output - copy(domainNameHash[:], projectCommon.SHA256Hash([]byte(domainName))) - - // get domain entries - domainEntry, ok := domainEntries[domainNameHash] - // if domain entry exists in the db - if !ok { - // create an empty domain entry - newDomainEntry := &common.DomainEntry{DomainName: domainName} - domainEntries[domainNameHash] = newDomainEntry - domainEntry = newDomainEntry - } - - isUpdated := updateDomainEntryWithPC(domainEntry, pc) - if isUpdated { - // flag the updated domains - updatedDomainHash[domainNameHash] = struct{}{} - } - } - } - return updatedDomainHash, nil -} - -// updateDomainEntryWithRPC: insert RPC into correct CAEntry -func updateDomainEntryWithRPC(domainEntry *common.DomainEntry, rpc 
*projectCommon.RPC) bool { - return domainEntry.AddRPC(rpc) -} - -// updateDomainEntryWithPC: insert PC into correct CAEntry -func updateDomainEntryWithPC(domainEntry *common.DomainEntry, pc *projectCommon.SP) bool { - return domainEntry.AddPC(pc) -} diff --git a/pkg/mapserver/updater/rpc_updater_test.go b/pkg/mapserver/updater/rpc_updater_test.go deleted file mode 100644 index 35331103..00000000 --- a/pkg/mapserver/updater/rpc_updater_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package updater - -import ( - "testing" - - projectCommon "github.com/netsec-ethz/fpki/pkg/common" - - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// TestRPCAndPC: test getAffectedDomainAndCertMapPCAndRPC() -func TestRPCAndPC(t *testing.T) { - // get PC and RPC - pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 0) - require.NoError(t, err, "GetPCAndRPC error") - - // add the affectedDomainsSet and domainCertMap - affectedDomainsSet, domainCertMap := getAffectedDomainAndCertMapPCAndRPC(rpcList, pcList) - - // check affectedDomainsMap and domainCertMap are correct - for _, pc := range pcList { - subjectName := pc.Subject - var subjectNameHash projectCommon.SHA256Output - copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) - - assert.Contains(t, affectedDomainsSet, subjectNameHash) - - for domainHash, newUpdate := range domainCertMap { - if domainHash == subjectName { - assert.Contains(t, newUpdate.pc, pc) - } else { - assert.NotContains(t, newUpdate.pc, pc) - } - } - } - - // check affectedDomainsMap and domainCertMap are correct - for _, rpc := range rpcList { - subjectName := rpc.Subject - var subjectNameHash projectCommon.SHA256Output - copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) - - _, ok := 
affectedDomainsSet[subjectNameHash] - assert.True(t, ok, "domain not found") - - for domainHash, newUpdate := range domainCertMap { - if domainHash == subjectName { - assert.Contains(t, newUpdate.rpc, rpc) - } else { - assert.NotContains(t, newUpdate.rpc, rpc) - } - } - } - - // length should be the same - assert.Equal(t, len(affectedDomainsSet), len(domainCertMap)) - -} - -// TestUpdateDomainEntriesWithRPCAndPC: test updateDomainEntriesWithRPCAndPC(), getDomainEntriesToWrite() -// and serializeUpdatedDomainEntries() -func TestUpdateDomainEntriesWithRPCAndPC(t *testing.T) { - // // get PC and RPC - // pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 0) - // require.NoError(t, err, "GetPCAndRPC error") - - // // empty map(mock result from db). - // domainEntriesMap := make(map[projectCommon.SHA256Output]*common.DomainEntry) - - // // add the affectedDomainsSet and domainCertMap - // _, domainCertMap := getAffectedDomainAndCertMapPCAndRPC(rpcList, pcList) - - // updatedDomains, err := updateDomainEntriesWithRPCAndPC(domainEntriesMap, domainCertMap) - // require.NoError(t, err) - // assert.Equal(t, len(updatedDomains), len(domainEntriesMap), "size of domainEntriesMap should be the size of updatedDomains") - - // // check PC - // for _, pc := range pcList { - // subjectName := pc.Subject - // caName := pc.CAName - // var subjectNameHash projectCommon.SHA256Output - // copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) - - // for domainHash, domainEntry := range domainEntriesMap { - // switch { - // case domainHash == subjectNameHash: - // isFound := false - // for _, caList := range domainEntry.Entries { - // if caList.CAName == caName { - // isFound = true - // assert.True(t, caList.PCs.Equal(*pc), "PC missing") - // } else { - // assert.False(t, caList.PCs.Equal(*pc), "PC in wrong place") - // } - // } - // assert.True(t, isFound, "new PC not included in domainEntriesMap") - // case domainHash != 
subjectNameHash: - // for _, caList := range domainEntry.Entries { - // assert.False(t, caList.PCs.Equal(*pc)) - // } - // } - // } - // } - - // // check RPC - // for _, rpc := range rpcList { - // subjectName := rpc.Subject - // caName := rpc.CAName - // var subjectNameHash projectCommon.SHA256Output - // copy(subjectNameHash[:], projectCommon.SHA256Hash([]byte(subjectName))) - - // for domainHash, domainEntry := range domainEntriesMap { - // switch { - // case domainHash == subjectNameHash: - // isFound := false - // for _, caList := range domainEntry.Entries { - // if caList.CAName == caName { - // isFound = true - // assert.True(t, caList.RPCs.Equal(rpc), "RPC missing") - // } else { - // assert.False(t, caList.RPCs.Equal(rpc), "RPC in wrong place") - // } - // } - // assert.True(t, isFound, "new RPC not included in domainEntriesMap") - // case domainHash != subjectNameHash: - // for _, caList := range domainEntry.Entries { - // assert.False(t, caList.RPCs.Equal(rpc)) - // } - // } - // } - // } - - // // get the domain entries only if they are updated - // domainEntriesToWrite, err := GetDomainEntriesToWrite(updatedDomains, domainEntriesMap) - // require.NoError(t, err) - - // // serialize the domainEntry -> key-value pair - // _, err = SerializeUpdatedDomainEntries(domainEntriesToWrite) - // require.NoError(t, err) -} - -// TestUpdateSameRPCTwice: update the same RPC twice, number of updates should be zero -func TestUpdateSameRPCTwice(t *testing.T) { - t.Skip() // deleteme - - pcList, rpcList, err := logpicker.GetPCAndRPC("./testdata/domain_list/domains.txt", 0, 0, 0) - require.NoError(t, err, "GetPCAndRPC error") - - _, domainCertMap := getAffectedDomainAndCertMapPCAndRPC(rpcList, pcList) - - domainEntriesMap := make(map[projectCommon.SHA256Output]*common.DomainEntry) - - updatedDomains, err := updateDomainEntriesWithRPCAndPC(domainEntriesMap, domainCertMap) - require.NoError(t, err, "updateDomainEntriesWithRPCAndPC error") - assert.Equal(t, 
len(updatedDomains), len(domainEntriesMap), "size of domainEntriesMap should be the size of updatedDomains") - - updatedDomains, err = updateDomainEntriesWithRPCAndPC(domainEntriesMap, domainCertMap) - require.NoError(t, err, "updateDomainEntriesWithRPCAndPC error") - assert.Equal(t, 0, len(updatedDomains), "updated domain should be 0") -} - -func includedIn(input []string, searchedString string) bool { - for _, v := range input { - if v == searchedString { - return true - } - } - return false -} diff --git a/pkg/mapserver/updater/testdata/certs/adiq.com.br144.cer b/pkg/mapserver/updater/testdata/certs/adiq.com.br144.cer deleted file mode 100644 index 09d2bc35..00000000 --- a/pkg/mapserver/updater/testdata/certs/adiq.com.br144.cer +++ /dev/null @@ -1,38 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGrjCCBZagAwIBAgIIE/08vT21V3owDQYJKoZIhvcNAQELBQAwgbQxCzAJBgNV -BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRow -GAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjEtMCsGA1UECxMkaHR0cDovL2NlcnRz -LmdvZGFkZHkuY29tL3JlcG9zaXRvcnkvMTMwMQYDVQQDEypHbyBEYWRkeSBTZWN1 -cmUgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IC0gRzIwHhcNMTkwNTI0MTUwMjM0WhcN -MjEwNTI0MTUwMjM0WjA7MSEwHwYDVQQLExhEb21haW4gQ29udHJvbCBWYWxpZGF0 -ZWQxFjAUBgNVBAMMDSouYWRpcS5jb20uYnIwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQDQFlWfzZmTZICFcUzaglr7A+fH6lwtfyBPQgHVKxGv/drit1W9 -8NUBlWwkn5PUmEWYDF3gP/HDdN2oGplWdt6HI1Y9IKB3xGFWMrKfY5Z317+omfKC -oCPpUNYVty96wT3m53CB6mglZWywvMYCUuch0BYPh/L9PrGKPOFDVBGCUPA6Dh1z -0r5akhy3iUfT2OvYvpMBsWoUIo8sqD6VnSKAVixpBdRyBukJ7Qf47G1cOGFbK1Uy -D29b3WDUBKGNhGsC2kMG1fH6W1aJgE+4+M5Nqbgzrgf+f5CbsVhM4Q9I3KzZDokC -eMsn1jT8UJf0pThUVW7m/6OqKWCceyFRdx+pAgMBAAGjggM6MIIDNjAMBgNVHRMB -Af8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAOBgNVHQ8BAf8E -BAMCBaAwOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL2NybC5nb2RhZGR5LmNvbS9n -ZGlnMnMxLTExMzEuY3JsMF0GA1UdIARWMFQwSAYLYIZIAYb9bQEHFwEwOTA3Bggr -BgEFBQcCARYraHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNvbS9yZXBvc2l0 -b3J5LzAIBgZngQwBAgEwdgYIKwYBBQUHAQEEajBoMCQGCCsGAQUFBzABhhhodHRw 
-Oi8vb2NzcC5nb2RhZGR5LmNvbS8wQAYIKwYBBQUHMAKGNGh0dHA6Ly9jZXJ0aWZp -Y2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeS9nZGlnMi5jcnQwHwYDVR0jBBgw -FoAUQMK9J47MNIMwojPX+2yz8LQsgM4wJQYDVR0RBB4wHIINKi5hZGlxLmNvbS5i -coILYWRpcS5jb20uYnIwHQYDVR0OBBYEFAMSUloRqH1nuPQxJgWs3YHxqw/cMIIB -fQYKKwYBBAHWeQIEAgSCAW0EggFpAWcAdgCkuQmQtBhYFIe7E6LMZ3AKPDWYBPkb -37jjd80OyA3cEAAAAWrqXXn4AAAEAwBHMEUCIQDdUUQAL8sTLTDpJhajvhRr8H5G -sx8pQECKqRpFYNNe7AIgeGUzC57nXHrZ59GFIy6Oeahnw3C1p89+abNCVh2bbHUA -dQDuS723dc5guuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9ywAAAWrqXX8RAAAEAwBG -MEQCIF5ik8ZpFpuMTbXOEm2PmfwFWRdvDQwyh2cGiCwguOBlAiAr2POWVLFD7uz/ -0cFEuxmg3putQDfQPpkPV0Mz16ulOgB2AESUZS6w7s6vxEAH2Kj+KMDa5oK+2Msx -tT/TM5a1toGoAAABaupdg/0AAAQDAEcwRQIgRPyx0eYTHDAcI0gf4XiRJGAq3EDK -phC8YHe3myjX+acCIQDoa/zVhG7XUsNbV0FPitBnVSNIpuPGFhYfC8wHd1Q92jAN -BgkqhkiG9w0BAQsFAAOCAQEACHuaEL9h0Vp0oYmM+UA2uxLSls+MbCtOjoXfLxvA -6dj1TH1LxoD3WR6sdNJPQZBaAdli3saJdeGfDnCD/oCFyCszLvDlKXrfDjL30rX1 -DpZcAkILIMui8TtOxPy1Uvr6Tw01gcaMXH/XX00DqZbqMGT2eyY4/jmy2U6cd7mW -9Xl//47GGbNtKdTGY79SsKBRiuHJjG0LUmaPlonywc545w0VHTnCrU8maZDlSGtI -RkdjANUKhxmm7sU7VNUiZjTKptwImDuHrsPodnPMx3SnSO7yr9dgqS9QUTd0LzKp -f3rgrxlFbIeE68TEN2BTbTGgzPED7kknhBuRBtVu6C2mmg== ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/angloeastern.com101.cer b/pkg/mapserver/updater/testdata/certs/angloeastern.com101.cer deleted file mode 100644 index f023fa9e..00000000 --- a/pkg/mapserver/updater/testdata/certs/angloeastern.com101.cer +++ /dev/null @@ -1,39 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGvjCCBaagAwIBAgIJAKjUZF9hK539MA0GCSqGSIb3DQEBCwUAMIG0MQswCQYD -VQQGEwJVUzEQMA4GA1UECBMHQXJpem9uYTETMBEGA1UEBxMKU2NvdHRzZGFsZTEa -MBgGA1UEChMRR29EYWRkeS5jb20sIEluYy4xLTArBgNVBAsTJGh0dHA6Ly9jZXJ0 -cy5nb2RhZGR5LmNvbS9yZXBvc2l0b3J5LzEzMDEGA1UEAxMqR28gRGFkZHkgU2Vj -dXJlIENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTE5MDUyMTAyMjIxOFoX -DTIxMDYxMjA5MjcwMVowQDEhMB8GA1UECxMYRG9tYWluIENvbnRyb2wgVmFsaWRh -dGVkMRswGQYDVQQDDBIqLmFuZ2xvZWFzdGVybi5jb20wggEiMA0GCSqGSIb3DQEB 
-AQUAA4IBDwAwggEKAoIBAQCoAO1rJP5WnHpOALq/+4HfLj67XfY4pxQCzul5BFD/ -0gPHusv0jtTbC/kQBCLssWEsRAvtl8LA7r2GU8C30AgaMYoPy99wHF7LHLM6MrPR -5+D7Z+uoEbYr/R8YEMlih3P1ua83FdH4ipk5YpexF20vY9QLcW4zZl0KqiQ1KZDf -HyQhTKUa9/qFQ//TpHkad55MhQ9VqH3eqRyX4HRnPWZPLuBIOxkUMR3VPL8pSu23 -IKv2rhs9kSVVD0hIGY6/W+a8Sf9M9usANtVqNDy+ed2Y/qmc8/8BeT+7ED6o3PRd -YPon/qa5wC3zDpuG/32yKPaCFBFK0JcdP1yAIViunfHxAgMBAAGjggNEMIIDQDAM -BgNVHRMBAf8EAjAAMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAOBgNV -HQ8BAf8EBAMCBaAwOAYDVR0fBDEwLzAtoCugKYYnaHR0cDovL2NybC5nb2RhZGR5 -LmNvbS9nZGlnMnMxLTExMjQuY3JsMF0GA1UdIARWMFQwSAYLYIZIAYb9bQEHFwEw -OTA3BggrBgEFBQcCARYraHR0cDovL2NlcnRpZmljYXRlcy5nb2RhZGR5LmNvbS9y -ZXBvc2l0b3J5LzAIBgZngQwBAgEwdgYIKwYBBQUHAQEEajBoMCQGCCsGAQUFBzAB -hhhodHRwOi8vb2NzcC5nb2RhZGR5LmNvbS8wQAYIKwYBBQUHMAKGNGh0dHA6Ly9j -ZXJ0aWZpY2F0ZXMuZ29kYWRkeS5jb20vcmVwb3NpdG9yeS9nZGlnMi5jcnQwHwYD -VR0jBBgwFoAUQMK9J47MNIMwojPX+2yz8LQsgM4wLwYDVR0RBCgwJoISKi5hbmds -b2Vhc3Rlcm4uY29tghBhbmdsb2Vhc3Rlcm4uY29tMB0GA1UdDgQWBBSMcKbQvBO0 -89hzbZ+kxYQ5+vZUAjCCAX0GCisGAQQB1nkCBAIEggFtBIIBaQFnAHUApLkJkLQY -WBSHuxOizGdwCjw1mAT5G9+443fNDsgN3BAAAAFq2DJcjAAABAMARjBEAiAYzhEu -nRyNauN5c65kz1XK70qaXTT29sno5Dv06XFSSwIgKNX7M+5O7u1tEt7/f7dQqP3n -0YR8L37xvUr2J9J6pjEAdgDuS723dc5guuFCaR+r4Z5mow9+X7By2IMAxHuJeqj9 -ywAAAWrYMmGdAAAEAwBHMEUCIQD4sNHzY0gjkDMttpo/Biwy4IOpbrh1hRKZyhKR -IZW8tgIgah9WkmopHqqECXY6B56WtweqsWVg/f4J9fVzncSwW7cAdgBElGUusO7O -r8RAB9io/ijA2uaCvtjLMbU/0zOWtbaBqAAAAWrYMmZ5AAAEAwBHMEUCIQCFQmH8 -Iz3pIRfg9mSwW9U1MeIa5kCzMa2JxeB/h5pyxQIgXOdyrtLiSAC7ks5fEGQbvsrT -yZxzBpNTa3tVpNEMUIcwDQYJKoZIhvcNAQELBQADggEBAHkE1zEa+xMXk+IveFWx -gDqIKM+X6XvklQPcp1d/oBgG/0ZzDzJbCfMyoLV+7PMpfjCUtZA/w8MDMQM55xX2 -08Fw3A13m3xFvwb42mlbgvB0GG1xqQeVg2iltyxq5xyZp3UTQriQacJHeF4Pbch5 -WwZzlN/qIT4iC5MZdzNingiNNu+CuEN0Wf+tuLmRwp3zQJ8iDemnqiwuM+9klTQc -piN5nzh7iOS9I7qz+F/AzTmhpo+H/XC3RKXyAzOn/L7WRKWHa7gCaV3ucdCd+j18 -cNDF3Jr/W+6TvfBnVKZtMlnp53ufqwYtqhKhgDkW+gbEstS1smAUp/XMADSIVVp+ -qjo= ------END CERTIFICATE----- diff --git 
a/pkg/mapserver/updater/testdata/certs/archedata.eu514.cer b/pkg/mapserver/updater/testdata/certs/archedata.eu514.cer deleted file mode 100644 index 5b09724c..00000000 --- a/pkg/mapserver/updater/testdata/certs/archedata.eu514.cer +++ /dev/null @@ -1,41 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIHNTCCBh2gAwIBAgIQA2XkAx47W39k8rDcfEd9DTANBgkqhkiG9w0BAQsFADBg -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMR8wHQYDVQQDExZSYXBpZFNTTCBUTFMgUlNBIENBIEcx -MB4XDTE5MDUyNTAwMDAwMFoXDTIxMDUyNDEyMDAwMFowGTEXMBUGA1UEAwwOKi5h -cmNoZWRhdGEuZXUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCvsOC7 -eJfEry0feM3dY001Uxwt/35LNtRdvgEAFRjTHNN8nPVgsRJklpNyiqfRCctfl7vE -0KhsHO98ChgPVlWKlU0X5e8gZgAf+rUrAXOQlGTSU3aMzGTC69SOQaooNh8wxsdw -ykjyN31VC3MKF/x8fqNXZIhKupOl3WbbPjS+GGTwwuzbDQMJPVhv77uxMB0tKwIL -jIIAMeSR6vQ2LLPCX70ylPkZIA/C3YEd+V5+o+DSL77j3NiEuSUVzmbKpBQ4+xKP -sL8zSo8j9JQ5ecEjsqsDB1+RUIicik7iLilxo6SVCdbndXx4sp9SPgX+P0FZTvxc -7tnNNdd0BLOxF7hX0FO6k6jg1OHQag0oja9o8MgFgd/14JHriuk7SaHxW+jbDcfU -CqWoSH0PgI+jtupI4CUuTZegFbwut9gkjJDZQbcjebXEDmjCFbI9/sFyapDpKArO -1VTz0FZA0JUDc7tnOEWXwlC2CcMtXgIuNRhSreMz7/uxyjFvIN107up1Mb6jxrhm -7olcGMv874X7n9ldmfwtvR4ttWPHHUqnHuF/2Pqppf2ReuVXC7XoevlOr+nTv1Fp -tI1kz7wCerShc+WHk/YJqxuZnogb8RRNIF6xuSEL71V7Bdyrj3KKEEXKEUW+eZZc -BFyNFdivk4872MRKvWxOZ+QHKY8Ru7pGwUPEUQIDAQABo4IDMDCCAywwHwYDVR0j -BBgwFoAUDNtsgkkPSmcKuBTuesRIUojrVjgwHQYDVR0OBBYEFOf/668/e/x9uQEn -0SUW6B8HXV28MCcGA1UdEQQgMB6CDiouYXJjaGVkYXRhLmV1ggxhcmNoZWRhdGEu -ZXUwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD -AjA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY2RwLnJhcGlkc3NsLmNvbS9SYXBp -ZFNTTFRMU1JTQUNBRzEuY3JsMEwGA1UdIARFMEMwNwYJYIZIAYb9bAECMCowKAYI -KwYBBQUHAgEWHGh0dHBzOi8vd3d3LmRpZ2ljZXJ0LmNvbS9DUFMwCAYGZ4EMAQIB -MHYGCCsGAQUFBwEBBGowaDAmBggrBgEFBQcwAYYaaHR0cDovL3N0YXR1cy5yYXBp -ZHNzbC5jb20wPgYIKwYBBQUHMAKGMmh0dHA6Ly9jYWNlcnRzLnJhcGlkc3NsLmNv -bS9SYXBpZFNTTFRMU1JTQUNBRzEuY3J0MAkGA1UdEwQCMAAwggF+BgorBgEEAdZ5 
-AgQCBIIBbgSCAWoBaAB2AO5Lvbd1zmC64UJpH6vhnmajD35fsHLYgwDEe4l6qP3L -AAABavD+Qh8AAAQDAEcwRQIgWnh5pNAf6+s2CgVLdoKL9BKnZNpcBt4tO8HelCH3 -6rgCIQDM+f3Dbr+KxE0DEkdfBoIUJojFgUYNpjoJC2/AqbViqAB2AId1v+dZfPiM -Q5lfvfNu/1aNR1Y2/0q1YMG06v9eoIMPAAABavD+QzoAAAQDAEcwRQIhAP4forG0 -ffAjxPYC/0fMVoqoOOBO/HYC7th0+wPtBbJyAiATJBVqCghM0+EnLN55NcuA82ui -1Me+++KJg2o9HSSRIQB2AESUZS6w7s6vxEAH2Kj+KMDa5oK+2MsxtT/TM5a1toGo -AAABavD+Qb8AAAQDAEcwRQIgPvEMNC16lOohoBd7MbleUIVzlaceTO3U+AHARonD -UQECIQDqtt0xHhlNq4tzSIo0TbJO/C9BhGCLWbeqCw9GTSZ/KjANBgkqhkiG9w0B -AQsFAAOCAQEAVlj1abYJGVc5tYYxlnSHmn9xbdP3w+20MaprBLgGTdq6A/iXvGKh -DUMpA8LM8cElWR3qXIKqOSD+fsfvgnQN2rz3eJv+W/k1kh52kzvrmp3pA/uKMr6M -Hne0Chr2pmzBsLsIBe4geasSrDYwlEvmzzj98RqzcKkx9cLTDXQSo22DOFPncxB0 -a90l3mhpKnBl4bStpn7mzRZP6qAIMaxW4rZ72jxtGufNBByCYYu8sTn5NASUrzjH -XwZF+hkNCbU829Hi7rngN3+ZGpSVXu4g43dtj0ijoSlFEgpW+GX4lR8aZOOlFEc/ -4/spmG3oq+Gc2/rt7a28psPuy4fsAKubrA== ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/bijhuis.be463.cer b/pkg/mapserver/updater/testdata/certs/bijhuis.be463.cer deleted file mode 100644 index 172ab733..00000000 --- a/pkg/mapserver/updater/testdata/certs/bijhuis.be463.cer +++ /dev/null @@ -1,37 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGcDCCBVigAwIBAgIQVx+9RvYARCPKtux4Iw7qlDANBgkqhkiG9w0BAQsFADCB -jzELMAkGA1UEBhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4G -A1UEBxMHU2FsZm9yZDEYMBYGA1UEChMPU2VjdGlnbyBMaW1pdGVkMTcwNQYDVQQD -Ey5TZWN0aWdvIFJTQSBEb21haW4gVmFsaWRhdGlvbiBTZWN1cmUgU2VydmVyIENB -MB4XDTE5MDQyODAwMDAwMFoXDTIxMDQyNzIzNTk1OVowWTEhMB8GA1UECxMYRG9t -YWluIENvbnRyb2wgVmFsaWRhdGVkMR0wGwYDVQQLExRQb3NpdGl2ZVNTTCBXaWxk -Y2FyZDEVMBMGA1UEAwwMKi5iaWpodWlzLmJlMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEAjSa+M8XZcfcyXtsv6E77M8gxAZ63fdzVkOkx/JPr+ZQNWMz2 -snvxiKPqArn2dmeZqNxba0cJJnZf0Y9Ik1wF4cdQRGKV0/I8CLfZUeLtlGN5cO1u -vx4wERd1LBVfIubW2O8Ap7gOTCcG/PHanwPb4tx/MxdrwXmCWL/0cH57NqL3OYbu -PUwWymuAF5z6DNB6k48CkGq34U23qN+gXKwXdwNhZVijVOIYfSsImkSZjuj+xasE 
-wmbV7dPeOWtp95TETygIKMK7K9wZtHZo/64O0WK0XBeqWL0cX/fzF0La6AoTD1R8 -17cXHDAbFghtBZB3F05vVnYOnlXXDmbzoJ/3ZQIDAQABo4IC+zCCAvcwHwYDVR0j -BBgwFoAUjYxexFStiuF36Zv5mwXhuAGNYeEwHQYDVR0OBBYEFHHvrLeokRkJWn1F -GpziBOEDi2yTMA4GA1UdDwEB/wQEAwIFoDAMBgNVHRMBAf8EAjAAMB0GA1UdJQQW -MBQGCCsGAQUFBwMBBggrBgEFBQcDAjBJBgNVHSAEQjBAMDQGCysGAQQBsjEBAgIH -MCUwIwYIKwYBBQUHAgEWF2h0dHBzOi8vc2VjdGlnby5jb20vQ1BTMAgGBmeBDAEC -ATCBhAYIKwYBBQUHAQEEeDB2ME8GCCsGAQUFBzAChkNodHRwOi8vY3J0LnNlY3Rp -Z28uY29tL1NlY3RpZ29SU0FEb21haW5WYWxpZGF0aW9uU2VjdXJlU2VydmVyQ0Eu -Y3J0MCMGCCsGAQUFBzABhhdodHRwOi8vb2NzcC5zZWN0aWdvLmNvbTAjBgNVHREE -HDAaggwqLmJpamh1aXMuYmWCCmJpamh1aXMuYmUwggF/BgorBgEEAdZ5AgQCBIIB -bwSCAWsBaQB2ALvZ37wfinG1k5Qjl6qSe0c4V5UKq1LoGpCWZDaOHtGFAAABamOc -RN0AAAQDAEcwRQIgEels4w+qbU1HmduCq+R7neOXzJkVW/eQW7JUn5Htlt4CIQDh -H9wbxvvIFuIFvJjh6SrufyJVqo+dzk80hcDhyQIR9gB2AESUZS6w7s6vxEAH2Kj+ -KMDa5oK+2MsxtT/TM5a1toGoAAABamOcRdMAAAQDAEcwRQIhAKSOCfNU2tQ+Y4+S -4ygDIbzk7M373WJksn7MFrl0KTl6AiBgstYvZO69XI2CJV6qTR7RCdTDkEF6qzrV -DsxNadoZJAB3AG9Tdqwx8DEZ2JkApFEV/3cVHBHZAsEAKQaNsgiaN9kTAAABamOc -RO4AAAQDAEgwRgIhANyAz/c9tzSpVKUaBgILHuNRmo4IzFgfNJMELKi5v2vIAiEA -wLl2Tv9rtca0tiEEMfS8NL/5f/fFC8iwNzDCjzeIZIowDQYJKoZIhvcNAQELBQAD -ggEBAAOXaudyvoudz+mM7lywhL5OVyHMIa5o/FujsdaqfHoYnM4tynUdXd6GImcZ -T3g/gcrRMSq5ob6vo74GOseoLPHsfuW2LEn/WtLpqBi9zQi1p25U8sDxH/YLZx7j -KpZJL0X7NMH15GiWaMu1oiwbNAJtAgb+udAEl+OB7MU85fO9l6cVJ86gzg+EgQOF -+Bx9qk5gHRRgXur6mpXnDqK3YQaHn9yGiFqSVXP36ddYVC0Ui9eJnxjLJ2GsusF2 -48sd1VTDUb4gnh0GOst/JpjhF30PXqEavcDhUl5oSNhlKIRQfbeDfwF/aNBOwPXq -zGCjsv8uUC1pu2B277ytt6H09Ho= ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/brandell.net972.cer b/pkg/mapserver/updater/testdata/certs/brandell.net972.cer deleted file mode 100644 index e97a6d5e..00000000 --- a/pkg/mapserver/updater/testdata/certs/brandell.net972.cer +++ /dev/null @@ -1,23 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDxKADAgECAgwmo/SCQr/BNYKpBggwDQYJKoZIhvcNAQELBQAwTDELMAkGA1UE 
-BhMCQkUxGTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExIjAgBgNVBAMTGUFscGhh -U1NMIENBIC0gU0hBMjU2IC0gRzIwHhcNMTkwMjEzMTgzMTEyWhcNMjEwNDI4MTMy -OTE0WjA8MSEwHwYDVQQLExhEb21haW4gQ29udHJvbCBWYWxpZGF0ZWQxFzAVBgNV -BAMMDiouYnJhbmRlbGwubmV0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAglx7czeDjLyzQUF8Qz543H30e67iO2LD/SWxU0TO8AspvC0GFEUiWNmSJSEp -O1awj1SPhBb8yx2CJkoBnKlgFzrgbb5pIQDSzOqanH/9Q0jkVIdp/nbVywlhFWEO -6E4sh5fws+i8pa41BWEVKKlzkV3UcNWaJd5sp4gOV2SEbpIRJgncVfq7kWm6FAom -aeOK3yiSgr46fUH6Ke66Yc7qFWSjRDaCiBEMQYaUYvUAf7zUbeJ2dg+rAb/daqlX -NquObvGH1Z9On1r7HqWjB2OZDopKro3tbMKzImiku9Wa7k+X4a3Ju5AaH7EfMvG2 -LjZnOf6z51r9wsrfg/zbnZS1iwIDAQABo4IBzDCCAcgwDgYDVR0PAQH/BAQDAgWg -MIGJBggrBgEFBQcBAQR9MHswQgYIKwYBBQUHMAKGNmh0dHA6Ly9zZWN1cmUyLmFs -cGhhc3NsLmNvbS9jYWNlcnQvZ3NhbHBoYXNoYTJnMnIxLmNydDA1BggrBgEFBQcw -AYYpaHR0cDovL29jc3AyLmdsb2JhbHNpZ24uY29tL2dzYWxwaGFzaGEyZzIwVwYD -VR0gBFAwTjBCBgorBgEEAaAyAQoKMDQwMgYIKwYBBQUHAgEWJmh0dHBzOi8vd3d3 -Lmdsb2JhbHNpZ24uY29tL3JlcG9zaXRvcnkvMAgGBmeBDAECATAJBgNVHRMEAjAA -MD4GA1UdHwQ3MDUwM6AxoC+GLWh0dHA6Ly9jcmwyLmFscGhhc3NsLmNvbS9ncy9n -c2FscGhhc2hhMmcyLmNybDAnBgNVHREEIDAegg4qLmJyYW5kZWxsLm5ldIIMYnJh -bmRlbGwubmV0MB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAdBgNVHQ4E -FgQUmjZ4aQHItBoUTiGrGBvWi6IUwbUwHwYDVR0jBBgwFoAU9c3VPAhQ+WpPOreX -2laD5mnSaPc= ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/capsys.ca960.cer b/pkg/mapserver/updater/testdata/certs/capsys.ca960.cer deleted file mode 100644 index f1beaee2..00000000 --- a/pkg/mapserver/updater/testdata/certs/capsys.ca960.cer +++ /dev/null @@ -1,35 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGJzCCBQ+gAwIBAgIQBe+YF+aQWWY0m5/tC5edeDANBgkqhkiG9w0BAQsFADBe -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 -d3cuZGlnaWNlcnQuY29tMR0wGwYDVQQDExRSYXBpZFNTTCBSU0EgQ0EgMjAxODAe -Fw0xOTA1MjQwMDAwMDBaFw0yMTA1MjMxMjAwMDBaMBYxFDASBgNVBAMMCyouY2Fw -c3lzLmNhMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAySV3mh6sVBbD -xv1sKOqNT+kyGvmor7cGEE2Zw4t/YlBmW7mEkjhoBhTw45I0Y3GMUMYzfrDWVC2q 
-OYTErUbdZI+4mVwmPcVs86hRFE2tGIQTixhBS6TLRkYdCCEEXvbNe9oorOP4+4UK -WzLzV18gwScpYxI+Qy/K2QGsseC6e7jezOqLN8V8Jv9M+fvsq1mAnlfb0Rrjn4VW -DHooXiGNs2KwuoKO6mn2NUit9uT+6j433hnr1jcvdevP7DC+4mJQl+2DDvHnZG2f -o2vECK9vPYkclwoM3v8xKqgJ+X7lJclmJAc3AGQQk45n2mgwpEz0L8A7d7Sjms01 -tDetSy3yqQIDAQABo4IDJzCCAyMwHwYDVR0jBBgwFoAUU8oXWfxrwAMhLxqu5Kqo -HIJW2nUwHQYDVR0OBBYEFNjF74V0WV3Zm9epXKg42rInQQdFMCEGA1UdEQQaMBiC -CyouY2Fwc3lzLmNhggljYXBzeXMuY2EwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQW -MBQGCCsGAQUFBwMBBggrBgEFBQcDAjA+BgNVHR8ENzA1MDOgMaAvhi1odHRwOi8v -Y2RwLnJhcGlkc3NsLmNvbS9SYXBpZFNTTFJTQUNBMjAxOC5jcmwwTAYDVR0gBEUw -QzA3BglghkgBhv1sAQIwKjAoBggrBgEFBQcCARYcaHR0cHM6Ly93d3cuZGlnaWNl -cnQuY29tL0NQUzAIBgZngQwBAgEwdQYIKwYBBQUHAQEEaTBnMCYGCCsGAQUFBzAB -hhpodHRwOi8vc3RhdHVzLnJhcGlkc3NsLmNvbTA9BggrBgEFBQcwAoYxaHR0cDov -L2NhY2VydHMucmFwaWRzc2wuY29tL1JhcGlkU1NMUlNBQ0EyMDE4LmNydDAJBgNV -HRMEAjAAMIIBfQYKKwYBBAHWeQIEAgSCAW0EggFpAWcAdgDuS723dc5guuFCaR+r -4Z5mow9+X7By2IMAxHuJeqj9ywAAAWrrmMCaAAAEAwBHMEUCIQDIR15ahg6KR+Ku -gIJYzjA5K99+glN8iK1BnNYpKFDN2AIgHEGm+T+FL1VtSsWK1Xd8sMDlz3zdqn30 -3zv85hDszQwAdQCHdb/nWXz4jEOZX73zbv9WjUdWNv9KtWDBtOr/XqCDDwAAAWrr -mMG1AAAEAwBGMEQCICxlQXlD7UaPQJigFmzIel9KtqdgqdjjaiWijZwMBeo4AiAO -081e6h1N/oUHeD+mSq2e/kaEEKu4GwrCKmluuBdkbQB2AESUZS6w7s6vxEAH2Kj+ -KMDa5oK+2MsxtT/TM5a1toGoAAABauuYwDkAAAQDAEcwRQIhAIRKje9rkEkl1DLS -AR+ajHnppgteWkIBTgWA4upnYxRqAiBH1r7fKEsZ19FRZWpv669pAFYxcfYY3uyG -zdqZSdlW4TANBgkqhkiG9w0BAQsFAAOCAQEAa182PXoTqtFnUfnYYvoxMMkYUrLG -Qo68XiqNS7XuhU4BxxBj1Vxex0fJPlr9LROeymK644hWar7LnXZJRflt8SIGlUjo -Z8lkl8DssS0ThY//h44sKypsE+XqLRhKJtnMRXT4m0OdqKFF47eq4GTpGHpjJyah -flyQ2PhdetGe6MR+uFM1eni+/HOKnUb1YRfC1BOYU3HwSO68cQVCyccKxTKvuP3k -ro9xYVUxQgdN3QJ7hvIdkAJ5lLj9YdWyJ9rVJBmTzTCXTJlD/ETVtCI1XY6EZf6W -wZgb7RThgSeyFZRznYSMCLt+M+5knMtKkqKsoohYJqTnrlEvTd06xs64+Q== ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/carproof.com961.cer b/pkg/mapserver/updater/testdata/certs/carproof.com961.cer deleted file mode 100644 index 6443df85..00000000 --- 
a/pkg/mapserver/updater/testdata/certs/carproof.com961.cer +++ /dev/null @@ -1,38 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIGozCCBYugAwIBAgIQDT9AiH3hl8IcwEzpKGufbzANBgkqhkiG9w0BAQsFADBN -MQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMScwJQYDVQQDEx5E -aWdpQ2VydCBTSEEyIFNlY3VyZSBTZXJ2ZXIgQ0EwHhcNMTkwNTAyMDAwMDAwWhcN -MjEwNzAxMTIwMDAwWjBlMQswCQYDVQQGEwJDQTEQMA4GA1UECBMHT250YXJpbzEP -MA0GA1UEBxMGTG9uZG9uMRowGAYDVQQKExFDQVJGQVggQ2FuYWRhIFVMQzEXMBUG -A1UEAwwOKi5jYXJwcm9vZi5jb20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQDIopeANRIIAbNWGpNrsaLEXuZ6BVNMrojSZP3aodWcCvO/uLqcRv0uI4L/ -O+DhkNba2KN5h6969gvTSsFOFTluB7GSpgdp3YnzXkM28e5h5ZWWFdyRqpsEOIBq -64rz/dPxZpcBjBxJs/kHhvgAdjGHo+sYouau57BSCi+1WaDxhuNSe8LLu61Uqi67 -kR/q4cyetXtte1JH7o3xl/v9w7+YXKtWrICcyHuKxNkYrUnfcqzaSuqXDC8b4CUg -Zgr2J6rbM9ohsuA5L33wfuu7ErmLJJ4I/SLrcmpQaswC5PrXVTBVgKCFyEK4FIdm -8391lnkc8FoR6h6EADgR4YVoz1X3AgMBAAGjggNlMIIDYTAfBgNVHSMEGDAWgBQP -gGEcgjFh1S8o541GOLQs4cbZ4jAdBgNVHQ4EFgQUbDVtHST7+/wuQ6yoaFzXP1bz -eK4wJwYDVR0RBCAwHoIOKi5jYXJwcm9vZi5jb22CDGNhcnByb29mLmNvbTAOBgNV -HQ8BAf8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGsGA1Ud -HwRkMGIwL6AtoCuGKWh0dHA6Ly9jcmwzLmRpZ2ljZXJ0LmNvbS9zc2NhLXNoYTIt -ZzYuY3JsMC+gLaArhilodHRwOi8vY3JsNC5kaWdpY2VydC5jb20vc3NjYS1zaGEy -LWc2LmNybDBMBgNVHSAERTBDMDcGCWCGSAGG/WwBATAqMCgGCCsGAQUFBwIBFhxo -dHRwczovL3d3dy5kaWdpY2VydC5jb20vQ1BTMAgGBmeBDAECAjB8BggrBgEFBQcB -AQRwMG4wJAYIKwYBBQUHMAGGGGh0dHA6Ly9vY3NwLmRpZ2ljZXJ0LmNvbTBGBggr -BgEFBQcwAoY6aHR0cDovL2NhY2VydHMuZGlnaWNlcnQuY29tL0RpZ2lDZXJ0U0hB -MlNlY3VyZVNlcnZlckNBLmNydDAMBgNVHRMBAf8EAjAAMIIBfgYKKwYBBAHWeQIE -AgSCAW4EggFqAWgAdQC72d+8H4pxtZOUI5eqkntHOFeVCqtS6BqQlmQ2jh7RhQAA -AWp54OO8AAAEAwBGMEQCIFf8iRDiDWRDbYO3e3DagBu0ZlCIzYslu20HnkUtUF38 -AiAfdJvG5Sp6cVlcOWHyYsK1JueGwZwEEEplkdmahbou1wB2AId1v+dZfPiMQ5lf -vfNu/1aNR1Y2/0q1YMG06v9eoIMPAAABanng4CMAAAQDAEcwRQIhAL5E4SjI4BVj -4B4B3ekuhF2whrcT40yaYBW92nCtX7cVAiBx1GFO953tPmnkaPgAfkByp0U1Tl0w -bvP4SCja/watHAB3AESUZS6w7s6vxEAH2Kj+KMDa5oK+2MsxtT/TM5a1toGoAAAB 
-anng3qYAAAQDAEgwRgIhAMXGTPhy/xHaNaPSSQULIMBLzhlGxpRTfh1Wv7VZeXrj -AiEArOxaVoC0Nv2z06gBk+Gf18kkeyxD9F96yt111lpXRrcwDQYJKoZIhvcNAQEL -BQADggEBAFWsfQvUOSKFzs7tnkb0lYK0Hj3VyY3GaOBIwyH/aEjMOLbfcDM3glNO -yerAw7WNkILbmJ7uAvgUZIRKfC6GUKMTqUAvHac27JNjZ8U8b+1hDLddCX96kB6I -t8h4L4HTfo4vskpLd9ki9z7AttPUZXn1AFUWUlCEbbynQSrKFSjq7KfbdLbX3cVt -w7J8ZOVcBaBxANhcg1/f1qN5xR0Yp0/gtaY5G5fN3bMk0nHGHHYqG0xbonNPkTgb -R0TVwbYtdNzvdX68G8OTigZNC8ld+Jg1RqNmUV87yR1niHopvVHK4IzxsrY7KVk1 -XVeB6DfXUAquPj8zimefQZBU4QwB2KA= ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/dev2.mortgagebotlos.com82.cer b/pkg/mapserver/updater/testdata/certs/dev2.mortgagebotlos.com82.cer deleted file mode 100644 index d71181ba..00000000 --- a/pkg/mapserver/updater/testdata/certs/dev2.mortgagebotlos.com82.cer +++ /dev/null @@ -1,25 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEJ6ADAgECAhANf1YkBC64IwAAAABQ8j0cMA0GCSqGSIb3DQEBCwUAMIG6MQsw -CQYDVQQGEwJVUzEWMBQGA1UEChMNRW50cnVzdCwgSW5jLjEoMCYGA1UECxMfU2Vl -IHd3dy5lbnRydXN0Lm5ldC9sZWdhbC10ZXJtczE5MDcGA1UECxMwKGMpIDIwMTIg -RW50cnVzdCwgSW5jLiAtIGZvciBhdXRob3JpemVkIHVzZSBvbmx5MS4wLAYDVQQD -EyVFbnRydXN0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gTDFLMB4XDTE5MDUy -NzE5NTgwM1oXDTIxMDUyNzIwMjgwMVowaDELMAkGA1UEBhMCR0IxDzANBgNVBAcT -BkxvbmRvbjEkMCIGA1UEChMbRmluYXN0cmEgR3JvdXAgSG9sZGluZ3MgTHRkMSIw -IAYDVQQDDBkqLmRldjIubW9ydGdhZ2Vib3Rsb3MuY29tMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAvOJN0m655UE8HZXFiKoXqraN/mzIQ3K7oQRHkD1q -RL/nM0BzU8XBS6NcdzQcV44XDTmVjahkBzWsofvofpTFNfweMZQCGHRkV8pTA666 -Dt158klGp1LiTFfjnU39Rm0AtlTZna43xgpex6+AgxruiQhnKAXsn5XkGZVe+xMp -k7VmncDs8uIaBhN8U980qmmg7vkOLlfVynWaAQYGNM2bJnGMtjsSpP7oKSNcE0xW -vSy0w+o/OoGiwQ1cRqRkTKPh2mstzgDijIMwEszZMj0cX8DCKoyokiGuNWgRYkKa -UvYj08YOe4G0TvtMI5k4oyXB9aY72S89D4HDCDnGeU2e7QIDAQABo4IBkDCCAYww -JAYDVR0RBB0wG4IZKi5kZXYyLm1vcnRnYWdlYm90bG9zLmNvbTAOBgNVHQ8BAf8E -BAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDMGA1UdHwQsMCow -KKAmoCSGImh0dHA6Ly9jcmwuZW50cnVzdC5uZXQvbGV2ZWwxay5jcmwwSwYDVR0g 
-BEQwQjA2BgpghkgBhvpsCgEFMCgwJgYIKwYBBQUHAgEWGmh0dHA6Ly93d3cuZW50 -cnVzdC5uZXQvcnBhMAgGBmeBDAECAjBoBggrBgEFBQcBAQRcMFowIwYIKwYBBQUH -MAGGF2h0dHA6Ly9vY3NwLmVudHJ1c3QubmV0MDMGCCsGAQUFBzAChidodHRwOi8v -YWlhLmVudHJ1c3QubmV0L2wxay1jaGFpbjI1Ni5jZXIwHwYDVR0jBBgwFoAUgqJw -dN28Uz/Pe9T3zX+nYMYKTL8wHQYDVR0OBBYEFJMYYC7twHdvOg5Jjv2EJos6O3tP -MAkGA1UdEwQCMAA= ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/dfs.core.windows.net859.cer b/pkg/mapserver/updater/testdata/certs/dfs.core.windows.net859.cer deleted file mode 100644 index 95391394..00000000 --- a/pkg/mapserver/updater/testdata/certs/dfs.core.windows.net859.cer +++ /dev/null @@ -1,58 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIKZqADAgECAhN7AATCqjimgb30qf8sAAAABMKqMA0GCSqGSIb3DQEBCwUAMIGL -MQswCQYDVQQGEwJVUzETMBEGA1UECBMKV2FzaGluZ3RvbjEQMA4GA1UEBxMHUmVk -bW9uZDEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMRUwEwYDVQQLEwxN -aWNyb3NvZnQgSVQxHjAcBgNVBAMTFU1pY3Jvc29mdCBJVCBUTFMgQ0EgMTAeFw0x -OTA0MjcyMjIzNDBaFw0yMTA0MjcyMjIzNDBaMCExHzAdBgNVBAMMFiouZGZzLmNv -cmUud2luZG93cy5uZXQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDc -Sv775svKs6EyaFT/23Ja8vreK1TWuTXYVWP3h6NPFcl7EUcBIfZ3twO5rsgBeeYq -d0ZJ4h5KgTZWZ0+N+2R7da55l4KLVMR0+xIN94eTqrYnDE3NHGun6q14Fn1ZtRQ7 -TwED41WG6NIs2m2+4Bwsg6VC4hbnA9jcu/UuMZ/+Wq+rHjGj1+doRGNuZAl0qstd -WrJA1DHfOXcNkcQEAWE6yhoCnTD6Cf4QVFBAPh0RcmgaRFGK4baiLWuCtpzGbvR5 -OQlAkwv/4GVRSko7GByO7bCIHM8wdjoTzyRPp6SFPpoI8SeroM5SNVvZjTM+HB// -NLMGaScveiqXuVwrizaxAgMBAAGjgghCMIIIPjAnBgkrBgEEAYI3FQoEGjAYMAoG -CCsGAQUFBwMCMAoGCCsGAQUFBwMBMD4GCSsGAQQBgjcVBwQxMC8GJysGAQQBgjcV -CIfahnWD7tkBgsmFG4G1nmGF9OtggV2E0t9CgueTegIBZAIBHTCBhQYIKwYBBQUH -AQEEeTB3MFEGCCsGAQUFBzAChkVodHRwOi8vd3d3Lm1pY3Jvc29mdC5jb20vcGtp -L21zY29ycC9NaWNyb3NvZnQlMjBJVCUyMFRMUyUyMENBJTIwMS5jcnQwIgYIKwYB -BQUHMAGGFmh0dHA6Ly9vY3NwLm1zb2NzcC5jb20wHQYDVR0OBBYEFOUTS3/c6OmG -suU/OPTii7x/TtfBMAsGA1UdDwQEAwIEsDCCBd8GA1UdEQSCBdYwggXSghYqLmRm -cy5jb3JlLndpbmRvd3MubmV0ghcqLmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIaKi56 
-MS5kZnMuc3RvcmFnZS5henVyZS5uZXSCGiouejIuZGZzLnN0b3JhZ2UuYXp1cmUu -bmV0ghoqLnozLmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIaKi56NC5kZnMuc3RvcmFn -ZS5henVyZS5uZXSCGiouejUuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghoqLno2LmRm -cy5zdG9yYWdlLmF6dXJlLm5ldIIaKi56Ny5kZnMuc3RvcmFnZS5henVyZS5uZXSC -GiouejguZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghoqLno5LmRmcy5zdG9yYWdlLmF6 -dXJlLm5ldIIbKi56MTAuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLnoxMS5kZnMu -c3RvcmFnZS5henVyZS5uZXSCGyouejEyLmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIb -Ki56MTMuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLnoxNC5kZnMuc3RvcmFnZS5h -enVyZS5uZXSCGyouejE1LmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56MTYuZGZz -LnN0b3JhZ2UuYXp1cmUubmV0ghsqLnoxNy5kZnMuc3RvcmFnZS5henVyZS5uZXSC -GyouejE4LmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56MTkuZGZzLnN0b3JhZ2Uu -YXp1cmUubmV0ghsqLnoyMC5kZnMuc3RvcmFnZS5henVyZS5uZXSCGyouejIxLmRm -cy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56MjIuZGZzLnN0b3JhZ2UuYXp1cmUubmV0 -ghsqLnoyMy5kZnMuc3RvcmFnZS5henVyZS5uZXSCGyouejI0LmRmcy5zdG9yYWdl -LmF6dXJlLm5ldIIbKi56MjUuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLnoyNi5k -ZnMuc3RvcmFnZS5henVyZS5uZXSCGyouejI3LmRmcy5zdG9yYWdlLmF6dXJlLm5l -dIIbKi56MjguZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLnoyOS5kZnMuc3RvcmFn -ZS5henVyZS5uZXSCGyouejMwLmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56MzEu -ZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLnozMi5kZnMuc3RvcmFnZS5henVyZS5u -ZXSCGyouejMzLmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56MzQuZGZzLnN0b3Jh -Z2UuYXp1cmUubmV0ghsqLnozNS5kZnMuc3RvcmFnZS5henVyZS5uZXSCGyouejM2 -LmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56MzcuZGZzLnN0b3JhZ2UuYXp1cmUu -bmV0ghsqLnozOC5kZnMuc3RvcmFnZS5henVyZS5uZXSCGyouejM5LmRmcy5zdG9y -YWdlLmF6dXJlLm5ldIIbKi56NDAuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLno0 -MS5kZnMuc3RvcmFnZS5henVyZS5uZXSCGyouejQyLmRmcy5zdG9yYWdlLmF6dXJl -Lm5ldIIbKi56NDMuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLno0NC5kZnMuc3Rv -cmFnZS5henVyZS5uZXSCGyouejQ1LmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56 -NDYuZGZzLnN0b3JhZ2UuYXp1cmUubmV0ghsqLno0Ny5kZnMuc3RvcmFnZS5henVy -ZS5uZXSCGyouejQ4LmRmcy5zdG9yYWdlLmF6dXJlLm5ldIIbKi56NDkuZGZzLnN0 -b3JhZ2UuYXp1cmUubmV0ghsqLno1MC5kZnMuc3RvcmFnZS5henVyZS5uZXQwgawG 
-A1UdHwSBpDCBoTCBnqCBm6CBmIZLaHR0cDovL21zY3JsLm1pY3Jvc29mdC5jb20v -cGtpL21zY29ycC9jcmwvTWljcm9zb2Z0JTIwSVQlMjBUTFMlMjBDQSUyMDEuY3Js -hklodHRwOi8vY3JsLm1pY3Jvc29mdC5jb20vcGtpL21zY29ycC9jcmwvTWljcm9z -b2Z0JTIwSVQlMjBUTFMlMjBDQSUyMDEuY3JsME0GA1UdIARGMEQwQgYJKwYBBAGC -NyoBMDUwMwYIKwYBBQUHAgEWJ2h0dHA6Ly93d3cubWljcm9zb2Z0LmNvbS9wa2kv -bXNjb3JwL2NwczAfBgNVHSMEGDAWgBRYiJ/W3JxIIrcUPv+EiOjmhf/6fTAdBgNV -HSUEFjAUBggrBgEFBQcDAgYIKwYBBQUHAwE= ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/certs/efsrecrute.fr620.cer b/pkg/mapserver/updater/testdata/certs/efsrecrute.fr620.cer deleted file mode 100644 index d4ed676c..00000000 --- a/pkg/mapserver/updater/testdata/certs/efsrecrute.fr620.cer +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIEXKADAgECAhAx3SuuxaNjMIFtz7q0alybMA0GCSqGSIb3DQEBCwUAMFoxCzAJ -BgNVBAYTAkZSMRMwEQYDVQQKEwpDZXJ0aW5vbWlzMRgwFgYDVQRhEw9OVFJGUi00 -MzM5OTg5MDMxHDAaBgNVBAMTE0NlcnRpbm9taXMgLSBXZWIgQ0EwHhcNMTkwNTI3 -MTUxODAwWhcNMjEwNTI2MTUxODAwWjCBvDELMAkGA1UEBhMCRlIxCzAJBgNVBAgM -AjkzMRQwEgYDVQQHDAtTYWludCBEZW5pczEnMCUGA1UECgweRVRBQkxJU1NFTUVO -VCBGUkFOQ0FJUyBEVSBTQU5HMRgwFgYDVQRhDA9OVFJGUi00Mjg4MjI4NTIxFzAV -BgNVBAsMDjAwMDIgNDI4ODIyODUyMRQwEgYDVQQFEws2Mzk3MEpOTTMyNjEYMBYG -A1UEAwwPKi5lZnNyZWNydXRlLmZyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEA7iH40kPm06gwN9yTwXgVMYg5MDJHyi8y8AMraZlElhVeGPNm52HsgGSk -MDqlurhHGy0hDfM506VnUk7Bxt8z77CkVqRZlw+nCA52RPLIAGDyXSucwNJ7p2VB -zxB1axieMqb+bIWeLAR8tE5Q+vYx8WW1pYU6wTPhs9mHbV1ko/jU9ivUohHVb2x8 -dDDvsYDu9YjEqNr4fp8ryEp4gmEJyVLPuVEu0cpHQ4xaJm7MLRcwSK13JiZDZq5x -qQ0SSao7gc+tzg7d8dXfL/UMmZFmbW5ojth0QcKazHxCvXLOVBDa0TqEogqjlP5R -c+XBaen2OfO8oACljCyDPHVwChv/UQIDAQABo4IB0TCCAc0wewYIKwYBBQUHAQEE -bzBtMD4GCCsGAQUFBzAChjJodHRwOi8vd3d3LmNlcnRpbm9taXMuY29tL3B1Ymxp -L2Nlci9hYy1lamItd2ViLmNlcjArBggrBgEFBQcwAYYfaHR0cDovL29jc3AtcGtp -LmNlcnRpbm9taXMuY29tLzAdBgNVHQ4EFgQUzAYiiwUYGGvcOv5MPW+JaPbBwN8w -DAYDVR0TAQH/BAIwADAfBgNVHSMEGDAWgBTvZvBxkr3ZJYVUF5QV242zgJSyaDB2 
-BgNVHSAEbzBtMFcGCiqBegFWAgYEPQEwSTBHBggrBgEFBQcCARY7aHR0cHM6Ly93 -d3cuY2VydGlub21pcy5mci9kb2N1bWVudHMtZXQtbGllbnMvbm9zLXBvbGl0aXF1 -ZXMwCAYGZ4EMAQICMAgGBgQAj3oBBzA9BgNVHR8ENjA0MDKgMKAuhixodHRwOi8v -d3d3LmNlcnRpbm9taXMuY29tL2NybC9hYy1lamItd2ViLmNybDAOBgNVHQ8BAf8E -BAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwIGCCsGAQUFBwMBMBoGA1UdEQQTMBGC -DyouZWZzcmVjcnV0ZS5mcg== ------END CERTIFICATE----- diff --git a/pkg/mapserver/updater/testdata/domain_list/domains.txt b/pkg/mapserver/updater/testdata/domain_list/domains.txt deleted file mode 100644 index 652446a1..00000000 --- a/pkg/mapserver/updater/testdata/domain_list/domains.txt +++ /dev/null @@ -1,1049 +0,0 @@ -google.com -facebook.com -doubleclick.net -google-analytics.com -akamaihd.net -googlesyndication.com -googleapis.com -googleadservices.com -facebook.net -youtube.com -twitter.com -scorecardresearch.com -microsoft.com -ytimg.com -googleusercontent.com -apple.com -msftncsi.com -2mdn.net -googletagservices.com -adnxs.com -yahoo.com -serving-sys.com -akadns.net -bluekai.com -ggpht.com -rubiconproject.com -verisign.com -addthis.com -crashlytics.com -amazonaws.com -quantserve.com -akamaiedge.net -live.com -googletagmanager.com -revsci.net -adadvisor.net -openx.net -digicert.com -pubmatic.com -agkn.com -instagram.com -mathtag.com -gmail.com -rlcdn.com -linkedin.com -yahooapis.com -chartbeat.net -twimg.com -turn.com -crwdcntrl.net -demdex.net -betrad.com -flurry.com -newrelic.com -yimg.com -youtube-nocookie.com -exelator.com -acxiom-online.com -imrworldwide.com -amazon.com -fbcdn.net -windowsupdate.com -mookie1.com -rfihub.com -omniroot.com -adsrvr.org -nexac.com -bing.com -skype.com -godaddy.com -sitescout.com -tubemogul.com -contextweb.com -w55c.net -chartbeat.com -akamai.net -jquery.com -adap.tv -criteo.com -krxd.net -optimizely.com -macromedia.com -comodoca.com -casalemedia.com -pinterest.com -adsymptotic.com -symcd.com -atwola.com -adobe.com -msn.com -adsafeprotected.com -tapad.com -truste.com -symantecliveupdate.com -atdmt.com 
-t.co -avast.com -google.co.in -spotxchange.com -tidaltv.com -adtechus.com -everesttech.net -addthisedge.com -hola.org -btrll.com -gwallet.com -liverail.com -windows.com -burstnet.com -disqus.com -nr-data.net -p-td.com -geotrust.com -admob.com -crittercism.com -bizographics.com -ru4.com -wtp101.com -ksmobile.com -msads.net -thawte.com -lijit.com -cloudflare.com -360yield.com -dropbox.com -simpli.fi -smartadserver.com -globalsign.com -mlnadvertising.com -chango.com -connexity.net -moatads.com -s-msn.com -entrust.net -tribalfusion.com -domdex.com -google.com.tr -whatsapp.net -ntp.org -amazon-adsystem.com -viber.com -disquscdn.com -yandex.ru -doubleverify.com -bkrtx.com -criteo.net -outbrain.com -questionmarket.com -adform.net -yieldmanager.com -typekit.net -goo.gl -voicefive.com -owneriq.net -media6degrees.com -tynt.com -symcb.com -advertising.com -audienceiq.com -wp.com -rtbidder.net -wikipedia.org -adroll.com -icloud.com -gravatar.com -collective-media.net -appsflyer.com -dmtry.com -blogger.com -taboola.com -legolas-media.com -images-amazon.com -afy11.net -aspnetcdn.com -hike.in -feedburner.com -bootstrapcdn.com -usertrust.com -adgrx.com -brilig.com -sharethis.com -flashtalking.com -mediaplex.com -eqads.com -adscale.de -imgur.com -edgesuite.net -blogspot.com -msocsp.com -wikimedia.org -ssl-images-amazon.com -amung.us -flickr.com -rundsp.com -trouter.io -edgekey.net -rfihub.net -utorrent.com -thebrighttag.com -eyeviewads.com -switchads.com -tiqcdn.com -mozilla.org -jwpcdn.com -exponential.com -abmr.net -nanigans.com -zenoviaexchange.com -aolcdn.com -licdn.com -mixpanel.com -254a.com -mopub.com -creative-serving.com -statcounter.com -jwpltx.com -parse.com -ensighten.com -adtech.de -brightcove.com -acuityplatform.com -gfx.ms -ixiaa.com -reddit.com -visualrevenue.com -google.com.br -stickyadstv.com -google.it -yashi.com -jumptap.com -interclick.com -tapjoyads.com -globalsign.net -eyereturn.com -pointroll.com -googlevideo.com -virtualearth.net -gumgum.com -triggit.com 
-tumblr.com -gigya.com -teamviewer.com -insightexpressai.com -msecnd.net -gemius.pl -oracle.com -sonobi.com -fastclick.net -ebay.com -adobetag.com -surveymonkey.com -stumbleupon.com -admaym.com -invitemedia.com -superfish.com -google.com.vn -yahoodns.net -tapjoy.com -blogblog.com -mxpnl.com -omtrdc.net -skimresources.com -akamai.com -adobedtm.com -starfieldtech.com -skypeassets.com -a.com -btstatic.com -researchnow.com -conviva.com -hotmail.com -bittorrent.com -openbittorrent.com -vindicosuite.com -duba.net -publicbt.com -impact-ad.jp -netflix.com -ib-ibi.com -smaato.net -netsolssl.com -fetchback.com -appspot.com -vk.com -mozilla.com -accu-weather.com -yieldmanager.net -yadro.ru -histats.com -netseer.com -creativecommons.org -live.net -vizu.com -youtu.be -kau.li -eyeota.net -weather.com -provenpixel.com -veruta.com -umengcloud.com -paypal.com -office365.com -simplereach.com -ooyala.com -specificclick.net -digg.com -google.ca -dotomi.com -netmng.com -undertone.com -erne.co -staticflickr.com -urbanairship.com -adkmob.com -pro-market.net -dtscout.com -imdb.com -mzstatic.com -alexa.com -fastly.net -baidu.com -brealtime.com -amazon.co.uk -midasplayer.com -bugsense.com -outlook.com -chartboost.com -adrta.com -adcash.com -root-servers.net -adtilt.com -awstls.com -fwmrm.net -cdninstagram.com -adsonar.com -zedo.com -demonii.com -vimeo.com -dianxinos.com -adventori.com -accuweather.com -steamstatic.com -coull.com -mxptint.net -pfx.ms -footprint.net -ceipmsn.com -paypalobjects.com -taboolasyndication.com -umeng.com -altitude-arena.com -webtrendslive.com -dl-rms.com -visualwebsiteoptimizer.com -mydas.mobi -cap-mii.net -naver.jp -avg.com -wordpress.com -pinimg.com -livefyre.com -tabwpm.us -maxymiser.net -wordpress.org -ebayimg.com -gravity.com -huffingtonpost.com -exoclick.com -pandora.com -reson8.com -grvcdn.com -aol.com -adcolony.com -adhigh.net -eset.com -trustwave.com -cnn.com -cxense.com -lfstmedia.com -xboxlive.com -vungle.com -a3cloud.net -dailymotion.com 
-postrelease.com -duapp.com -king.com -mailshell.net -pingdom.net -lenovomm.com -dyntrk.com -kaspersky-labs.com -jwpsrv.com -nsatc.net -soundcloud.com -vimeocdn.com -theviilage.com -hlserve.com -wdgserv.com -inmobi.com -bbc.co.uk -kaspersky.com -spotxcdn.com -norton.com -nytimes.com -crsspxl.com -liveperson.net -amgdgt.com -amazon.in -amazon.de -adotube.com -go.com -samsungosp.com -parsely.com -windowsphone.com -heias.com -amazon.it -washingtonpost.com -ospserver.net -mscimg.com -google.co.uk -mzl.la -pswec.com -media.net -v0cdn.net -supercell.net -visadd.com -andomedia.com -mdotlabs.com -adformdsp.net -wikimediafoundation.org -alenty.com -zergnet.com -sundaysky.com -amazon.ca -mediawiki.org -datafastguru.info -vidible.tv -adzerk.net -brand-server.com -quantcount.com -flipboard.com -dtmpub.com -spongecell.com -tinyurl.com -clkmon.com -bing.net -adlegend.com -adblockplus.org -dvtps.com -p-cdn.com -mailchimp.com -wikidata.org -icio.us -ebaystatic.com -viglink.com -ibook.info -itools.info -thinkdifferent.us -airport.us -appleiphonecell.com -hwcdnlb.net -effectivemeasure.net -amazon.fr -iponweb.net -mbamupdates.com -foxnews.com -fiksu.com -dlqm.net -ozonemedia.com -zenfs.com -deliads.com -yieldlab.net -sail-horizon.com -applovin.com -nspmotion.com -metrigo.com -pulsemgr.com -visiblemeasures.com -revenuemantra.com -smartclip.net -ijinshan.com -tndmnsha.com -go-mpulse.net -relestar.com -amazon.co.jp -jollywallet.com -trafficmanager.net -imgfarm.com -opera-mini.net -cogocast.net -onenote.com -amazon.es -opendns.com -p161.net -a-msedge.net -cpmstar.com -amazon.com.br -logmein.com -nflximg.net -univide.com -tekblue.net -infostatsvc.com -udmserve.net -basebanner.com -zynga.com -amazon.cn -mathads.com -amazon.com.au -mediade.sk -atemda.com -d41.co -amazon.com.mx -airpush.com -ksmobile.net -geogslb.com -goodreads.com -monetate.net -clicktale.net -richrelevance.com -tns-counter.ru -coremetrics.com -online-metrix.net -rs6.net -xingcloud.com -generalmobi.com -uservoice.com 
-herokuapp.com -adblade.com -svcmot.com -shopbop.com -z5x.net -optmd.com -dropboxusercontent.com -fbsbx.com -turner.com -onclickads.net -bookdepository.com -bluecava.com -adtimaserver.vn -beringmedia.com -choicestream.com -zanox.com -apsalar.com -realmedia.com -dpclk.com -cedexis.com -scanscout.com -display-trk.com -bitmedianetwork.com -ctnsnet.com -tunigo.com -samsung.com -bazaarvoice.com -ebayrtm.com -returnpath.net -walmart.com -wsod.com -constantcontact.com -getclicky.com -localytics.com -ligatus.com -appier.net -dxsvr.com -myhabit.com -ajaxcdn.org -adyapper.com -nist.gov -neulion.com -edgecastcdn.net -convertro.com -vnexpress.net -javafx.com -thepiratebay.org -skype.net -kontagent.net -newsinc.com -glpals.com -ebz.io -audible.com -mobogenie.com -dingaling.ca -nrcdn.com -stumble-upon.com -backupgrid.net -po.st -marinsm.com -nflximg.com -adizio.com -acx.com -fyre.co -admedo.com -xvideos.com -junglee.com -evernote.com -createspace.com -buzzfeed.com -zing.vn -sanasecurity.com -igexin.com -bnmla.com -liadm.com -usatoday.com -scanalert.com -espncdn.com -metamx.com -plexop.net -optimatic.com -medyanetads.com -w3.org -apnanalytics.com -gezinti.com -dpreview.com -xbox.com -servesharp.net -cpxinteractive.com -adsparc.net -cardlytics.com -dailymail.co.uk -redditstatic.com -sociomantic.com -contentabc.com -admost.com -inmobicdn.net -3g.cn -miisolutions.net -nrelate.com -innovid.com -nola.com -testflightapp.com -teads.tv -fool.com -tripadvisor.com -al.com -cloudapp.net -public-trust.com -vine.co -mlive.com -cleveland.com -tp-cdn.com -addtoany.com -sharethrough.com -clickfuse.com -nj.com -abebooks.com -batanga.net -mediavoice.com -wsodcdn.com -bloomberg.com -ucweb.com -fonts.com -videohub.tv -spotify.com -alicdn.com -cdngc.net -groupon.com -afterschool.com -symantec.com -oregonlive.com -apptimize.com -trafficfactory.biz -ibillboard.com -vizury.com -qservz.com -perfectmarket.com -yieldoptimizer.com -ad4game.com -ask.com -networkhm.com -amazonlocal.com -zappos.com 
-diapers.com -adtricity.com -ml314.com -yldbt.com -plexop.com -bbb.org -tworismo.com -amazonsupply.com -beautybar.com -theguardian.com -myhomemsn.com -nvidia.com -comixology.com -bookworm.com -huffpost.com -vcmedia.vn -casa.com -woot.com -eastdane.com -answers.com -infolinks.com -fabric.com -lphbs.com -rpxnow.com -ovi.com -dlinksearch.com -adlooxtracking.com -soap.com -mail.ru -look.com -microsoftonline.com -wag.com -dyndns.org -pennlive.com -nbcnews.com -yoyo.com -zopim.com -collserve.com -vine.com -gpsonextra.net -tacoda.net -trusteer.com -yahoo.net -toolbarservices.com -bluelithium.com -sun.com -33across.com -ipinfo.io -iasds01.com -longtailvideo.com -typography.com -6pm.com -ptvcdn.net -adf.ly -kissmetrics.com -ccc.de -c3tag.com -safemovedm.com -tango.me -bbc.com -syracuse.com -dashbida.com -gvt1.com -admicro.vn -sascdn.com -r1-cdn.net -everestjs.net -craigslist.org -llnwd.net -thanksearch.com -iegallery.com -typekit.com -visualdna.com -angsrvr.com -tenmarks.com -mediaforge.com -telegraph.co.uk -myspace.com -lastpass.com -steampowered.com -startssl.com -ipinyou.com -fonts.net -goo.mx -google.com.mx -tr553.com -5min.com -tfxiq.com -korrelate.net -alibaba.com -mininova.org -ebaydesc.com -desync.com -compete.com -kochava.com -kaltura.com -bleacherreport.com -buscape.com.br -flite.com -swisssign.net -yieldmo.com -content.ad -github.com -wsj.com -opera.com -grouponcdn.com -aliunicorn.com -solocpm.com -nav-links.com -crtinv.com -hiro.tv -opendsp.com -windows.net -dmcdn.net -wii.com -farlex.com -smartstream.tv -yandex.net -masslive.com -blogher.org -jccjd.com -beanstock.co -weatherbug.com -intellitxt.com -bidtheatre.com -mmondi.com -linkedinlabs.com -acrobat.com -nokia.com -levexis.com -cbsi.com -adsplats.com -perfectaudience.com -admarvel.com -performgroup.com -liveinternet.ru -zyngawithfriends.com -bankrate.com -24h.com.vn -trafficjunky.net -cedexis.net -janrain.com -geforce.com -tacdn.com -eonline.com -smarturl.it -impdesk.com -internapcdn.net -umeng.co 
-sekindo.com -steamcommunity.com -riotgames.com -wunderground.com -nextadvisor.com -reuters.com -vibrant.co -blackberry.com -hwcdn.net -tremormedia.com -netgear.com -fncstatic.com -google.com.eg -ebdr3.com -revcontent.com -businessinsider.com -prfct.co -iperceptions.com -c8.net.ua -taobao.com -delicious.com -247realmedia.com -imwx.com -active-agent.com -supersonicads.com -realtime.co -kill123.com -phncdn.com -redditmedia.com -thepostgame.com -h33t.com -a9.com -foursquare.com -milliyet.com.tr -4dsply.com -upwpm.us -csze.com -mediaquark.com -tritondigital.com -mozilla.net -fidelity-media.com -dmca.com -greystripe.com -cafemom.com -mapticket.net -xhamster.com -ow.ly -maxmind.com -avira.com -webspectator.com -marketo.net -vlingo.com -iesnare.com -qwapi.com -rarbg.com -twitch.tv -myfonts.net -aws-protocol-testing.com -cb-cdn.com -segment.io -adnetwork.vn -qq.com -kik.com -technoratimedia.com -res-x.com -samsungapps.com -lenovo.com -americanexpress.com -htc.com -android.com -apnstatic.com -bounceexchange.com -tumri.net -theplatform.com -olark.com -cnbc.com -thespatialists.com -shareaholic.com -specificmedia.com -sharedaddomain.com -jquerytools.org -microadinc.com -clashofclans.com -roku.com -qualtrics.com -thescene.com -medialytics.com -mashable.com -cubecdn.net -360game.vn -estara.com -kiip.me -aliexpress.com -dailyofferservice.com -uol.com.br -adk2.co -aliimg.com -tentaculos.net -jsuol.com -attracto.com -corom.vn -dessaly.com -sgiggle.com -mobileapptracking.com -office.com -linkwithin.com -latimes.com -cbsnews.com -eclick.vn -glbimg.com -epicunitscan.info -avira-update.com -hoptopboy.com -tvlsvc.com -tailtarget.com -desk.com -intentiq.com -ero-advertising.com -imguol.com -everyscreenmedia.com -bbci.co.uk -itunes.com -engadget.com -people.com -dsply.com -voga360.com -hmageo.com -337play.com -gannett-cdn.com -rcsadv.it -manage.com -cachefly.net -doublepimp.com -keen.io -ea.com -reklamport.com -shopping.com -youradexchange.com -hp.com -apptentive.com -earthnetworks.com 
-nfl.com -userdmp.com -yastatic.net -google.de -apxlv.com -moneynews.com -livechatinc.com -forbes.com -pornhub.com -sbal4kp.com -wsoddata.com -logmein-gateway.com -facdn.com -yldmgrimg.net -hurriyet.com.tr -lucidmedia.com -doracdn.com -indeed.com -disneytermsofuse.com -truecaller.com -time.com -mediatek.com -ioam.de -rackcdn.com -baidu.co.th -reklamstore.com -pricegrabber.com -dyndns.com -imageshack.us -popads.net -dataxu.com -sndcdn.com -gizmodo.com -imageshack.com -yelp.com -google.ru -best-tv.com -webtrends.com -google.fr -archive.org -walmartimages.com -att.com -e-planning.net -openxenterprise.com -yan.vn -company-target.com -cmptch.com -incmd04.com -disneyprivacycenter.com -npr.org -tellapart.com -hulu.com -dynamicyield.com -theatlantic.com -atgsvcs.com -whois.co.kr -life360.com -tmz.com -visualstudio.com -adservingml.com -securetrust.com -qubitproducts.com -360.cn -realvu.net -fortune.com -sitescoutadserver.com -sponsorpay.com -torrentum.pl -brcdn.com -origin.com -slidesharecdn.com -360safe.com -pressroomvip.com -unrulymedia.com -nxtck.com -adexcite.com -etsy.com -odnoklassniki.ru -iheart.com -mmstat.com -glam.com -radaronline.com -popnhop.com -edgefcs.net -redintelligence.net -myvisualiq.net -mgid.com -2o7.net -mapquest.com \ No newline at end of file diff --git a/pkg/mapserver/updater/tools.go b/pkg/mapserver/updater/tools.go deleted file mode 100644 index 52fa5df3..00000000 --- a/pkg/mapserver/updater/tools.go +++ /dev/null @@ -1,26 +0,0 @@ -package updater - -import ( - "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - -// sort domain entries -func sortDomainEntry(domainEntry *common.DomainEntry) { - // // sort CA entries - // sort.Slice(domainEntry.Entries, func(j, k int) bool { - // if len(domainEntry.Entries[j].CAHash) == len(domainEntry.Entries[k].CAHash) { - // return bytes.Compare(domainEntry.Entries[j].CAHash, domainEntry.Entries[k].CAHash) == -1 - // } - // return len(domainEntry.Entries[j].CAHash) < 
len(domainEntry.Entries[k].CAHash) - // }) - - // // sort domain certs in one CA entry - // for i := range domainEntry.Entries { - // sort.Slice(domainEntry.Entries[i].DomainCerts, func(j, k int) bool { - // if len(domainEntry.Entries[i].DomainCerts[j]) == len(domainEntry.Entries[i].DomainCerts[k]) { - // return bytes.Compare(domainEntry.Entries[i].DomainCerts[j], domainEntry.Entries[i].DomainCerts[k]) == -1 - // } - // return len(domainEntry.Entries[i].DomainCerts[j]) < len(domainEntry.Entries[i].DomainCerts[k]) - // }) - // } -} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 6fa0b93a..3825830b 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -17,6 +17,8 @@ import ( "github.com/netsec-ethz/fpki/pkg/util" ) +const readBatchSize = 100000 + // MapUpdater: map updater. It is responsible for updating the tree, and writing to db type MapUpdater struct { Fetcher logpicker.LogFetcher diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 6ffd335e..9163329c 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -26,8 +26,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { // Because we are using "random" bytes deterministically here, set a fixed seed. rand.Seed(111) - // ctx, cancelF := context.WithTimeout(context.Background(), time.Second) - ctx, cancelF := context.WithTimeout(context.Background(), time.Hour) //deleteme + ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Second) defer cancelF() // DB will have the same name as the test function. 
diff --git a/pkg/mapserver/updater/updater_test_adapter.go b/pkg/mapserver/updater/updater_test_adapter.go deleted file mode 100644 index 829829aa..00000000 --- a/pkg/mapserver/updater/updater_test_adapter.go +++ /dev/null @@ -1,60 +0,0 @@ -package updater - -import ( - "context" - - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" -) - -type UpdaterTestAdapter MapUpdater - -// func NewMapTestUpdater(config *db.Configuration, root []byte, cacheHeight int) (*UpdaterTestAdapter, error) { -// up, err := NewMapUpdater(config, root, cacheHeight) -// return (*UpdaterTestAdapter)(up), err -// } - -func (a *UpdaterTestAdapter) Conn() db.Conn { - return (*MapUpdater)(a).dbConn -} - -func (u *UpdaterTestAdapter) UpdateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { - return (*MapUpdater)(u).updateCerts(ctx, certs, certChains) -} - -func (a *UpdaterTestAdapter) FetchUpdatedDomainHash(ctx context.Context) ( - []common.SHA256Output, error) { - return (*MapUpdater)(a).fetchUpdatedDomainHash(ctx) -} - -func (a *UpdaterTestAdapter) KeyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ( - [][]byte, [][]byte, error) { - - return keyValuePairToSMTInput(keyValuePair) -} - -func (a *UpdaterTestAdapter) SMT() *trie.Trie { - return (*MapUpdater)(a).smt -} - -func (a *UpdaterTestAdapter) SetSMT(smt *trie.Trie) { - a.smt = smt -} - -func (a *UpdaterTestAdapter) SetDBConn(dbConn db.Conn) { - a.dbConn = dbConn -} - -func (a *UpdaterTestAdapter) GetRoot() []byte { - return (*MapUpdater)(a).GetRoot() -} - -func (a *UpdaterTestAdapter) Close() error { - return (*MapUpdater)(a).Close() -} - -func (a *UpdaterTestAdapter) CommitSMTChanges(ctx context.Context) error { - return (*MapUpdater)(a).CommitSMTChanges(ctx) -} From 9627297cdab0a83902e0d920a4f21be48fe035be Mon Sep 
17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 10:19:08 +0200 Subject: [PATCH 129/187] Fix build of pkg/domainowner . --- pkg/domainowner/domainowner.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 94792330..b392ffcd 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -7,6 +7,8 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/common/crypto" + "github.com/netsec-ethz/fpki/pkg/util" ) // Assume one domain owner only have one domain; Logic can be changed later @@ -34,14 +36,16 @@ func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCS } // marshall public key into bytes - pubKeyBytes, err := common.RsaPublicKeyToPemBytes(&newPrivKeyPair.PublicKey) + pubKeyBytes, err := util.RSAPublicToPEM(&newPrivKeyPair.PublicKey) if err != nil { return nil, fmt.Errorf("GenerateRCSR | RsaPublicKeyToPemBytes | %w", err) } // generate rcsr rcsr := &common.RCSR{ - Subject: domainName, + PolicyObjectBase: common.PolicyObjectBase{ + Subject: domainName, + }, Version: version, TimeStamp: time.Now(), PublicKeyAlgorithm: common.RSA, @@ -51,14 +55,14 @@ func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCS // if domain owner still have the private key of the previous RPC -> can avoid cool-off period if prevKey, ok := do.privKeyByDomainName[domainName]; ok { - err = common.RCSRGenerateRPCSignature(rcsr, prevKey) + err = crypto.RCSRGenerateRPCSignature(rcsr, prevKey) if err != nil { return nil, fmt.Errorf("GenerateRCSR | RCSRGenerateRPCSignature | %w", err) } } // generate signature for RCSR, using the new pub key - err = common.RCSRCreateSignature(newPrivKeyPair, rcsr) + err = crypto.RCSRCreateSignature(newPrivKeyPair, rcsr) if err != nil { return nil, fmt.Errorf("GenerateRCSR | RCSRCreateSignature | %w", err) } @@ 
-81,7 +85,7 @@ func (do *DomainOwner) GeneratePSR(domainName string, policy common.Policy) (*co DomainName: domainName, } - err := common.DomainOwnerSignPSR(rpcKeyPair, psr) + err := crypto.DomainOwnerSignPSR(rpcKeyPair, psr) if err != nil { return nil, fmt.Errorf("GeneratePSR | DomainOwnerSignPSR | %w", err) } From d1ac392365dbdc6bb6c3e50660971e09d6362db5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 10:23:30 +0200 Subject: [PATCH 130/187] Fix build of pkg/grpc/... --- pkg/grpc/grpcclient/grpcclient.go | 2 +- pkg/grpc/grpcserver/grpcserver.go | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/pkg/grpc/grpcclient/grpcclient.go b/pkg/grpc/grpcclient/grpcclient.go index f265dc64..422a846c 100644 --- a/pkg/grpc/grpcclient/grpcclient.go +++ b/pkg/grpc/grpcclient/grpcclient.go @@ -24,7 +24,7 @@ var ( name = flag.String("name", domainName, "Domain name to query") ) -func GetProofs(name string, port int) ([]common.MapServerResponse, error) { +func GetProofs(name string, port int) ([]*common.MapServerResponse, error) { flag.Parse() // Set up a connection to the server. 
conn, err := grpc.Dial("localhost:"+strconv.Itoa(port), grpc.WithTransportCredentials(insecure.NewCredentials())) diff --git a/pkg/grpc/grpcserver/grpcserver.go b/pkg/grpc/grpcserver/grpcserver.go index be9031f3..8da3e7f8 100644 --- a/pkg/grpc/grpcserver/grpcserver.go +++ b/pkg/grpc/grpcserver/grpcserver.go @@ -8,6 +8,7 @@ import ( "log" "net" + "github.com/netsec-ethz/fpki/pkg/db" pb "github.com/netsec-ethz/fpki/pkg/grpc/query" "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/responder" @@ -21,11 +22,11 @@ var ( // ResponderServer: server to distribute map response type ResponderServer struct { pb.UnimplementedMapResponderServer - responder *responder.OldMapResponder + responder *responder.MapResponder } type GRPCProofs struct { - Proofs []common.MapServerResponse + Proofs []*common.MapServerResponse } // QueryMapEntries: return value according to key @@ -48,8 +49,10 @@ func (server ResponderServer) QueryMapEntries(ctx context.Context, in *pb.MapCli }, nil } -func NewGRPCServer(ctx context.Context, root []byte, cacheHeight int, mapserverConfigPath string) (*ResponderServer, error) { - responder, err := responder.NewOldMapResponder(ctx, root, cacheHeight, mapserverConfigPath) +func NewGRPCServer(ctx context.Context, mapserverConfigPath string, + conn db.Conn) (*ResponderServer, error) { + + responder, err := responder.NewMapResponder(ctx, mapserverConfigPath, conn) if err != nil { return nil, err } @@ -58,7 +61,7 @@ func NewGRPCServer(ctx context.Context, root []byte, cacheHeight int, mapserverC } func (s *ResponderServer) Close() error { - return s.responder.Close() + return nil } func (server *ResponderServer) StartWork(terminateChan chan byte, port int) error { From 0b494635300f89f8543a0be04cb23f64a1e1a58e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 16:11:22 +0200 Subject: [PATCH 131/187] Simplify connecting to test DB. 
--- pkg/db/mysql/mysql_test.go | 33 +++++------------ pkg/db/mysql/read_worker.go | 44 ----------------------- pkg/mapserver/responder/responder_test.go | 34 +++++------------- pkg/mapserver/updater/updater_test.go | 18 +++------- pkg/tests/defs.go | 1 + pkg/tests/testdb/{testDB.go => testdb.go} | 43 ++++++++++++++++++---- 6 files changed, 57 insertions(+), 116 deletions(-) delete mode 100644 pkg/db/mysql/read_worker.go rename pkg/tests/testdb/{testDB.go => testdb.go} (62%) diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index c4deb304..ebf061d6 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -13,7 +13,6 @@ import ( "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" "github.com/netsec-ethz/fpki/pkg/tests/random" @@ -25,20 +24,12 @@ func TestCheckCertsExist(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - // DB will have the same name as the test function. - dbName := t.Name() - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - - // Create a new DB with that name. On exiting the function, it will be removed. - err := testdb.CreateTestDB(ctx, dbName) - require.NoError(t, err) - defer func() { - err = testdb.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // Configure a test DB. + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() // Connect to the DB. - conn, err := mysql.Connect(config) + conn, err := testdb.Connect(config) require.NoError(t, err) defer conn.Close() @@ -84,20 +75,12 @@ func TestCoalesceForDirtyDomains(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - // DB will have the same name as the test function. 
- dbName := t.Name() - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - - // Create a new DB with that name. On exiting the function, it will be removed. - err := testdb.CreateTestDB(ctx, dbName) - require.NoError(t, err) - defer func() { - err = testdb.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // Configure a test DB. + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() // Connect to the DB. - conn, err := mysql.Connect(config) + conn, err := testdb.Connect(config) require.NoError(t, err) defer conn.Close() diff --git a/pkg/db/mysql/read_worker.go b/pkg/db/mysql/read_worker.go deleted file mode 100644 index ec30fe78..00000000 --- a/pkg/db/mysql/read_worker.go +++ /dev/null @@ -1,44 +0,0 @@ -package mysql - -import ( - "context" - "database/sql" - "fmt" - "strconv" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" -) - -// keyValueResult: used in worker thread; in multi-thread read -type keyValueResult struct { - Pairs []*db.KeyValuePair - Err error -} - -// used for retrieving entries from updates table -func fetchKeyWorker(resultChan chan readKeyResult, start, end int, ctx context.Context, db *sql.DB) { - key := make([]byte, 0, end-start) - result := make([]common.SHA256Output, 0, end-start) - - resultRows, err := db.Query("SELECT * FROM updates LIMIT " + strconv.Itoa(start) + "," + strconv.Itoa(end-start)) - if err != nil { - resultChan <- readKeyResult{Err: fmt.Errorf("fetchKeyWorker | Query | %w", err)} - return - } - defer resultRows.Close() - - for resultRows.Next() { - err = resultRows.Scan(&key) - // sql.NoRowErr should not be omitted. 
- if err != nil { - resultChan <- readKeyResult{Err: fmt.Errorf("fetchKeyWorker | Scan | %w", err)} - return - } - var key32bytes common.SHA256Output - copy(key32bytes[:], key) - result = append(result, key32bytes) - } - - resultChan <- readKeyResult{Keys: result} -} diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index f7e8efba..e37ecabc 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -10,8 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" mapcommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" "github.com/netsec-ethz/fpki/pkg/mapserver/prover" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" @@ -24,20 +22,12 @@ func TestNewResponder(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - // DB will have the same name as the test function. - dbName := t.Name() - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - - // Create a new DB with that name. On exiting the function, it will be removed. - err := testdb.CreateTestDB(ctx, dbName) - require.NoError(t, err) - defer func() { - err = testdb.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // Configure a test DB. + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() // Connect to the DB. - conn, err := mysql.Connect(config) + conn, err := testdb.Connect(config) require.NoError(t, err) defer conn.Close() @@ -77,20 +67,12 @@ func TestProof(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - // DB will have the same name as the test function. - dbName := t.Name() - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - - // Create a new DB with that name. 
On exiting the function, it will be removed. - err := testdb.CreateTestDB(ctx, dbName) - require.NoError(t, err) - defer func() { - err = testdb.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // Configure a test DB. + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() // Connect to the DB. - conn, err := mysql.Connect(config) + conn, err := testdb.Connect(config) require.NoError(t, err) defer conn.Close() diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 9163329c..8a1000eb 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -11,8 +11,6 @@ import ( ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" @@ -29,20 +27,12 @@ func TestUpdateWithKeepExisting(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Second) defer cancelF() - // DB will have the same name as the test function. - dbName := t.Name() - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) - - // Create a new DB with that name. On exiting the function, it will be removed. - err := testdb.CreateTestDB(ctx, dbName) - require.NoError(t, err) - defer func() { - err = testdb.RemoveTestDB(ctx, config) - require.NoError(t, err) - }() + // Configure a test DB. + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() // Connect to the DB. 
- conn, err := mysql.Connect(config) + conn, err := testdb.Connect(config) require.NoError(t, err) defer conn.Close() diff --git a/pkg/tests/defs.go b/pkg/tests/defs.go index 186f8316..311a2c69 100644 --- a/pkg/tests/defs.go +++ b/pkg/tests/defs.go @@ -5,4 +5,5 @@ import "github.com/stretchr/testify/require" type T interface { require.TestingT Helper() + Name() string } diff --git a/pkg/tests/testdb/testDB.go b/pkg/tests/testdb/testdb.go similarity index 62% rename from pkg/tests/testdb/testDB.go rename to pkg/tests/testdb/testdb.go index c7009cf8..8b20343f 100644 --- a/pkg/tests/testdb/testDB.go +++ b/pkg/tests/testdb/testdb.go @@ -5,14 +5,46 @@ import ( "fmt" "io" "os/exec" + "time" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/tools" + "github.com/stretchr/testify/require" ) -// CreateTestDB creates a new and ready test DB with the same structure as the F-PKI one. -func CreateTestDB(ctx context.Context, dbName string) error { +func Connect(config *db.Configuration) (db.Conn, error) { + return mysql.Connect(config) +} + +// ConfigureTestDB creates a new configuration and database with the name of the test, and +// returns the configuration and the DB removal function that should be called with defer. +func ConfigureTestDB(t tests.T) (*db.Configuration, func()) { + dbName := t.Name() + config := db.NewConfig(mysql.WithDefaults(), db.WithDB(dbName)) + + // New context to create the DB. + ctx, cancelF := context.WithTimeout(context.Background(), 2*time.Second) + + // Create a new DB with that name. On exiting the function, it will be removed. + err := createTestDB(ctx, dbName) + require.NoError(t, err) + cancelF() // DB was created. + + // Return the configuration and removal function. 
+ removeFunc := func() { + ctx, cancelF := context.WithTimeout(context.Background(), 2*time.Second) + err = removeTestDB(ctx, config) + require.NoError(t, err) + cancelF() + } + + return config, removeFunc +} + +// createTestDB creates a new and ready test DB with the same structure as the F-PKI one. +func createTestDB(ctx context.Context, dbName string) error { // The create_schema script is embedded. Send it to the stdin of bash, and right after // send a line with the invocation of the create_new_db function. script := tools.CreateSchemaScript() @@ -80,7 +112,8 @@ func CreateTestDB(ctx context.Context, dbName string) error { return nil } -func RemoveTestDB(ctx context.Context, config *db.Configuration) error { +// removeTestDB removes a test DB that was created with CreateTestDB. +func removeTestDB(ctx context.Context, config *db.Configuration) error { conn, err := Connect(config) if err != nil { return fmt.Errorf("connecting to test DB: %w", err) @@ -92,7 +125,3 @@ func RemoveTestDB(ctx context.Context, config *db.Configuration) error { } return nil } - -func Connect(config *db.Configuration) (db.Conn, error) { - return mysql.Connect(config) -} From 3bba090e5b710e32f003b8d1e2ac77289c394586 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 16:23:35 +0200 Subject: [PATCH 132/187] Get rid of MockDB. 
--- pkg/mapserver/trie/trie_test.go | 78 ++++++++--- pkg/tests/testdb/mockdb_for_testing.go | 183 ------------------------- 2 files changed, 59 insertions(+), 202 deletions(-) delete mode 100644 pkg/tests/testdb/mockdb_for_testing.go diff --git a/pkg/mapserver/trie/trie_test.go b/pkg/mapserver/trie/trie_test.go index 9336a499..238be650 100644 --- a/pkg/mapserver/trie/trie_test.go +++ b/pkg/mapserver/trie/trie_test.go @@ -19,8 +19,13 @@ import ( // TestTrieEmpty: test empty SMT func TestTrieEmpty(t *testing.T) { - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) require.Empty(t, smt.Root) @@ -32,8 +37,13 @@ func TestTrieUpdateAndGet(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) smt.atomicUpdate = false @@ -72,8 +82,13 @@ func TestTrieAtomicUpdate(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) smt.CacheHeightLimit = 0 @@ -102,8 +117,13 @@ func TestTriePublicUpdateAndGet(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF 
:= testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) smt.CacheHeightLimit = 0 @@ -139,8 +159,13 @@ func TestTrieUpdateAndDelete(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) smt.CacheHeightLimit = 0 @@ -177,8 +202,13 @@ func TestTrieMerkleProof(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) // Add data to empty trie @@ -209,8 +239,13 @@ func TestTrieMerkleProofCompressed(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) + require.NoError(t, err) + + smt, err := NewTrie(nil, common.SHA256Hash, conn) require.NoError(t, err) // Add data to empty trie @@ -238,14 +273,19 @@ func TestHeight0LeafShortcut(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) defer cancelF() - keySize := 32 - db := testdb.NewMockDB() - smt, err := NewTrie(nil, common.SHA256Hash, db) + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + conn, err := testdb.Connect(config) require.NoError(t, err) + smt, err 
:= NewTrie(nil, common.SHA256Hash, conn) + require.NoError(t, err) + + keySize := 32 // Add 2 sibling keys that will be stored at height 0 - key0 := make([]byte, keySize, keySize) - key1 := make([]byte, keySize, keySize) + key0 := make([]byte, keySize) + key1 := make([]byte, keySize) bitSet(key1, keySize*8-1) keys := [][]byte{key0, key1} values := getRandomData(t, 2) diff --git a/pkg/tests/testdb/mockdb_for_testing.go b/pkg/tests/testdb/mockdb_for_testing.go deleted file mode 100644 index c6a41868..00000000 --- a/pkg/tests/testdb/mockdb_for_testing.go +++ /dev/null @@ -1,183 +0,0 @@ -package testdb - -import ( - "context" - "database/sql" - "time" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" -) - -var empty struct{} - -// MockDB: mock db is a memory store to simulate the db. -type MockDB struct { - TreeTable map[common.SHA256Output][]byte - DomainEntriesTable map[common.SHA256Output][]byte - UpdatesTable map[common.SHA256Output]struct{} -} - -var _ db.Conn = (*MockDB)(nil) - -// newMockDB: return a new mock db -func NewMockDB() *MockDB { - return &MockDB{ - TreeTable: make(map[common.SHA256Output][]byte), - DomainEntriesTable: make(map[common.SHA256Output][]byte), - UpdatesTable: make(map[common.SHA256Output]struct{}), - } -} - -func (d *MockDB) DB() *sql.DB { - return nil -} - -// Close closes the connection. 
-func (d *MockDB) Close() error { return nil } - -func (d *MockDB) TruncateAllTables(ctx context.Context) error { return nil } - -func (*MockDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { return nil, nil } -func (*MockDB) SaveRoot(ctx context.Context, root *common.SHA256Output) error { return nil } - -func (d *MockDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { - return make([]bool, len(ids)), nil -} - -func (d *MockDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { - return make([]bool, len(ids)), nil -} - -func (d *MockDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, - expirations []*time.Time, payloads [][]byte) error { - - return nil -} - -func (d *MockDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, - expirations []*time.Time, payloads [][]byte) error { - - return nil -} - -func (d *MockDB) UpdateDomains(context.Context, []*common.SHA256Output, []string) error { - return nil -} - -func (d *MockDB) UpdateDomainCerts(ctx context.Context, - domainIDs, certIDs []*common.SHA256Output) error { - - return nil -} - -func (d *MockDB) UpdateDomainPolicies(ctx context.Context, - domainIDs, policyIDs []*common.SHA256Output) error { - - return nil -} - -func (d *MockDB) RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ([]byte, error) { - return d.TreeTable[id], nil -} - -func (d *MockDB) RetrieveDomainCertificatesPayload(ctx context.Context, key common.SHA256Output) ( - *common.SHA256Output, []byte, error) { - - id := common.SHA256Hash32Bytes(d.DomainEntriesTable[key]) - return &id, d.DomainEntriesTable[key], nil -} - -func (d *MockDB) RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( - payloadID *common.SHA256Output, payload []byte, err error) { - - return nil, nil, nil -} - -func (d *MockDB) RetrieveKeyValuePairTreeStruct(ctx context.Context, id []common.SHA256Output, - numOfRoutine 
int) ([]*db.KeyValuePair, error) { - result := []*db.KeyValuePair{} - for _, key := range id { - value, ok := d.TreeTable[key] - if !ok { - continue - } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) - } - return result, nil -} - -func (d *MockDB) RetrieveDomainEntries(ctx context.Context, ids []*common.SHA256Output) ( - []*db.KeyValuePair, error) { - - result := make([]*db.KeyValuePair, 0, len(ids)) - for _, key := range ids { - value, ok := d.DomainEntriesTable[*key] - if !ok { - continue - } - result = append(result, &db.KeyValuePair{Key: *key, Value: value}) - } - return result, nil -} - -func (d *MockDB) RetrieveUpdatedDomains(ctx context.Context, perQueryLimit int) ([]common.SHA256Output, error) { - result := []common.SHA256Output{} - for k := range d.UpdatesTable { - result = append(result, k) - } - return result, nil -} - -func (d *MockDB) CountUpdatedDomains(ctx context.Context) (int, error) { - return len(d.UpdatesTable), nil -} - -func (d *MockDB) UpdateDomainEntries(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { - for _, pair := range keyValuePairs { - d.DomainEntriesTable[pair.Key] = pair.Value - } - - return 0, nil -} - -func (d *MockDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { - for _, pair := range keyValuePairs { - d.TreeTable[pair.Key] = pair.Value - } - - return 0, nil -} - -func (d *MockDB) DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) { - for _, key := range keys { - delete(d.TreeTable, key) - } - return 0, nil -} - -func (d *MockDB) AddUpdatedDomains(ctx context.Context, keys []common.SHA256Output) (int, error) { - for _, key := range keys { - d.UpdatesTable[key] = empty - } - return 0, nil -} - -func (d *MockDB) RemoveAllUpdatedDomains(ctx context.Context) error { - d.UpdatesTable = make(map[common.SHA256Output]struct{}) - return nil -} - -func (d *MockDB) UpdatedDomains(context.Context) ([]*common.SHA256Output, error) { - 
return nil, nil -} - -func (*MockDB) CleanupDirty(ctx context.Context) error { return nil } - -func (*MockDB) DirtyDomainsCount(ctx context.Context) (int, error) { - return 0, nil -} - -func (*MockDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { - return nil -} From c389f86ae5305def1eeb3293c089de517a80b89b Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 17:02:52 +0200 Subject: [PATCH 133/187] Cleanup pkg/db. Removed the db integration test. --- Makefile | 1 - pkg/db/db.go | 127 +++---- pkg/db/mysql/certs.go | 146 ++++++++ pkg/db/mysql/dirty.go | 65 ++++ pkg/db/mysql/mysql.go | 407 +--------------------- pkg/db/mysql/policies.go | 122 +++++++ pkg/db/mysql/read.go | 152 --------- pkg/db/mysql/smt.go | 81 +++++ pkg/db/mysql/write.go | 368 -------------------- pkg/mapserver/updater/updater.go | 17 +- pkg/tests/testdb/helperfunctions.go | 60 ---- scripts/integration_tests.sh | 13 +- tests/integration/db/db.go | 509 ---------------------------- 13 files changed, 473 insertions(+), 1595 deletions(-) create mode 100644 pkg/db/mysql/certs.go create mode 100644 pkg/db/mysql/dirty.go create mode 100644 pkg/db/mysql/policies.go delete mode 100644 pkg/db/mysql/read.go create mode 100644 pkg/db/mysql/smt.go delete mode 100644 pkg/db/mysql/write.go delete mode 100644 pkg/tests/testdb/helperfunctions.go delete mode 100644 tests/integration/db/db.go diff --git a/Makefile b/Makefile index 1f7723d4..e87337ba 100644 --- a/Makefile +++ b/Makefile @@ -25,7 +25,6 @@ build_integration_test: @go build -o ./bin/test_domainowner_pca_policlog_interaction ./tests/integration/domainowner_pca_policlog_interaction @go build -o ./bin/test_mapserver ./tests/integration/mapserver @go build -o ./bin/test_smt ./tests/integration/smt - @go build -o ./bin/test_db ./tests/integration/db @go build -o ./bin/test_grpc ./tests/integration/grpc_test drop_cacheTable: diff --git a/pkg/db/db.go b/pkg/db/db.go index ac3ce4d3..502239b4 100644 --- 
a/pkg/db/db.go +++ b/pkg/db/db.go @@ -14,19 +14,26 @@ type KeyValuePair struct { Value []byte } -// Conn: interface for db connection -type Conn interface { - // TODO(juagargi) remove the temporary access to the sql.DB object - DB() *sql.DB - // Close closes the connection. - Close() error - - // TruncateAllTables resets the DB to an initial state. - TruncateAllTables(ctx context.Context) error - +type smt interface { LoadRoot(ctx context.Context) (*common.SHA256Output, error) SaveRoot(ctx context.Context, root *common.SHA256Output) error + // RetrieveTreeNode: Retrieve one key-value pair from Tree table. + RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ([]byte, error) + // UpdateTreeNodes: Update a list of key-value pairs in Tree table + UpdateTreeNodes(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) + // DeleteTreeNodes: Delete a list of key-value pairs in Tree table + DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) +} + +type dirty interface { + + // RetrieveDirtyDomains returns a channel of batches of updated domains. + // A batch will have a implementation dependent size. + // Each updated domain represents the SHA256 of the textual domain that was updated and + // present in the `updates` table. + RetrieveDirtyDomains(ctx context.Context) ([]*common.SHA256Output, error) + // ReplaceDirtyDomainPayloads retrieves dirty domains from the dirty list, starting // at firstRow and finishing at lastRow (for a total of lastRow - firstRow + 1 domains), // computes the aggregated payload for their certificates and policies, and stores it in the DB. @@ -34,90 +41,64 @@ type Conn interface { // domain, including e.g. the trust chain. ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error - // RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload - // itself, given the domain ID. 
- RetrieveDomainCertificatesPayload(ctx context.Context, id common.SHA256Output) ( - certIDsID *common.SHA256Output, certIDs []byte, err error) + //DirtyDomainsCount returns the number of domains that are still to be updated. + DirtyDomainsCount(ctx context.Context) (int, error) - // RetrieveDomainPoliciesPayload returns the policy related payload for a given domain. - // This includes the RPCs, SPs, etc. - RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( - payloadID *common.SHA256Output, payload []byte, err error) + CleanupDirty(ctx context.Context) error +} + +type certs interface { // CheckCertsExist returns a slice of true/false values. Each value indicates if // the corresponding certificate identified by its ID is already present in the DB. CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) + InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, + payloads [][]byte) error + + // UpdateDomainCerts updates the domain_certs table with new entries. + UpdateDomainCerts(ctx context.Context, domainIDs, certIDs []*common.SHA256Output) error + + // RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload + // itself, given the domain ID. + RetrieveDomainCertificatesPayload(ctx context.Context, id common.SHA256Output) ( + certIDsID *common.SHA256Output, certIDs []byte, err error) +} + +type policies interface { // CheckPoliciesExist returns a slice of true/false values. Each value indicates if // the corresponding policy identified by its ID is already present in the DB. 
CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) - InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, - payloads [][]byte) error - InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error - // UpdateDomains updates the domains and dirty tables. - UpdateDomains(ctx context.Context, domainIDs []*common.SHA256Output, domainNames []string) error - - // UpdateDomainCerts updates the domain_certs table with new entries. - UpdateDomainCerts(ctx context.Context, domainIDs, certIDs []*common.SHA256Output) error - // UpdateDomainPolicies updates the domain_policies table with new entries. UpdateDomainPolicies(ctx context.Context, domainIDs, policyIDs []*common.SHA256Output) error - ////////////////////////////////////////////////////////////////// - // check if the functions below are needed after the new design // - ////////////////////////////////////////////////////////////////// - - // ************************************************************ - // Function for Tree table - // ************************************************************ + // RetrieveDomainPoliciesPayload returns the policy related payload for a given domain. + // This includes the RPCs, SPs, etc. + RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( + payloadID *common.SHA256Output, payload []byte, err error) +} - // RetrieveTreeNode: Retrieve one key-value pair from Tree table. - RetrieveTreeNode(ctx context.Context, id common.SHA256Output) ([]byte, error) +type Conn interface { + smt + dirty + certs + policies - // UpdateTreeNodes: Update a list of key-value pairs in Tree table - UpdateTreeNodes(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) + // TODO(juagargi) remove the temporary access to the sql.DB object + DB() *sql.DB + // Close closes the connection. 
+ Close() error - // DeleteTreeNodes: Delete a list of key-value pairs in Tree table - DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) + // TruncateAllTables resets the DB to an initial state. + TruncateAllTables(ctx context.Context) error - // ************************************************************ - // Function for DomainEntries table - // ************************************************************ + // UpdateDomains updates the domains and dirty tables. + UpdateDomains(ctx context.Context, domainIDs []*common.SHA256Output, domainNames []string) error // RetrieveDomainEntries: Retrieve a list of domain entries table RetrieveDomainEntries(ctx context.Context, id []*common.SHA256Output) ([]*KeyValuePair, error) - - // UpdateDomainEntries: Update a list of key-value pairs in domain entries table - UpdateDomainEntries(ctx context.Context, keyValuePairs []*KeyValuePair) (int, error) - - // ************************************************************ - // Function for Updates table - // ************************************************************ - - // CountUpdatedDomains: Retrieve number of updated domains during this updates. - CountUpdatedDomains(ctx context.Context) (int, error) // TODO(juagargi) review usage - - // AddUpdatedDomains: Add a list of hashes of updated domain into the updates table. If key exists, ignore it. - AddUpdatedDomains(ctx context.Context, keys []common.SHA256Output) (int, error) - - // TODO(yongzhe): investigate whether perQueryLimit is necessary - // RetrieveUpdatedDomains: Retrieve all updated domain hashes from update table - RetrieveUpdatedDomains(ctx context.Context, perQueryLimit int) ([]common.SHA256Output, error) - - // RemoveAllUpdatedDomains: Truncate updates table; Called after updating is finished - RemoveAllUpdatedDomains(ctx context.Context) error - - // UpdatedDomains returns a channel of batches of updated domains. - // A batch will have a implementation dependent size. 
- // Each updated domain represents the SHA256 of the textual domain that was updated and - // present in the `updates` table. - UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, error) - - //DirtyDomainsCount returns the number of domains that are still to be updated. - DirtyDomainsCount(ctx context.Context) (int, error) - CleanupDirty(ctx context.Context) error } diff --git a/pkg/db/mysql/certs.go b/pkg/db/mysql/certs.go new file mode 100644 index 00000000..94abc4f7 --- /dev/null +++ b/pkg/db/mysql/certs.go @@ -0,0 +1,146 @@ +package mysql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/netsec-ethz/fpki/pkg/common" +) + +// CheckCertsExist returns a slice of true/false values. Each value indicates if +// the corresponding certificate identified by its ID is already present in the DB. +func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { + if len(ids) == 0 { + // If empty, return empty. + return nil, nil + } + presence := make([]bool, len(ids)) + + // The query won't accept more than batchSize elements. Make batches. + for i := 0; i < len(ids)-batchSize; i += batchSize { + to := i + batchSize + if err := c.checkCertsExist(ctx, ids[i:to], presence[i:to]); err != nil { + return nil, err + } + } + // Do the last batch, if non empty. + from := len(ids) / batchSize * batchSize + to := from + len(ids)%batchSize + var err error + if to > from { + err = c.checkCertsExist(ctx, ids[from:to], presence[from:to]) + } + return presence, err +} + +func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error { + + if len(ids) == 0 { + return nil + } + // TODO(juagargi) set a prepared statement in constructor + // Because the primary key is the SHA256 of the payload, if there is a clash, it must + // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
+ const N = 4 + str := "REPLACE INTO certs (cert_id, parent_id, expiration, payload) VALUES " + + repeatStmt(len(ids), N) + data := make([]interface{}, N*len(ids)) + for i := range ids { + data[i*N] = ids[i][:] + if parents[i] != nil { + data[i*N+1] = parents[i][:] + } + data[i*N+2] = expirations[i] + data[i*N+3] = payloads[i] + } + _, err := c.db.ExecContext(ctx, str, data...) + if err != nil { + return err + } + + return nil +} + +// UpdateDomainCerts updates the domain_certs table. +func (c *mysqlDB) UpdateDomainCerts(ctx context.Context, + domainIDs, certIDs []*common.SHA256Output) error { + + if len(domainIDs) == 0 { + return nil + } + // Insert into domain_certs: + str := "INSERT IGNORE INTO domain_certs (domain_id,cert_id) VALUES " + + repeatStmt(len(certIDs), 2) + data := make([]interface{}, 2*len(certIDs)) + for i := range certIDs { + data[2*i] = domainIDs[i][:] + data[2*i+1] = certIDs[i][:] + } + _, err := c.db.ExecContext(ctx, str, data...) + + return err +} + +// RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload itself, +// given the domain ID. +func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainID common.SHA256Output, +) (*common.SHA256Output, []byte, error) { + + str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" + var certIDsID, certIDs []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&certIDsID, &certIDs) + if err != nil && err != sql.ErrNoRows { + return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) + } + var IDptr *common.SHA256Output + if certIDsID != nil { + IDptr = (*common.SHA256Output)(certIDsID) + } + return IDptr, certIDs, nil +} + +// checkCertsExist should not be called with larger than ~1000 elements, the query being used +// may fail with a message like: +// Error 1436 (HY000): Thread stack overrun: 1028624 bytes used of a 1048576 byte stack, +// and 20000 bytes needed. 
Use 'mysqld --thread_stack=#' to specify a bigger stack. +func (c *mysqlDB) checkCertsExist(ctx context.Context, ids []*common.SHA256Output, + present []bool) error { + + // Slice to be used in the SQL query: + data := make([]interface{}, len(ids)) + for i, id := range ids { + data[i] = id[:] + } + + // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. + elems := make([]string, len(data)) + for i := range elems { + elems[i] = "SELECT ? AS cert_id" + } + + // The query means: join two tables, one with the values I am passing as arguments (those + // are the ids) and the certs table, and for those that exist write a 1, otherwise a 0. + // Finally, group_concat all rows into just one field of type string. + str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + + "SELECT (CASE WHEN certs.cert_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + + strings.Join(elems, " UNION ALL ") + + ") AS request LEFT JOIN ( SELECT cert_id FROM certs ) AS certs ON " + + "certs.cert_id = request.cert_id) AS t" + + // Return slice of booleans: + var value string + if err := c.db.QueryRowContext(ctx, str, data...).Scan(&value); err != nil { + return err + } + for i, c := range value { + if c == '1' { + present[i] = true + } + } + + return nil +} diff --git a/pkg/db/mysql/dirty.go b/pkg/db/mysql/dirty.go new file mode 100644 index 00000000..d7abdb91 --- /dev/null +++ b/pkg/db/mysql/dirty.go @@ -0,0 +1,65 @@ +package mysql + +import ( + "context" + "fmt" + + "github.com/netsec-ethz/fpki/pkg/common" +) + +func (c *mysqlDB) DirtyDomainsCount(ctx context.Context) (int, error) { + str := "SELECT COUNT(*) FROM dirty" + var count int + if err := c.db.QueryRowContext(ctx, str).Scan(&count); err != nil { + return 0, fmt.Errorf("querying number of dirty domains: %w", err) + } + return count, nil +} + +// RetrieveDirtyDomains returns the domain IDs that are still dirty, i.e. 
modified certificates for +// that domain, but not yet coalesced and ingested by the SMT. +func (c *mysqlDB) RetrieveDirtyDomains(ctx context.Context) ([]*common.SHA256Output, error) { + str := "SELECT domain_id FROM dirty" + rows, err := c.db.QueryContext(ctx, str) + if err != nil { + return nil, fmt.Errorf("error querying dirty domains: %w", err) + } + domainIDs := make([]*common.SHA256Output, 0) + for rows.Next() { + var domainId []byte + err = rows.Scan(&domainId) + if err != nil { + return nil, fmt.Errorf("error scanning domain ID: %w", err) + } + ptr := (*common.SHA256Output)(domainId) + domainIDs = append(domainIDs, ptr) + } + return domainIDs, nil +} + +func (c *mysqlDB) CleanupDirty(ctx context.Context) error { + // Remove all entries from the dirty table. + str := "TRUNCATE dirty" + _, err := c.db.ExecContext(ctx, str) + if err != nil { + return fmt.Errorf("error truncating dirty table: %w", err) + } + return nil +} + +func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { + // Call the certificate coalescing stored procedure with these parameters. + str := "CALL calc_dirty_domains_certs(?,?)" + _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) + if err != nil { + return fmt.Errorf("coalescing certificates for domains: %w", err) + } + + // Call the policy coalescing stored procedure with these parameters. 
+ str = "CALL calc_dirty_domains_policies(?,?)" + _, err = c.db.ExecContext(ctx, str, firstRow, lastRow) + if err != nil { + return fmt.Errorf("coalescing policies for domains: %w", err) + } + return nil +} diff --git a/pkg/db/mysql/mysql.go b/pkg/db/mysql/mysql.go index 3467d817..c4a9dbea 100644 --- a/pkg/db/mysql/mysql.go +++ b/pkg/db/mysql/mysql.go @@ -5,7 +5,6 @@ import ( "database/sql" "fmt" "strings" - "time" _ "github.com/go-sql-driver/mysql" "github.com/netsec-ethz/fpki/pkg/common" @@ -14,110 +13,14 @@ import ( const batchSize = 1000 -// NOTE -// The project contains three tables: -// * Domain entries tables: the table to store domain materials. -// -- Key: domain name hash: 32 bytes VarBinary -// -- Value: Serialized data of domain materials. Use Json to serialize the data structure. Stored as BLOB -// * Tree table: contains the Sparse Merkle Tree. Store the nodes of Sparse Merkle Tree -// * updates table: contains the domain hashes of the changed domains during this update. -// updates table will be truncated after the Sparse Merkle Tree is updated. - -type prepStmtGetter func(count int) (*sql.Stmt, *sql.Stmt) - type mysqlDB struct { db *sql.DB - - prepGetValueDomainEntries *sql.Stmt // returns the domain entries - prepGetValueTree *sql.Stmt // get key-value pair from tree table - // prepGetUpdatedDomains *sql.Stmt // get updated domains - - getDomainEntriesUpdateStmts prepStmtGetter // used to update key-values in domain entries - getTreeStructureUpdateStmts prepStmtGetter // used to update key-values in the tree table - getUpdatesInsertStmts prepStmtGetter // used to insert entries in the updates table - getTreeDeleteStmts prepStmtGetter // used to delete entries in the tree table - - getProofLimiter chan struct{} } -// NewMysqlDB is called to create a new instance of the mysqlDB, initializing certain values, -// like stored procedures. +// NewMysqlDB is called to create a new instance of the mysqlDB. 
func NewMysqlDB(db *sql.DB) (*mysqlDB, error) { - // prepGetValueDomainEntries, err := db.Prepare("SELECT `value` from `domainEntries` WHERE `key`=?") - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetValueDomainEntries: %w", err) - // } - // prepGetValueTree, err := db.Prepare("SELECT `value` from `tree` WHERE `key32`=?") - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetValueTree: %w", err) - // } - // prepGetUpdatedDomains, err := db.Prepare("SELECT `key` FROM `updates`") - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepGetUpdatedDomains: %w", err) - // } - - // str := "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(batchSize, 2) - // prepReplaceDomainEntries, err := db.Prepare(str) - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceDomainEntries: %w", err) - // } - // str = "REPLACE into tree (`key32`, `value`) values " + repeatStmt(batchSize, 2) - // prepReplaceTree, err := db.Prepare(str) - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceTree: %w", err) - // } - // str = "REPLACE into `updates` (`key`) VALUES " + repeatStmt(batchSize, 1) - // prepReplaceUpdates, err := db.Prepare(str) - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepReplaceUpdates: %w", err) - // } - // str = "DELETE from `tree` WHERE `key32` IN " + repeatStmt(1, batchSize) - // prepDeleteUpdates, err := db.Prepare(str) - // if err != nil { - // return nil, fmt.Errorf("NewMysqlDB | preparing statement prepDeleteUpdates: %w", err) - // } - return &mysqlDB{ db: db, - // prepGetValueDomainEntries: prepGetValueDomainEntries, - // prepGetValueTree: prepGetValueTree, - // prepGetUpdatedDomains: prepGetUpdatedDomains, - // getDomainEntriesUpdateStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - // str = "REPLACE into domainEntries (`key`, 
`value`) values " + repeatStmt(count, 2) - // prepPartial, err := db.Prepare(str) - // if err != nil { - // panic(err) - // } - // return prepReplaceDomainEntries, prepPartial - // }, - // getTreeStructureUpdateStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - // str := "REPLACE into tree (`key`, `value`) values " + repeatStmt(count, 2) - // prepPartial, err := db.Prepare(str) - // if err != nil { - // panic(err) - // } - // return prepReplaceTree, prepPartial - // }, - // getUpdatesInsertStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - // str := "REPLACE into `updates` (`key`) VALUES " + repeatStmt(count, 1) - // prepPartial, err := db.Prepare(str) - // if err != nil { - // panic(err) - // } - // return prepReplaceUpdates, prepPartial - // }, - // getTreeDeleteStmts: func(count int) (*sql.Stmt, *sql.Stmt) { - // if count == 0 { - // return prepDeleteUpdates, nil - // } - // str := "DELETE from `tree` WHERE `key` IN " + repeatStmt(1, count) - // prepPartial, err := db.Prepare(str) - // if err != nil { - // panic(err) - // } - // return prepDeleteUpdates, prepPartial - // }, - getProofLimiter: make(chan struct{}, 128), }, nil } @@ -125,10 +28,7 @@ func (c *mysqlDB) DB() *sql.DB { return c.db } -// Close: close connection func (c *mysqlDB) Close() error { - // c.prepGetValueTree.Close() - // c.prepGetValueDomainEntries.Close() return c.db.Close() } @@ -150,189 +50,6 @@ func (c *mysqlDB) TruncateAllTables(ctx context.Context) error { return nil } -func (c *mysqlDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { - var key []byte - if err := c.db.QueryRowContext(ctx, "SELECT key32 FROM root").Scan(&key); err != nil { - if err == sql.ErrNoRows { - return nil, nil - } - return nil, fmt.Errorf("error obtaining the root entry: %w", err) - } - return (*common.SHA256Output)(key), nil -} - -// CheckCertsExist returns a slice of true/false values. Each value indicates if -// the corresponding certificate identified by its ID is already present in the DB. 
-func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) { - if len(ids) == 0 { - // If empty, return empty. - return nil, nil - } - presence := make([]bool, len(ids)) - - // The query won't accept more than batchSize elements. Make batches. - for i := 0; i < len(ids)-batchSize; i += batchSize { - to := i + batchSize - if err := c.checkCertsExist(ctx, ids[i:to], presence[i:to]); err != nil { - return nil, err - } - } - // Do the last batch, if non empty. - from := len(ids) / batchSize * batchSize - to := from + len(ids)%batchSize - var err error - if to > from { - err = c.checkCertsExist(ctx, ids[from:to], presence[from:to]) - } - return presence, err -} - -// checkCertsExist should not be called with larger than ~1000 elements, the query being used -// may fail with a message like: -// Error 1436 (HY000): Thread stack overrun: 1028624 bytes used of a 1048576 byte stack, -// and 20000 bytes needed. Use 'mysqld --thread_stack=#' to specify a bigger stack. -func (c *mysqlDB) checkCertsExist(ctx context.Context, ids []*common.SHA256Output, - present []bool) error { - - // Slice to be used in the SQL query: - data := make([]interface{}, len(ids)) - for i, id := range ids { - data[i] = id[:] - } - - // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. - elems := make([]string, len(data)) - for i := range elems { - elems[i] = "SELECT ? AS cert_id" - } - - // The query means: join two tables, one with the values I am passing as arguments (those - // are the ids) and the certs table, and for those that exist write a 1, otherwise a 0. - // Finally, group_concat all rows into just one field of type string. 
- str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + - "SELECT (CASE WHEN certs.cert_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + - strings.Join(elems, " UNION ALL ") + - ") AS request LEFT JOIN ( SELECT cert_id FROM certs ) AS certs ON " + - "certs.cert_id = request.cert_id) AS t" - - // Return slice of booleans: - var value string - if err := c.db.QueryRowContext(ctx, str, data...).Scan(&value); err != nil { - return err - } - for i, c := range value { - if c == '1' { - present[i] = true - } - } - - return nil -} - -// CheckPoliciesExist returns a slice of true/false values. Each value indicates if -// the corresponding certificate identified by its ID is already present in the DB. -func (c *mysqlDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ( - []bool, error) { - - if len(ids) == 0 { - // If empty, return empty. - return nil, nil - } - // Slice to be used in the SQL query: - data := make([]interface{}, len(ids)) - for i, id := range ids { - data[i] = id[:] - } - - // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. - elems := make([]string, len(data)) - for i := range elems { - elems[i] = "SELECT ? AS policy_id" - } - - // The query means: join two tables, one with the values I am passing as arguments (those - // are the ids) and the policies table, and for those that exist write a 1, otherwise a 0. - // Finally, group_concat all rows into just one field of type string. 
- str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + - "SELECT (CASE WHEN policies.policy_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + - strings.Join(elems, " UNION ALL ") + - ") AS request LEFT JOIN ( SELECT policy_id FROM policies ) AS policies ON " + - "policies.policy_id = request.policy_id) AS t" - - // Return slice of booleans: - present := make([]bool, len(ids)) - - var value string - if err := c.db.QueryRowContext(ctx, str, data...).Scan(&value); err != nil { - return nil, err - } - for i, c := range value { - if c == '1' { - present[i] = true - } - } - - return present, nil -} - -func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, - expirations []*time.Time, payloads [][]byte) error { - - if len(ids) == 0 { - return nil - } - // TODO(juagargi) set a prepared statement in constructor - // Because the primary key is the SHA256 of the payload, if there is a clash, it must - // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. - const N = 4 - str := "REPLACE INTO certs (cert_id, parent_id, expiration, payload) VALUES " + - repeatStmt(len(ids), N) - data := make([]interface{}, N*len(ids)) - for i := range ids { - data[i*N] = ids[i][:] - if parents[i] != nil { - data[i*N+1] = parents[i][:] - } - data[i*N+2] = expirations[i] - data[i*N+3] = payloads[i] - } - _, err := c.db.ExecContext(ctx, str, data...) - if err != nil { - return err - } - - return nil -} - -func (c *mysqlDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, - expirations []*time.Time, payloads [][]byte) error { - - if len(ids) == 0 { - return nil - } - // TODO(juagargi) set a prepared statement in constructor - // Because the primary key is the SHA256 of the payload, if there is a clash, it must - // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
- const N = 4 - str := "REPLACE INTO policies (policy_id, parent_id, expiration, payload) VALUES " + - repeatStmt(len(ids), N) - data := make([]interface{}, N*len(ids)) - for i := range ids { - data[i*N] = ids[i][:] - if parents[i] != nil { - data[i*N+1] = parents[i][:] - } - data[i*N+2] = expirations[i] - data[i*N+3] = payloads[i] - } - _, err := c.db.ExecContext(ctx, str, data...) - if err != nil { - return err - } - - return nil -} - func (c *mysqlDB) UpdateDomains(ctx context.Context, domainIDs []*common.SHA256Output, domainNames []string) error { @@ -376,97 +93,6 @@ func (c *mysqlDB) UpdateDomains(ctx context.Context, domainIDs []*common.SHA256O return err } -// UpdateDomainCerts updates the domain_certs table. -func (c *mysqlDB) UpdateDomainCerts(ctx context.Context, - domainIDs, certIDs []*common.SHA256Output) error { - - if len(domainIDs) == 0 { - return nil - } - // Insert into domain_certs: - str := "INSERT IGNORE INTO domain_certs (domain_id,cert_id) VALUES " + - repeatStmt(len(certIDs), 2) - data := make([]interface{}, 2*len(certIDs)) - for i := range certIDs { - data[2*i] = domainIDs[i][:] - data[2*i+1] = certIDs[i][:] - } - _, err := c.db.ExecContext(ctx, str, data...) - - return err -} - -// UpdateDomainPolicies updates the domain_certs table. -func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context, - domainIDs, policyIDs []*common.SHA256Output) error { - - if len(domainIDs) == 0 { - return nil - } - // Insert into domain_certs: - str := "INSERT IGNORE INTO domain_policies (domain_id,policy_id) VALUES " + - repeatStmt(len(policyIDs), 2) - data := make([]interface{}, 2*len(policyIDs)) - for i := range policyIDs { - data[2*i] = domainIDs[i][:] - data[2*i+1] = policyIDs[i][:] - } - _, err := c.db.ExecContext(ctx, str, data...) - - return err -} - -func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { - // Call the certificate coalescing stored procedure with these parameters. 
- str := "CALL calc_dirty_domains_certs(?,?)" - _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) - if err != nil { - return fmt.Errorf("coalescing certificates for domains: %w", err) - } - - // Call the policy coalescing stored procedure with these parameters. - str = "CALL calc_dirty_domains_policies(?,?)" - _, err = c.db.ExecContext(ctx, str, firstRow, lastRow) - if err != nil { - return fmt.Errorf("coalescing policies for domains: %w", err) - } - return nil -} - -// RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload itself, -// given the domain ID. -func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainID common.SHA256Output, -) (*common.SHA256Output, []byte, error) { - - str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" - var certIDsID, certIDs []byte - err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&certIDsID, &certIDs) - if err != nil && err != sql.ErrNoRows { - return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) - } - var IDptr *common.SHA256Output - if certIDsID != nil { - IDptr = (*common.SHA256Output)(certIDsID) - } - return IDptr, certIDs, nil -} - -func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID common.SHA256Output, -) (*common.SHA256Output, []byte, error) { - - str := "SELECT policy_ids_id, policy_ids FROM domain_payloads WHERE domain_id = ?" - var policyIDsID, policyIDs []byte - err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&policyIDsID, &policyIDs) - if err != nil && err != sql.ErrNoRows { - return nil, nil, fmt.Errorf("RetrieveDomainPoliciesPayload | %w", err) - } - var IDptr *common.SHA256Output - if policyIDsID != nil { - IDptr = (*common.SHA256Output)(policyIDsID) - } - return IDptr, policyIDs, nil -} - // RetrieveDomainEntries: Retrieve a list of key-value pairs from domain entries table // No sql.ErrNoRows will be thrown, if some records does not exist. 
Check the length of result func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common.SHA256Output, @@ -504,37 +130,6 @@ func (c *mysqlDB) RetrieveDomainEntries(ctx context.Context, domainIDs []*common return pairs, nil } -// used for retrieving key value pair -func (c *mysqlDB) retrieveDomainEntriesOld(ctx context.Context, keys []*common.SHA256Output) ( - []*db.KeyValuePair, error) { - str := "SELECT `key`, `value` FROM domainEntries WHERE `key` IN " + repeatStmt(1, len(keys)) - args := make([]interface{}, len(keys)) - for i, k := range keys { - k := k // XXX(juagargi): create a copy - args[i] = k[:] // assign the slice covering the copy (the original k changes !!) - } - rows, err := c.db.QueryContext(ctx, str, args...) - if err != nil { - return nil, err - } - defer rows.Close() - var k, v []byte - domainEntries := make([]*db.KeyValuePair, 0, len(keys)) - for rows.Next() { - if err = rows.Scan(&k, &v); err != nil { - return nil, err - } - domainEntries = append(domainEntries, &db.KeyValuePair{ - Key: *(*common.SHA256Output)(k), - Value: v, - }) - } - if err := rows.Err(); err != nil { - return nil, err - } - return domainEntries, nil -} - // repeatStmt returns ( (?,..dimensions..,?), ...elemCount... ) // Use it like repeatStmt(1, len(IDs)) to obtain (?,?,...) func repeatStmt(elemCount int, dimensions int) string { diff --git a/pkg/db/mysql/policies.go b/pkg/db/mysql/policies.go new file mode 100644 index 00000000..598c2d7c --- /dev/null +++ b/pkg/db/mysql/policies.go @@ -0,0 +1,122 @@ +package mysql + +import ( + "context" + "database/sql" + "fmt" + "strings" + "time" + + "github.com/netsec-ethz/fpki/pkg/common" +) + +// CheckPoliciesExist returns a slice of true/false values. Each value indicates if +// the corresponding certificate identified by its ID is already present in the DB. 
+func (c *mysqlDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ( + []bool, error) { + + if len(ids) == 0 { + // If empty, return empty. + return nil, nil + } + // Slice to be used in the SQL query: + data := make([]interface{}, len(ids)) + for i, id := range ids { + data[i] = id[:] + } + + // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. + elems := make([]string, len(data)) + for i := range elems { + elems[i] = "SELECT ? AS policy_id" + } + + // The query means: join two tables, one with the values I am passing as arguments (those + // are the ids) and the policies table, and for those that exist write a 1, otherwise a 0. + // Finally, group_concat all rows into just one field of type string. + str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + + "SELECT (CASE WHEN policies.policy_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + + strings.Join(elems, " UNION ALL ") + + ") AS request LEFT JOIN ( SELECT policy_id FROM policies ) AS policies ON " + + "policies.policy_id = request.policy_id) AS t" + + // Return slice of booleans: + present := make([]bool, len(ids)) + + var value string + if err := c.db.QueryRowContext(ctx, str, data...).Scan(&value); err != nil { + return nil, err + } + for i, c := range value { + if c == '1' { + present[i] = true + } + } + + return present, nil +} + +func (c *mysqlDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, + expirations []*time.Time, payloads [][]byte) error { + + if len(ids) == 0 { + return nil + } + // TODO(juagargi) set a prepared statement in constructor + // Because the primary key is the SHA256 of the payload, if there is a clash, it must + // be that the certificates are identical. Thus always REPLACE or INSERT IGNORE. 
+	const N = 4
+	str := "REPLACE INTO policies (policy_id, parent_id, expiration, payload) VALUES " +
+		repeatStmt(len(ids), N)
+	data := make([]interface{}, N*len(ids))
+	for i := range ids {
+		data[i*N] = ids[i][:]
+		if parents[i] != nil {
+			data[i*N+1] = parents[i][:]
+		}
+		data[i*N+2] = expirations[i]
+		data[i*N+3] = payloads[i]
+	}
+	_, err := c.db.ExecContext(ctx, str, data...)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// UpdateDomainPolicies updates the domain_policies table.
+func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context,
+	domainIDs, policyIDs []*common.SHA256Output) error {
+
+	if len(domainIDs) == 0 {
+		return nil
+	}
+	// Insert into domain_policies:
+	str := "INSERT IGNORE INTO domain_policies (domain_id,policy_id) VALUES " +
+		repeatStmt(len(policyIDs), 2)
+	data := make([]interface{}, 2*len(policyIDs))
+	for i := range policyIDs {
+		data[2*i] = domainIDs[i][:]
+		data[2*i+1] = policyIDs[i][:]
+	}
+	_, err := c.db.ExecContext(ctx, str, data...)
+
+	return err
+}
+
+func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID common.SHA256Output,
+) (*common.SHA256Output, []byte, error) {
+
+	str := "SELECT policy_ids_id, policy_ids FROM domain_payloads WHERE domain_id = ?"
+ var policyIDsID, policyIDs []byte + err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&policyIDsID, &policyIDs) + if err != nil && err != sql.ErrNoRows { + return nil, nil, fmt.Errorf("RetrieveDomainPoliciesPayload | %w", err) + } + var IDptr *common.SHA256Output + if policyIDsID != nil { + IDptr = (*common.SHA256Output)(policyIDsID) + } + return IDptr, policyIDs, nil +} diff --git a/pkg/db/mysql/read.go b/pkg/db/mysql/read.go deleted file mode 100644 index 2778a261..00000000 --- a/pkg/db/mysql/read.go +++ /dev/null @@ -1,152 +0,0 @@ -package mysql - -import ( - "context" - "database/sql" - "fmt" - - "github.com/netsec-ethz/fpki/pkg/common" -) - -// used during main thread and worker thread -type readKeyResult struct { - Keys []common.SHA256Output - Err error -} - -func (c *mysqlDB) RetrieveTreeNode(ctx context.Context, key common.SHA256Output) ([]byte, error) { - var value []byte - str := "SELECT value FROM tree WHERE key32 = ?" - err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&value) - if err != nil { - if err == sql.ErrNoRows { - return nil, nil - } - return nil, fmt.Errorf("error retrieving node from tree: %w", err) - } - return value, nil -} - -// RetrieveTreeNode retrieves one single key-value pair from tree table -// Return sql.ErrNoRows if no row is round -func (c *mysqlDB) RetrieveTreeNodeOLD(ctx context.Context, key common.SHA256Output) ([]byte, error) { - c.getProofLimiter <- struct{}{} - defer func() { <-c.getProofLimiter }() - - value, err := retrieveValue(ctx, c.prepGetValueTree, key) - if err != nil && err != sql.ErrNoRows { - return nil, fmt.Errorf("RetrieveTreeNode | %w", err) - } - return value, err -} - -// ******************************************************************** -// -// Read functions for updates table -// -// ******************************************************************** -// CountUpdatedDomains: Get number of entries in updates table -func (c *mysqlDB) CountUpdatedDomains(ctx context.Context) (int, error) { 
- var number int - err := c.db.QueryRow("SELECT COUNT(*) FROM updates").Scan(&number) - if err != nil { - return 0, fmt.Errorf("CountUpdatedDomains | Scan | %w", err) - } - return number, nil -} - -// RetrieveUpdatedDomains: Get updated domains name hashes from updates table. -func (c *mysqlDB) RetrieveUpdatedDomains(ctx context.Context, perQueryLimit int) ([]common.SHA256Output, error) { - count, err := c.CountUpdatedDomains(ctx) - if err != nil { - return nil, fmt.Errorf("RetrieveUpdatedDomains | %w", err) - } - - // calculate the number of workers - var numberOfWorker int - if count > perQueryLimit { - numberOfWorker = count/perQueryLimit + 1 - } else { - numberOfWorker = 1 - } - - var step int - if numberOfWorker == 1 { - step = count - } else { - // evenly distribute the workload - step = count / numberOfWorker - } - - resultChan := make(chan readKeyResult) - for r := 0; r < numberOfWorker-1; r++ { - go fetchKeyWorker(resultChan, r*step, r*step+step, ctx, c.db) - } - // let the final one do the rest of the work - go fetchKeyWorker(resultChan, (numberOfWorker-1)*step, count+1, ctx, c.db) - - finishedWorker := 0 - keys := make([]common.SHA256Output, 0, count) - - // get response - for numberOfWorker > finishedWorker { - newResult := <-resultChan - if newResult.Err != nil { - return nil, fmt.Errorf("RetrieveUpdatedDomains | %w", newResult.Err) - } - keys = append(keys, newResult.Keys...) - finishedWorker++ - } - - if count != len(keys) { - return nil, fmt.Errorf("RetrieveUpdatedDomains | incomplete fetching") - } - return keys, nil -} - -// UpdatedDomains returns the domain IDs that are still dirty, i.e. modified certificates for -// that domain, but not yet coalesced and ingested by the SMT. 
-func (c *mysqlDB) UpdatedDomains(ctx context.Context) ([]*common.SHA256Output, error) { - str := "SELECT domain_id FROM dirty" - rows, err := c.db.QueryContext(ctx, str) - if err != nil { - return nil, fmt.Errorf("error querying dirty domains: %w", err) - } - domainIDs := make([]*common.SHA256Output, 0) - for rows.Next() { - var domainId []byte - err = rows.Scan(&domainId) - if err != nil { - return nil, fmt.Errorf("error scanning domain ID: %w", err) - } - ptr := (*common.SHA256Output)(domainId) - domainIDs = append(domainIDs, ptr) - } - return domainIDs, nil -} - -func (c *mysqlDB) DirtyDomainsCount(ctx context.Context) (int, error) { - str := "SELECT COUNT(*) FROM dirty" - var count int - if err := c.db.QueryRowContext(ctx, str).Scan(&count); err != nil { - return 0, fmt.Errorf("querying number of dirty domains: %w", err) - } - return count, nil -} - -func (c *mysqlDB) CleanupDirty(ctx context.Context) error { - // Remove all entries from the dirty table. - str := "TRUNCATE dirty" - _, err := c.db.ExecContext(ctx, str) - if err != nil { - return fmt.Errorf("error truncating dirty table: %w", err) - } - return nil -} - -func retrieveValue(ctx context.Context, stmt *sql.Stmt, key common.SHA256Output) ([]byte, error) { - var value []byte - row := stmt.QueryRow(key[:]) - err := row.Scan(&value) - return value, err -} diff --git a/pkg/db/mysql/smt.go b/pkg/db/mysql/smt.go new file mode 100644 index 00000000..9cc230ec --- /dev/null +++ b/pkg/db/mysql/smt.go @@ -0,0 +1,81 @@ +package mysql + +import ( + "context" + "database/sql" + "fmt" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/db" +) + +func (c *mysqlDB) LoadRoot(ctx context.Context) (*common.SHA256Output, error) { + var key []byte + if err := c.db.QueryRowContext(ctx, "SELECT key32 FROM root").Scan(&key); err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, fmt.Errorf("error obtaining the root entry: %w", err) + } + return 
(*common.SHA256Output)(key), nil +} + +func (c *mysqlDB) SaveRoot(ctx context.Context, root *common.SHA256Output) error { + str := "REPLACE INTO root (key32) VALUES (?)" + _, err := c.db.ExecContext(ctx, str, (*root)[:]) + if err != nil { + return fmt.Errorf("error inserting root ID: %w", err) + } + return nil +} + +func (c *mysqlDB) RetrieveTreeNode(ctx context.Context, key common.SHA256Output) ([]byte, error) { + var value []byte + str := "SELECT value FROM tree WHERE key32 = ?" + err := c.db.QueryRowContext(ctx, str, key[:]).Scan(&value) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, fmt.Errorf("error retrieving node from tree: %w", err) + } + return value, nil +} + +func (c *mysqlDB) DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) { + str := "DELETE FROM tree WHERE key32 IN " + repeatStmt(1, len(keys)) + params := make([]interface{}, len(keys)) + for i, k := range keys { + params[i] = k[:] + } + res, err := c.db.ExecContext(ctx, str, params...) + if err != nil { + return 0, fmt.Errorf("error deleting keys from tree: %w", err) + } + n, err := res.RowsAffected() + if err != nil { + panic(fmt.Errorf("unsupported retrieving number of rows affected: %w", err)) + } + return int(n), nil +} + +func (c *mysqlDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { + if len(keyValuePairs) == 0 { + return 0, nil + } + str := "REPLACE INTO tree (key32,value) VALUES " + repeatStmt(len(keyValuePairs), 2) + params := make([]interface{}, 2*len(keyValuePairs)) + for i, pair := range keyValuePairs { + params[i*2] = pair.Key[:] + params[i*2+1] = pair.Value + } + res, err := c.db.ExecContext(ctx, str, params...) 
+ if err != nil { + return 0, fmt.Errorf("error inserting key-values into tree: %w", err) + } + n, err := res.RowsAffected() + if err != nil { + panic(fmt.Errorf("unsupported retrieving number of rows affected: %w", err)) + } + return int(n), nil +} diff --git a/pkg/db/mysql/write.go b/pkg/db/mysql/write.go deleted file mode 100644 index 6d2aac06..00000000 --- a/pkg/db/mysql/write.go +++ /dev/null @@ -1,368 +0,0 @@ -package mysql - -import ( - "context" - "database/sql" - "encoding/hex" - "fmt" - "io/ioutil" - "os" - - "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" -) - -func (c *mysqlDB) UpdateDomainEntries(ctx context.Context, pairs []*db.KeyValuePair) (int, error) { - str := "REPLACE into domainEntries (`key`, `value`) values " + repeatStmt(len(pairs), 2) - params := make([]interface{}, len(pairs)*2) - for i, p := range pairs { - params[i*2] = p.Key[:] - params[i*2+1] = p.Value - } - _, err := c.db.ExecContext(ctx, str, params...) - if err != nil { - return 0, fmt.Errorf("UpdateDomainEntries | %w", err) - } - - return 666666666, nil -} - -// UpdateDomainEntries: Update a list of key-value store -func (c *mysqlDB) UpdateDomainEntriesOLD(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { - numOfUpdatedRecords, err := c.doUpdatePairs(ctx, keyValuePairs, c.getDomainEntriesUpdateStmts) - if err != nil { - return 0, fmt.Errorf("UpdateDomainEntries | %w", err) - } - return numOfUpdatedRecords, nil -} - -func (c *mysqlDB) DeleteTreeNodes(ctx context.Context, keys []common.SHA256Output) (int, error) { - str := "DELETE FROM tree WHERE key32 IN " + repeatStmt(1, len(keys)) - params := make([]interface{}, len(keys)) - for i, k := range keys { - params[i] = k[:] - } - res, err := c.db.ExecContext(ctx, str, params...) 
- if err != nil { - return 0, fmt.Errorf("error deleting keys from tree: %w", err) - } - n, err := res.RowsAffected() - if err != nil { - panic(fmt.Errorf("unsupported retrieving number of rows affected: %w", err)) - } - return int(n), nil -} - -// DeleteTreeNodes deletes a list of key-value stored in the tree table. -func (c *mysqlDB) DeleteTreeNodesOLD(ctx context.Context, keys []common.SHA256Output) (int, error) { - n, err := c.doUpdateKeys(ctx, keys, c.getTreeDeleteStmts) - if err != nil { - return 0, fmt.Errorf("DeleteTreeNodes | %w", err) - } - - return n, nil -} - -func (c *mysqlDB) UpdateTreeNodes(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { - if len(keyValuePairs) == 0 { - return 0, nil - } - str := "REPLACE INTO tree (key32,value) VALUES " + repeatStmt(len(keyValuePairs), 2) - params := make([]interface{}, 2*len(keyValuePairs)) - for i, pair := range keyValuePairs { - params[i*2] = pair.Key[:] - params[i*2+1] = pair.Value - } - res, err := c.db.ExecContext(ctx, str, params...) - if err != nil { - return 0, fmt.Errorf("error inserting key-values into tree: %w", err) - } - n, err := res.RowsAffected() - if err != nil { - panic(fmt.Errorf("unsupported retrieving number of rows affected: %w", err)) - } - return int(n), nil -} - -// UpdateTreeNodes: Update a list of key-value store -func (c *mysqlDB) UpdateTreeNodesOLD(ctx context.Context, keyValuePairs []*db.KeyValuePair) (int, error) { - numOfUpdatedPairs, err := c.doUpdatePairs(ctx, keyValuePairs, c.getTreeStructureUpdateStmts) - if err != nil { - return 0, fmt.Errorf("UpdateTreeNodes | %w", err) - } - return numOfUpdatedPairs, nil -} - -// AddUpdatedDomains inserts a list of keys into the updates table. -// If a key exists, ignores it. 
-func (c *mysqlDB) AddUpdatedDomains(ctx context.Context, keys []common.SHA256Output) (int, error) { - if len(keys) == 0 { - return 0, nil - } - n, err := c.doUpdateKeys(ctx, keys, c.getUpdatesInsertStmts) - if err != nil { - return 0, fmt.Errorf("AddUpdatedDomains | %w", err) - } - return n, nil -} - -// RemoveAllUpdatedDomains: truncate updates table -func (c *mysqlDB) RemoveAllUpdatedDomains(ctx context.Context) error { - _, err := c.db.Exec("TRUNCATE `fpki`.`updates`;") - if err != nil { - return fmt.Errorf("RemoveAllUpdatedDomains | TRUNCATE | %w", err) - } - return nil -} - -func (c *mysqlDB) SaveRoot(ctx context.Context, root *common.SHA256Output) error { - str := "REPLACE INTO root (key32) VALUES (?)" - _, err := c.db.ExecContext(ctx, str, (*root)[:]) - if err != nil { - return fmt.Errorf("error inserting root ID: %w", err) - } - return nil -} - -// ******************************************************************** -// -// Common -// -// ******************************************************************** -// worker to update key-value pairs -func (c *mysqlDB) doUpdatePairs(ctx context.Context, keyValuePairs []*db.KeyValuePair, - stmtGetter prepStmtGetter) (int, error) { - -func (HugeLeafError) Error() string { - return "Huge Leaf" -} - -// updateKeyValuesFcn -// stmtGen generates a new prepared statement, in case the size changes. -// parameters is reserved once with batchSize and passed along. -// keyValuePairs is the slice with all the key-values to insert, not only one batch. -// The function returns the number of rows affected, or error. -func (c *mysqlDB) updateKeyValuesFcn(tableName string, stmtGen prepStmtGetter, parameters []interface{}, - kvPairs []*KeyValuePair, stmt *sql.Stmt, first, last int) (int, error) { - - // Check if the size is too big for MySQL (max_allowed_packet must always be < 1G). 
- size := 0 - for j := first; j <= last; j++ { - size += len(kvPairs[j].Value) - } - if size > 1024*1024*1024 { - // This is too big to be sent to MySQL, it will receive a - // "Error 1105: Parameter of prepared statement which is set through - // mysql_send_long_data() is longer than 'max_allowed_packet' bytes" - // and fail. We need to split the data. - fmt.Printf("Detected one case of gigantism: data is %d Mb. Splitting in two.\n", - size/1024/1024) - if first == last { - panic(fmt.Errorf("cannot split: this is one gigantic entry. Size=%d bytes, key=%s", - size, hex.EncodeToString(kvPairs[first].Key[:]))) - } - last1 := (last-first+1)/2 + first - 1 - // The size has changed, generate a new prepared statement. - _, stmt := stmtGen(last1 - first + 1) - n, err1 := c.updateKeyValuesFcn(tableName, stmtGen, parameters, kvPairs, stmt, first, last1) - _, stmt = stmtGen(last - (last1 + 1) + 1) - n2, err2 := c.updateKeyValuesFcn(tableName, stmtGen, parameters, kvPairs, stmt, last1+1, last) - if err1 != nil { - err2 = err1 - } - return n2 + n, err2 - } - - data := parameters[:2*(last-first+1)] - for j := 0; j < len(data)/2; j++ { - data[2*j] = kvPairs[first+j].Key[:] - data[2*j+1] = kvPairs[first+j].Value - } - for { - result, err := stmt.Exec(data...) - if err != nil { - if myerr, ok := err.(*mysql.MySQLError); ok && myerr.Number == 1213 { // deadlock - // A deadlock was found, just cancel this operation and retry until success. - continue - } - return 0, fmt.Errorf("updateFcn | Exec | %w", err) - } - n, err := result.RowsAffected() - if err != nil { - return 0, fmt.Errorf("updateFcn | RowsAffected | %w", err) - } - return int(n), nil - } -} - -func (c *mysqlDB) insertKeyValuesViaLocalFile(tableName string, kvs []*KeyValuePair) error { - panicIfErr := func(err error) { - if err != nil { - panic(err) - } - } - // Create temp file to hold the CSV file in /tmp/ - f, err := ioutil.TempFile("", "hugeLeaf_*.dat") - panicIfErr(err) - // Always remove the file. 
- defer func() { - f.Close() - panicIfErr(os.Remove(f.Name())) - }() - - // writes a field enclosed by ", escaping both " and \ - writeField := func(data []byte) { - _, err := f.Write([]byte(`"`)) - panicIfErr(err) - // Ensure no " is part of the data, or escape it. - last := -1 - for i := 0; i < len(data); i++ { - switch data[i] { - case '"': - fallthrough - case '\\': - _, err = f.Write(data[last+1 : i]) - panicIfErr(err) - _, err = f.Write([]byte{'\\', data[i]}) - panicIfErr(err) - last = i - } - } - // Write the field - _, err = f.Write(data[last+1:]) - panicIfErr(err) - _, err = f.Write([]byte(`"`)) - panicIfErr(err) - } - - for _, kv := range kvs { - writeField(kv.Key[:]) // <----------- key - _, err = f.Write([]byte(",")) - panicIfErr(err) - writeField(kv.Value) // <------------- value - _, err = f.Write([]byte("\n")) - panicIfErr(err) - } - err = f.Chmod(0666) - panicIfErr(err) - err = f.Close() - panicIfErr(err) - - // Columns are `key` and `value`. - str := fmt.Sprintf( - "LOAD DATA INFILE '%s' REPLACE INTO TABLE %s "+ - ` FIELDS TERMINATED BY ',' ENCLOSED BY '"' `+"(`key`,`value`)", - f.Name(), tableName) - - for { - _, err = c.db.Exec(str) - if err != nil { - if myerr, ok := err.(*mysql.MySQLError); ok && myerr.Number == 1213 { // deadlock - // A deadlock was found, just cancel this operation and retry until success. 
- continue - } - panicIfErr(err) - } - break - } - return nil -} - -// ******************************************************************** -// -// Common -// -// ******************************************************************** -// worker to update key-value pairs -func (c *mysqlDB) doUpdatePairs(ctx context.Context, keyValuePairs []*KeyValuePair, - stmtGetter prepStmtGetter, tableName string) (int, error) { - - dataLen := len(keyValuePairs) - affectedRowsCount := 0 - - data := make([]interface{}, 2*batchSize) // 2 elements per record - updateWholeBatchStmt, updatePartialBatchStmt := stmtGetter(dataLen % batchSize) - updateAdapter := func(stmt *sql.Stmt, first, last int) (int, error) { - return c.updateKeyValuesFcn(tableName, stmtGetter, data, keyValuePairs, stmt, first, last) - } - - for i := 0; i < dataLen/batchSize; i++ { - n, err := updateAdapter(updateWholeBatchStmt, i*batchSize, (i+1)*batchSize-1) - if err != nil { - return 0, fmt.Errorf("doUpdatePairs | wholeBatch | %w", err) - } - affectedRowsCount += n - } - if dataLen%batchSize > 0 { - n, err := updateAdapter(updatePartialBatchStmt, dataLen/batchSize*batchSize, dataLen-1) - if err != nil { - return 0, fmt.Errorf("doUpdatePairs | partialBatch | %w", err) - } - affectedRowsCount += n - } - //defer updateWholeBatchStmt.Close() - //defer updatePartialBatchStmt.Close() - return affectedRowsCount, nil -} - -// worker to update keys -func (c *mysqlDB) doUpdateKeys(ctx context.Context, keys []common.SHA256Output, - stmtGetter prepStmtGetter) (int, error) { - - dataLen := len(keys) - affectedRowsCount := 0 - - if dataLen == 0 { - return 0, nil - } - - data := make([]interface{}, batchSize) - // updateFcn updates the DB using keys starting at index/batch, until the end of the - // batch or the end of keyValuePairs - updateFcn := func(stmt *sql.Stmt, index int) (int, error) { - for { - data := data[:min(batchSize, dataLen-batchSize*index)] - for j := 0; j < len(data); j++ { - data[j] = keys[index*batchSize+j][:] 
- } - result, err := stmt.Exec(data...) - if err != nil { - if myerr, ok := err.(*mysql.MySQLError); ok && myerr.Number == 1213 { // deadlock - // A deadlock was found, just cancel this operation and retry until success. - continue - } - return 0, fmt.Errorf("updateFcn | Exec | %w", err) - } - n, err := result.RowsAffected() - if err != nil { - return 0, fmt.Errorf("updateFcn | RowsAffected | %w", err) - } - return int(n), nil - } - } - - updateWholeBatchStmt, updatePartialBatchStmt := stmtGetter(dataLen % batchSize) - for i := 0; i < dataLen/batchSize; i++ { - n, err := updateFcn(updateWholeBatchStmt, i) - if err != nil { - return 0, fmt.Errorf("doUpdateKeys | wholeBatch | %w", err) - } - affectedRowsCount += n - } - if dataLen%batchSize > 0 { - n, err := updateFcn(updatePartialBatchStmt, dataLen/batchSize) - if err != nil { - return 0, fmt.Errorf("doUpdateKeys | partialBatch | %w", err) - } - affectedRowsCount += n - } - return affectedRowsCount, nil -} - -func min(a, b int) int { - if a < b { - return a - } - return b -} diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 3825830b..fa5f20ad 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -175,21 +175,6 @@ func (mapUpdater *MapUpdater) CommitSMTChanges(ctx context.Context) error { return nil } -// fetchUpdatedDomainHash: get hashes of updated domain from updates table, and truncate the table -func (mapUpdater *MapUpdater) fetchUpdatedDomainHash(ctx context.Context) ([]common.SHA256Output, error) { - keys, err := mapUpdater.dbConn.RetrieveUpdatedDomains(ctx, readBatchSize) - if err != nil { - return nil, fmt.Errorf("fetchUpdatedDomainHash | %w", err) - } - - err = mapUpdater.dbConn.RemoveAllUpdatedDomains(ctx) - if err != nil { - return nil, fmt.Errorf("fetchUpdatedDomainHash | %w", err) - } - - return keys, nil -} - // keyValuePairToSMTInput: key value pair -> SMT update input // deleteme: this function takes the payload and computes the 
hash of it. The hash is already // stored in the DB with the new design: change both the function RetrieveDomainEntries and @@ -397,7 +382,7 @@ func UpdateSMT(ctx context.Context, conn db.Conn, cacheHeight int) error { // smtTrie.CacheHeightLimit = cacheHeight // Get the dirty domains. - domains, err := conn.UpdatedDomains(ctx) + domains, err := conn.RetrieveDirtyDomains(ctx) if err != nil { return err } diff --git a/pkg/tests/testdb/helperfunctions.go b/pkg/tests/testdb/helperfunctions.go deleted file mode 100644 index df516f54..00000000 --- a/pkg/tests/testdb/helperfunctions.go +++ /dev/null @@ -1,60 +0,0 @@ -package testdb - -import ( - "context" - "fmt" - - "github.com/netsec-ethz/fpki/pkg/db/mysql" - "github.com/stretchr/testify/require" -) - -// TruncateAllTablesWithoutTestObject will truncate all tables in DB. This function should -// be used only while testing. -func TruncateAllTablesWithoutTestObject() { - t := &testingT{} - TruncateAllTablesForTest(t) -} - -// TruncateAllTablesForTest will truncate all tables in DB. This function should be used -// only in tests. -func TruncateAllTablesForTest(t require.TestingT) { - db, err := mysql.Connect(nil) - require.NoError(t, err) - - err = db.TruncateAllTables(context.Background()) - require.NoError(t, err) - - err = db.Close() - require.NoError(t, err) -} - -// GetDomainCountWithoutTestObject will get rows count of domain entries table -// be used only while testing. -func GetDomainCountWithoutTestObject() int { - t := &testingT{} - return getDomainNames(t) -} - -func getDomainNames(t require.TestingT) int { - db, err := mysql.Connect(nil) - require.NoError(t, err) - - var count int - err = db.DB().QueryRow("SELECT COUNT(*) FROM domainEntries;").Scan(&count) - require.NoError(t, err) - - err = db.Close() - require.NoError(t, err) - - return count -} - -type testingT struct{} - -func (t *testingT) Errorf(format string, args ...interface{}) { - str := fmt.Sprintf(format, args...) 
- panic(str) -} -func (t *testingT) FailNow() { - panic("") -} diff --git a/scripts/integration_tests.sh b/scripts/integration_tests.sh index 4fbd288a..5abbc533 100755 --- a/scripts/integration_tests.sh +++ b/scripts/integration_tests.sh @@ -12,7 +12,6 @@ PCA_EXIT_CPDE="succeed" POLICY_EXIT_CPDE="succeed" MAP_EXIT_CPDE="succeed" SMT_EXIT_CPDE="succeed" -DB_EXIT_CPDE="succeed" GRPC_EXIT_CPDE="succeed" RESULT="succeed" @@ -44,13 +43,6 @@ then SMT_EXIT_CPDE="failed" fi -echo "################# Running db test ###################" -./bin/test_db -if [ $? -ne 0 ] -then - DB_EXIT_CPDE="failed" -fi - echo "################# Running grpc test ###################" ./bin/test_grpc if [ $? -ne 0 ] @@ -63,11 +55,12 @@ echo " Test: domainowner_pca_policlog_interaction: ${PCA_EXIT_CPDE}" echo " Test: policylog_interaction: ${POLICY_EXIT_CPDE}" echo " Test: mapserver: ${MAP_EXIT_CPDE}" echo " Test: sparse merkle tree: ${SMT_EXIT_CPDE}" -echo " Test: db: ${DB_EXIT_CPDE}" echo " Test: grpc: ${GRPC_EXIT_CPDE}" echo " ********************************** Overall Result *************************************** " -if [ $PCA_EXIT_CPDE != "succeed" ] || [ $POLICY_EXIT_CPDE != "succeed" ] || [ $MAP_EXIT_CPDE != "succeed" ] || [ $SMT_EXIT_CPDE != "succeed" ] || [ $DB_EXIT_CPDE != "succeed" ] || [ $GRPC_EXIT_CPDE != "succeed" ] +if [ $PCA_EXIT_CPDE != "succeed" ] || [ $POLICY_EXIT_CPDE != "succeed" ] || + [ $MAP_EXIT_CPDE != "succeed" ] || [ $SMT_EXIT_CPDE != "succeed" ] || + [ $GRPC_EXIT_CPDE != "succeed" ] then RESULT="failed" fi diff --git a/tests/integration/db/db.go b/tests/integration/db/db.go deleted file mode 100644 index 7ad21ae1..00000000 --- a/tests/integration/db/db.go +++ /dev/null @@ -1,509 +0,0 @@ -package main - -import ( - "bytes" - "context" - "fmt" - "strconv" - "time" - - "database/sql" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" -) - -func main() { - clearTable() - 
- // test for tree table functions - testTreeTable() - - // test for domain entries table functions - testDomainEntriesTable() - - // test for updates table functions - testUpdateTable() - - clearTable() - fmt.Println("succeed") -} - -// test tree table -func testTreeTable() { - // ***************************************************************** - // open a db connection - // ***************************************************************** - conn, err := mysql.Connect(nil) - if err != nil { - panic(err) - } - - // ***************************************************************** - // insert into tree table - // ***************************************************************** - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - // insert key 1511 - 2012 - newKVPair := getKeyValuePair(1511, 2012, []byte("hi this is a test")) - _, err = conn.UpdateTreeNodes(ctx, newKVPair) - if err != nil { - panic(err) - } - - // insert key 2013 - 2055 - newKVPair = getKeyValuePair(2013, 2055, []byte("hi this is a test")) - _, err = conn.UpdateTreeNodes(ctx, newKVPair) - if err != nil { - panic(err) - } - - // insert key 2056 - 2155 - newKVPair = getKeyValuePair(2056, 2155, []byte("hi this is a test")) - _, err = conn.UpdateTreeNodes(ctx, newKVPair) - if err != nil { - panic(err) - } - - // insert key 2056 - 4555 - newKVPair = getKeyValuePair(2056, 4555, []byte("hi this is a test")) - _, err = conn.UpdateTreeNodes(ctx, newKVPair) - if err != nil { - panic(err) - } - - // ***************************************************************** - // check if value is correctly inserted - // ***************************************************************** - keys := getKeys(1511, 4555) - prevKeySize := len(keys) - result := []*db.KeyValuePair{} - - for _, key := range keys { - value, err := conn.RetrieveTreeNode(ctx, key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - // test of value if correctly stored and read - 
if !bytes.Equal(value, []byte("hi this is a test")) { - panic("Tree Table Read test 1: Stored value is not correct") - } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) - } - } - - if len(keys) != len(result) { - panic("Tree Table Read test 1: read result size error") - } - - // query a larger range - keys = getKeys(1011, 5555) - result = []*db.KeyValuePair{} - - for _, key := range keys { - value, err := conn.RetrieveTreeNode(ctx, key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - // test of value if correctly stored and read - if !bytes.Equal(value, []byte("hi this is a test")) { - panic("Tree Table Read test 2: Stored value is not correct") - } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) - } - } - - if prevKeySize != len(result) { - panic("Tree Table Read test 2: read result size error") - } - - // ***************************************************************** - // read empty keys - // ***************************************************************** - keys = getKeys(11511, 14555) - result = []*db.KeyValuePair{} - - for _, key := range keys { - value, err := conn.RetrieveTreeNode(ctx, key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - result = append(result, &db.KeyValuePair{Key: key, Value: value}) - } - } - - if len(result) != 0 { - panic("Tree Table Read test 3: read not inserted values") - } - - // ***************************************************************** - // update keys - // ***************************************************************** - newKVPair = getKeyValuePair(2056, 4555, []byte("new value")) - _, err = conn.UpdateTreeNodes(ctx, newKVPair) - if err != nil { - panic(err) - } - - // ***************************************************************** - // read updated key-value pairs - // ***************************************************************** - keys = getKeys(2056, 4555) - result = []*db.KeyValuePair{} - - for _, key 
:= range keys { - value, err := conn.RetrieveTreeNode(ctx, key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - // test of value if correctly stored and read - if !bytes.Equal(value, []byte("new value")) { - panic("Tree Table Read test 4: Stored value is not correct") - } - result = append(result, &db.KeyValuePair{Key: key, Value: value}) - } - } - - if len(keys) != len(result) { - panic("Tree Table Read test 4: read result size error") - } - - // ***************************************************************** - // delete keys - // ***************************************************************** - keys = getKeys(1000, 1200) - affectDomainsCount, err := conn.DeleteTreeNodes(ctx, keys) - if err != nil { - panic(err) - } - - if affectDomainsCount != 0 { - panic("Tree Table Read test 5: affected number error (1000-1200)") - } - - keys = getKeys(1511, 4222) - affectDomainsCount, err = conn.DeleteTreeNodes(ctx, keys) - if err != nil { - panic(err) - } - if affectDomainsCount != len(keys) { - panic("Tree Table Read test 5: affected number error (1511-4222)") - } - - keys = getKeys(4223, 4555) - affectDomainsCount, err = conn.DeleteTreeNodes(ctx, keys) - if err != nil { - panic(err) - } - if affectDomainsCount != len(keys) { - panic("Tree Table Read test 5: affected number error (4223-4555)") - } - - // ***************************************************************** - // read keys again - // ***************************************************************** - keys = getKeys(1011, 5555) - - for _, key := range keys { - value, err := conn.RetrieveTreeNode(ctx, key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - panic("Tree Table test 6: read deleted entry") - } - } - - // ***************************************************************** - // Test Close() - // ***************************************************************** - err = conn.Close() - if err != nil { - panic(err) - } -} - -// test tree table 
-func testDomainEntriesTable() { - // ***************************************************************** - // open a db connection - // ***************************************************************** - conn, err := mysql.Connect(nil) - if err != nil { - panic(err) - } - // ***************************************************************** - // insert into tree table - // ***************************************************************** - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - // insert key 1511 - 2012 - newKVPair := getKeyValuePair(1511, 2012, []byte("hi this is a test")) - _, err = conn.UpdateDomainEntries(ctx, newKVPair) - if err != nil { - panic(err) - } - - // insert key 2013 - 2055 - newKVPair = getKeyValuePair(2013, 2055, []byte("hi this is a test")) - _, err = conn.UpdateDomainEntries(ctx, newKVPair) - if err != nil { - panic(err) - } - - // insert key 2056 - 2155 - newKVPair = getKeyValuePair(2056, 2155, []byte("hi this is a test")) - _, err = conn.UpdateDomainEntries(ctx, newKVPair) - if err != nil { - panic(err) - } - - // insert key 2056 - 4555 - newKVPair = getKeyValuePair(2056, 4555, []byte("hi this is a test")) - _, err = conn.UpdateDomainEntries(ctx, newKVPair) - if err != nil { - panic(err) - } - - // ***************************************************************** - // check if value is correctly inserted - // RetrieveDomainCertificatesPayload() - // ***************************************************************** - keys := getKeyPtrs(1511, 4555) - prevKeySize := len(keys) - result := make([]*db.KeyValuePair, 0, len(keys)) - - for _, key := range keys { - _, value, err := conn.RetrieveDomainCertificatesPayload(ctx, *key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - // test of value if correctly stored and read - if !bytes.Equal(value, []byte("hi this is a test")) { - panic("Domain entries Table Read test 1: Stored value is not correct") - } - result = 
append(result, &db.KeyValuePair{Key: *key, Value: value}) - } - } - - if len(keys) != len(result) { - panic("Domain entries Table Read test 1: read result size error") - } - - // query a larger range - keys = getKeyPtrs(1011, 5555) - result = make([]*db.KeyValuePair, 0, len(keys)) - - for _, key := range keys { - _, value, err := conn.RetrieveDomainCertificatesPayload(ctx, *key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - // test of value if correctly stored and read - if !bytes.Equal(value, []byte("hi this is a test")) { - panic("Domain entries Table Read test 2: Stored value is not correct") - } - result = append(result, &db.KeyValuePair{Key: *key, Value: value}) - } - } - - if prevKeySize != len(result) { - panic("Domain entries Table Read test 2: read result size error") - } - - // ***************************************************************** - // check if value is correctly inserted - // RetrieveDomainEntries() - // ***************************************************************** - result, err = conn.RetrieveDomainEntries(ctx, keys) - if err != nil { - panic(err) - } - - if prevKeySize != len(result) { - panic("Domain entries Table Read test 3: read result size error") - } - - for _, entry := range result { - if !bytes.Equal(entry.Value, []byte("hi this is a test")) { - panic("Domain entries Table Read test 3: Stored value is not correct") - } - } - - // ***************************************************************** - // read empty keys - // ***************************************************************** - keys = getKeyPtrs(11511, 14555) - result = make([]*db.KeyValuePair, 0, len(keys)) - - for _, key := range keys { - _, value, err := conn.RetrieveDomainCertificatesPayload(ctx, *key) - if err != nil && err != sql.ErrNoRows { - panic(err) - } - if value != nil { - result = append(result, &db.KeyValuePair{Key: *key, Value: value}) - } - } - - if len(result) != 0 { - panic("Domain entries Table Read test 4: read not 
inserted values") - } - - // ***************************************************************** - // Test Close() - // ***************************************************************** - err = conn.Close() - if err != nil { - panic(err) - } -} - -// testUpdateTable: test if RetrieveTableRowsCount return correct number of entries. -func testUpdateTable() { - // ***************************************************************** - // open a db connection - // ***************************************************************** - conn, err := mysql.Connect(nil) - if err != nil { - panic(err) - } - - // ***************************************************************** - // add some records - // ***************************************************************** - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - totalRecordsNum := 0 - - keys := getKeys(100, 200) - _, err = conn.AddUpdatedDomains(ctx, keys) - if err != nil { - panic(err) - } - totalRecordsNum = totalRecordsNum + len(keys) - - keys = getKeys(333, 409) - _, err = conn.AddUpdatedDomains(ctx, keys) - if err != nil { - panic(err) - } - totalRecordsNum = totalRecordsNum + len(keys) - - // ***************************************************************** - // query updates - // ***************************************************************** - numOfUpdates, err := conn.CountUpdatedDomains(ctx) - if err != nil { - panic(err) - } - - if numOfUpdates != totalRecordsNum { - panic("Updates table test: missing some records") - } - - keys, err = conn.RetrieveUpdatedDomains(ctx, 1000) - if len(keys) != numOfUpdates { - panic("Updates table test: length not equal") - } - - // ***************************************************************** - // truncate tables - // ***************************************************************** - err = conn.RemoveAllUpdatedDomains(ctx) - if err != nil { - panic(err) - } - - // ***************************************************************** - // 
read records after truncation - // ***************************************************************** - numOfUpdates, err = conn.CountUpdatedDomains(ctx) - if err != nil { - panic(err) - } - if numOfUpdates != 0 { - panic("Updates table test: table not truncated") - } - - keys, err = conn.RetrieveUpdatedDomains(ctx, 1000) - if len(keys) != 0 { - panic("Updates table test: read values after truncation") - } - - // ***************************************************************** - // Test Close() - // ***************************************************************** - err = conn.Close() - if err != nil { - panic(err) - } -} - -func getKeyValuePair(startIdx, endIdx int, content []byte) []*db.KeyValuePair { - result := []*db.KeyValuePair{} - for i := startIdx; i <= endIdx; i++ { - keyHash := common.SHA256Hash([]byte(strconv.Itoa(i))) - keyHash32Bytes := [32]byte{} - copy(keyHash32Bytes[:], keyHash) - result = append(result, &db.KeyValuePair{Key: keyHash32Bytes, Value: content}) - } - return result -} - -func getKeys(startIdx, endIdx int) []common.SHA256Output { - result := []common.SHA256Output{} - for i := startIdx; i <= endIdx; i++ { - keyHash := common.SHA256Hash([]byte(strconv.Itoa(i))) - keyHash32Bytes := [32]byte{} - copy(keyHash32Bytes[:], keyHash) - result = append(result, keyHash32Bytes) - } - return result -} - -func getKeyPtrs(startIdx, endIdx int) []*common.SHA256Output { - result := []*common.SHA256Output{} - for i := startIdx; i <= endIdx; i++ { - keyHash := common.SHA256Hash32Bytes([]byte(strconv.Itoa(i))) - result = append(result, (*common.SHA256Output)(&keyHash)) - } - return result -} - -func clearTable() { - db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/fpki?maxAllowedPacket=1073741824") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE domainEntries;") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE updates;") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE tree;") - if err != nil { - 
panic(err) - } - - err = db.Close() - if err != nil { - panic(err) - } -} From e11a6dddfc26114f795385227533123886876d24 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 24 May 2023 17:13:39 +0200 Subject: [PATCH 134/187] Renamed some DB methods. --- pkg/db/db.go | 16 ++++++++-------- pkg/db/mysql/certs.go | 8 ++++---- pkg/db/mysql/dirty.go | 4 +++- pkg/db/mysql/mysql_test.go | 4 ++-- pkg/db/mysql/policies.go | 4 ++-- pkg/mapserver/responder/responder.go | 4 ++-- pkg/mapserver/updater/updater.go | 6 +++--- pkg/mapserver/updater/updater_test.go | 4 ++-- 8 files changed, 26 insertions(+), 24 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index 502239b4..14eeef8e 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -34,12 +34,12 @@ type dirty interface { // present in the `updates` table. RetrieveDirtyDomains(ctx context.Context) ([]*common.SHA256Output, error) - // ReplaceDirtyDomainPayloads retrieves dirty domains from the dirty list, starting + // RecomputeDirtyDomainsCertAndPolicyIDs retrieves dirty domains from the dirty list, starting // at firstRow and finishing at lastRow (for a total of lastRow - firstRow + 1 domains), // computes the aggregated payload for their certificates and policies, and stores it in the DB. // The aggregated payload takes into account all policies and certificates needed for that // domain, including e.g. the trust chain. - ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error + RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context, firstRow, lastRow int) error //DirtyDomainsCount returns the number of domains that are still to be updated. DirtyDomainsCount(ctx context.Context) (int, error) @@ -53,15 +53,15 @@ type certs interface { // the corresponding certificate identified by its ID is already present in the DB. 
CheckCertsExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) - InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, + UpdateCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error // UpdateDomainCerts updates the domain_certs table with new entries. UpdateDomainCerts(ctx context.Context, domainIDs, certIDs []*common.SHA256Output) error - // RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload + // RetrieveDomainCertificatesIDs retrieves the domain's certificate payload ID and the payload // itself, given the domain ID. - RetrieveDomainCertificatesPayload(ctx context.Context, id common.SHA256Output) ( + RetrieveDomainCertificatesIDs(ctx context.Context, id common.SHA256Output) ( certIDsID *common.SHA256Output, certIDs []byte, err error) } @@ -70,15 +70,15 @@ type policies interface { // the corresponding policy identified by its ID is already present in the DB. CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Output) ([]bool, error) - InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, + UpdatePolicies(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error // UpdateDomainPolicies updates the domain_policies table with new entries. UpdateDomainPolicies(ctx context.Context, domainIDs, policyIDs []*common.SHA256Output) error - // RetrieveDomainPoliciesPayload returns the policy related payload for a given domain. + // RetrieveDomainPoliciesIDs returns the policy related payload for a given domain. // This includes the RPCs, SPs, etc. 
- RetrieveDomainPoliciesPayload(ctx context.Context, id common.SHA256Output) ( + RetrieveDomainPoliciesIDs(ctx context.Context, id common.SHA256Output) ( payloadID *common.SHA256Output, payload []byte, err error) } diff --git a/pkg/db/mysql/certs.go b/pkg/db/mysql/certs.go index 94abc4f7..cc1c6184 100644 --- a/pkg/db/mysql/certs.go +++ b/pkg/db/mysql/certs.go @@ -36,7 +36,7 @@ func (c *mysqlDB) CheckCertsExist(ctx context.Context, ids []*common.SHA256Outpu return presence, err } -func (c *mysqlDB) InsertCerts(ctx context.Context, ids, parents []*common.SHA256Output, +func (c *mysqlDB) UpdateCerts(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { if len(ids) == 0 { @@ -85,16 +85,16 @@ func (c *mysqlDB) UpdateDomainCerts(ctx context.Context, return err } -// RetrieveDomainCertificatesPayload retrieves the domain's certificate payload ID and the payload itself, +// RetrieveDomainCertificatesIDs retrieves the domain's certificate payload ID and the payload itself, // given the domain ID. -func (c *mysqlDB) RetrieveDomainCertificatesPayload(ctx context.Context, domainID common.SHA256Output, +func (c *mysqlDB) RetrieveDomainCertificatesIDs(ctx context.Context, domainID common.SHA256Output, ) (*common.SHA256Output, []byte, error) { str := "SELECT cert_ids_id, cert_ids FROM domain_payloads WHERE domain_id = ?" 
var certIDsID, certIDs []byte err := c.db.QueryRowContext(ctx, str, domainID[:]).Scan(&certIDsID, &certIDs) if err != nil && err != sql.ErrNoRows { - return nil, nil, fmt.Errorf("RetrieveDomainCertificatesPayload | %w", err) + return nil, nil, fmt.Errorf("RetrieveDomainCertificatesIDs | %w", err) } var IDptr *common.SHA256Output if certIDsID != nil { diff --git a/pkg/db/mysql/dirty.go b/pkg/db/mysql/dirty.go index d7abdb91..ed06dc79 100644 --- a/pkg/db/mysql/dirty.go +++ b/pkg/db/mysql/dirty.go @@ -47,7 +47,9 @@ func (c *mysqlDB) CleanupDirty(ctx context.Context) error { return nil } -func (c *mysqlDB) ReplaceDirtyDomainPayloads(ctx context.Context, firstRow, lastRow int) error { +func (c *mysqlDB) RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context, + firstRow, lastRow int) error { + // Call the certificate coalescing stored procedure with these parameters. str := "CALL calc_dirty_domains_certs(?,?)" _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index ebf061d6..7117b61b 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -118,7 +118,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { // Check the certificate coalescing: under leaf there must be 4 IDs, for the certs. 
for i, leaf := range leafCerts { domainID := common.SHA256Hash32Bytes([]byte(leaf)) - gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesPayload(ctx, domainID) + gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesIDs(ctx, domainID) require.NoError(t, err) expectedSize := common.SHA256Size * len(certs) / len(leafCerts) require.Len(t, gotCertIDs, expectedSize, "bad length, should be %d but it's %d", @@ -138,7 +138,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { } for name, policies := range policiesPerName { id := common.SHA256Hash32Bytes([]byte(name)) - gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesPayload(ctx, id) + gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesIDs(ctx, id) require.NoError(t, err) // For each sequence of policies, compute the ID of their JSON. polIDs := computeIDsOfPolicies(policies) diff --git a/pkg/db/mysql/policies.go b/pkg/db/mysql/policies.go index 598c2d7c..7c58d423 100644 --- a/pkg/db/mysql/policies.go +++ b/pkg/db/mysql/policies.go @@ -56,7 +56,7 @@ func (c *mysqlDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Ou return present, nil } -func (c *mysqlDB) InsertPolicies(ctx context.Context, ids, parents []*common.SHA256Output, +func (c *mysqlDB) UpdatePolicies(ctx context.Context, ids, parents []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { if len(ids) == 0 { @@ -105,7 +105,7 @@ func (c *mysqlDB) UpdateDomainPolicies(ctx context.Context, return err } -func (c *mysqlDB) RetrieveDomainPoliciesPayload(ctx context.Context, domainID common.SHA256Output, +func (c *mysqlDB) RetrieveDomainPoliciesIDs(ctx context.Context, domainID common.SHA256Output, ) (*common.SHA256Output, []byte, error) { str := "SELECT policy_ids_id, policy_ids FROM domain_payloads WHERE domain_id = ?" 
diff --git a/pkg/mapserver/responder/responder.go b/pkg/mapserver/responder/responder.go index c3041549..433eb3be 100644 --- a/pkg/mapserver/responder/responder.go +++ b/pkg/mapserver/responder/responder.go @@ -70,12 +70,12 @@ func (r *MapResponder) GetProof(ctx context.Context, domainName string, if isPoP { proofType = mapCommon.PoP de.CertIDsID, de.CertIDs, err = - r.conn.RetrieveDomainCertificatesPayload(ctx, domainPartID) + r.conn.RetrieveDomainCertificatesIDs(ctx, domainPartID) if err != nil { return nil, fmt.Errorf("error obtaining x509 payload for %s: %w", domainPart, err) } de.PolicyIDsID, de.PolicyIDs, err = - r.conn.RetrieveDomainPoliciesPayload(ctx, domainPartID) + r.conn.RetrieveDomainPoliciesIDs(ctx, domainPartID) if err != nil { return nil, fmt.Errorf("error obtaining policies payload for %s: %w", domainPart, err) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index fa5f20ad..89ddf03a 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -326,7 +326,7 @@ func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { return err } // Do all updates at once, in one thread/connection (faster than multiple routines). - if err := conn.ReplaceDirtyDomainPayloads(ctx, 0, dirtyCount-1); err != nil { + if err := conn.RecomputeDirtyDomainsCertAndPolicyIDs(ctx, 0, dirtyCount-1); err != nil { return fmt.Errorf("coalescing payloads of dirty domains: %w", err) } return nil @@ -404,7 +404,7 @@ func insertCerts(ctx context.Context, conn db.Conn, names [][]string, ids, parentIDs []*common.SHA256Output, expirations []*time.Time, payloads [][]byte) error { // Send hash, parent hash, expiration and payload to the certs table. 
- if err := conn.InsertCerts(ctx, ids, parentIDs, expirations, payloads); err != nil { + if err := conn.UpdateCerts(ctx, ids, parentIDs, expirations, payloads); err != nil { return fmt.Errorf("inserting certificates: %w", err) } @@ -459,7 +459,7 @@ func insertPolicies(ctx context.Context, conn db.Conn, names []string, ids []*co expirations[i] = &t } // Update policies: - if err := conn.InsertPolicies(ctx, ids, parents, expirations, payloads); err != nil { + if err := conn.UpdatePolicies(ctx, ids, parents, expirations, payloads); err != nil { return fmt.Errorf("inserting policies: %w", err) } diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 8a1000eb..6c8edd62 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -80,7 +80,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { for i, leaf := range leafCerts { domainID := common.SHA256Hash32Bytes([]byte(leaf)) // t.Logf("%s: %s", leaf, hex.EncodeToString(domainID[:])) - gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesPayload(ctx, domainID) + gotCertIDsID, gotCertIDs, err := conn.RetrieveDomainCertificatesIDs(ctx, domainID) require.NoError(t, err) expectedSize := common.SHA256Size * len(certs) / len(leafCerts) require.Len(t, gotCertIDs, expectedSize, "bad length, should be %d but it's %d", @@ -101,7 +101,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { } for name, policies := range policiesPerName { id := common.SHA256Hash32Bytes([]byte(name)) - gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesPayload(ctx, id) + gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesIDs(ctx, id) require.NoError(t, err) // For each sequence of policies, compute the ID of their JSON. polIDs := computeIDsOfPolicies(policies) From d28fc316cc832ea95882e519d27ddf83fa01fe79 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 31 May 2023 11:30:30 +0200 Subject: [PATCH 135/187] Fix test. 
--- pkg/common/hasher.go | 1 + pkg/db/mysql/mysql_test.go | 34 ++++++++++++--------------- pkg/mapserver/updater/updater_test.go | 12 +++++++--- 3 files changed, 25 insertions(+), 22 deletions(-) diff --git a/pkg/common/hasher.go b/pkg/common/hasher.go index 885b9009..61bd7a83 100644 --- a/pkg/common/hasher.go +++ b/pkg/common/hasher.go @@ -49,6 +49,7 @@ func IDsToBytes(IDs []*SHA256Output) []byte { // SortIDsAndGlue takes a sequence of IDs, sorts them alphabetically, and glues every byte of // them together. +// The IDs are expected to be unique. func SortIDsAndGlue(IDs []*SHA256Output) []byte { // Copy slice to avoid mutating of the original. ids := append(IDs[:0:0], IDs...) diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 7117b61b..202265d6 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -4,7 +4,6 @@ import ( "context" "encoding/hex" "math/rand" - "os" "testing" "time" @@ -84,6 +83,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { require.NoError(t, err) defer conn.Close() + // Prepare two mock leaf certificates, with their trust chains. leafCerts := []string{ "leaf.certs.com", "example.certs.com", @@ -100,11 +100,7 @@ func TestCoalesceForDirtyDomains(t *testing.T) { certNames = append(certNames, certNames2...) } - // Ingest two mock policies. - data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") - require.NoError(t, err) - pols, err := util.LoadPoliciesFromRaw(data) - require.NoError(t, err) + pols := random.BuildTestRandomPolicyHierarchy(t, "domain_with_policies.com") // Update with certificates and policies. 
err = updater.UpdateWithKeepExisting(ctx, conn, certNames, certIDs, parentCertIDs, @@ -126,7 +122,8 @@ func TestCoalesceForDirtyDomains(t *testing.T) { // From the certificate IDs, grab the IDs corresponding to this leaf: N := len(certIDs) / len(leafCerts) // IDs per leaf = total / leaf_count expectedCertIDs, expectedCertIDsID := glueSortedIDsAndComputeItsID(certIDs[i*N : (i+1)*N]) - t.Logf("expectedCertIDs: %s\n", hex.EncodeToString(expectedCertIDs)) + t.Logf("Certificate IDs for domain name \"%s\":\nexpected: %s\ngot: %s", + leaf, hex.EncodeToString(expectedCertIDs), hex.EncodeToString(gotCertIDs)) require.Equal(t, expectedCertIDs, gotCertIDs) require.Equal(t, expectedCertIDsID, gotCertIDsID) } @@ -143,7 +140,8 @@ func TestCoalesceForDirtyDomains(t *testing.T) { // For each sequence of policies, compute the ID of their JSON. polIDs := computeIDsOfPolicies(policies) expectedPolIDs, expectedPolIDsID := glueSortedIDsAndComputeItsID(polIDs) - t.Logf("expectedPolIDs: %s\n", hex.EncodeToString(expectedPolIDs)) + t.Logf("Policy IDs for domain name \"%s\":\nexpected: %s\ngot: %s", + name, hex.EncodeToString(expectedPolIDs), hex.EncodeToString(gotPolIDs)) require.Equal(t, expectedPolIDs, gotPolIDs) require.Equal(t, expectedPolIDsID, gotPolIDsID) } @@ -157,18 +155,16 @@ func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.S } func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output { - IDs := make([]*common.SHA256Output, len(policies)) - for i, pol := range policies { + set := make(map[common.SHA256Output]struct{}, len(policies)) + for _, pol := range policies { id := common.SHA256Hash32Bytes(pol.Raw()) - IDs[i] = &id + set[id] = struct{}{} } - return IDs -} -func randomBytes(t require.TestingT, size int) []byte { - buff := make([]byte, size) - n, err := rand.Read(buff) - require.NoError(t, err) - require.Equal(t, size, n) - return buff + IDs := make([]*common.SHA256Output, 0, len(set)) + for k := range set { + k := k + IDs 
= append(IDs, &k) + } + return IDs } diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 6c8edd62..5e02aa0d 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -162,10 +162,16 @@ func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.S } func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output { - IDs := make([]*common.SHA256Output, len(policies)) - for i, pol := range policies { + set := make(map[common.SHA256Output]struct{}, len(policies)) + for _, pol := range policies { id := common.SHA256Hash32Bytes(pol.Raw()) - IDs[i] = &id + set[id] = struct{}{} + } + + IDs := make([]*common.SHA256Output, 0, len(set)) + for k := range set { + k := k + IDs = append(IDs, &k) } return IDs } From f025e72f979ecb81755ad0dfc828940390d49934 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 1 Jun 2023 19:12:39 +0200 Subject: [PATCH 136/187] Add experimental collate function. But seems to be way too slow. --- pkg/db/mysql/dirty.go | 17 +++------ tools/create_schema.sh | 80 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+), 12 deletions(-) diff --git a/pkg/db/mysql/dirty.go b/pkg/db/mysql/dirty.go index ed06dc79..f6744c14 100644 --- a/pkg/db/mysql/dirty.go +++ b/pkg/db/mysql/dirty.go @@ -48,20 +48,13 @@ func (c *mysqlDB) CleanupDirty(ctx context.Context) error { } func (c *mysqlDB) RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context, - firstRow, lastRow int) error { + firstRow, lastRow int) error { // deleteme no need for parameters - // Call the certificate coalescing stored procedure with these parameters. - str := "CALL calc_dirty_domains_certs(?,?)" - _, err := c.db.ExecContext(ctx, str, firstRow, lastRow) - if err != nil { - return fmt.Errorf("coalescing certificates for domains: %w", err) - } - - // Call the policy coalescing stored procedure with these parameters. 
- str = "CALL calc_dirty_domains_policies(?,?)" - _, err = c.db.ExecContext(ctx, str, firstRow, lastRow) + // Call the coalescing stored procedure without parameters. + str := "CALL calc_dirty_domains()" + _, err := c.db.ExecContext(ctx, str) if err != nil { - return fmt.Errorf("coalescing policies for domains: %w", err) + return fmt.Errorf("coalescing for domains: %w", err) } return nil } diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 5a9afc1d..b4a168fc 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -406,6 +406,86 @@ BEGIN ) AS hasher_query; END$$ DELIMITER ; +EOF + ) + echo "$CMD" | $MYSQLCMD + + CMD=$(cat < Date: Fri, 2 Jun 2023 11:12:40 +0200 Subject: [PATCH 137/187] Change collate function with FULL OUTER JOIN. The new calc_dirty_domains uses a FULL OUTER JOIN instead of a procedural approach. This stored procedure seems performant enough now. --- tools/create_schema.sh | 159 +++++++++++++++++++++++++---------------- 1 file changed, 96 insertions(+), 63 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index b4a168fc..262e4743 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -414,76 +414,109 @@ EOF USE $DBNAME; DROP PROCEDURE IF EXISTS calc_dirty_domains; DELIMITER $$ --- firstRow and lastRow are parameters specifying which is the first row of dirty, --- and the last one for which it will update the policies. --- The SP needs ~ 5 seconds per 20K dirty domains. +-- Because MySQL doesn't support FULL OUTER JOIN, we have to emulate it with: +-- SELECT * FROM t1 +-- LEFT JOIN t2 ON t1.id = t2.id +-- UNION +-- SELECT * FROM t1 +-- RIGHT JOIN t2 ON t1.id = t2.id +-- https://stackoverflow.com/questions/4796872/how-can-i-do-a-full-outer-join-in-mysql +-- +-- The table t1 is a CTE that retrieves the certificates. +-- The table t2 is a CTE that retrieves the policies. +-- This SP needs ~ 5 seconds per 20K dirty domains. 
CREATE PROCEDURE calc_dirty_domains() BEGIN - DECLARE var_domain_id VARBINARY(32); - DECLARE dirty_done BOOLEAN DEFAULT FALSE; - DECLARE cur_dirty CURSOR FOR SELECT domain_id FROM dirty; - DECLARE CONTINUE HANDLER FOR NOT FOUND SET dirty_done := TRUE; SET group_concat_max_len = 1073741824; -- so that GROUP_CONCAT doesn't truncate results + -- Replace the domain ID, its certificates, policies, and their corresponding SHA256 for all dirty domains. + REPLACE INTO domain_payloads(domain_id, cert_ids, cert_ids_id, policy_ids, policy_ids_id) -- Values from subquery. + SELECT domain_id, cert_ids, UNHEX(SHA2(cert_ids, 256)) AS cert_ids_id, policy_ids, UNHEX(SHA2(policy_ids, 256)) AS policy_ids_id FROM -- Subquery to compute the SHA256 in place. + ( + + SELECT A.domain_id,GROUP_CONCAT(cert_id ORDER BY cert_id SEPARATOR '') AS cert_ids,GROUP_CONCAT(policy_id ORDER BY policy_id SEPARATOR '') AS policy_ids FROM + ( + WITH RECURSIVE cte AS ( + -- Base case: specify which leaf certs we choose: those that + -- have a link with a domain that is part of the dirty domains. + SELECT dirty.domain_id, certs.cert_id, parent_id + FROM certs + INNER JOIN domain_certs ON certs.cert_id = domain_certs.cert_id + INNER JOIN dirty ON domain_certs.domain_id = dirty.domain_id + UNION ALL + -- Recursive case: any certificate that has its ID as + -- parent ID of the previous set, recursively. + SELECT cte.domain_id, certs.cert_id, certs.parent_id + FROM certs + JOIN cte ON certs.cert_id = cte.parent_id + ) + SELECT DISTINCT domain_id, cert_id FROM cte + ) AS A + LEFT OUTER JOIN + ( + WITH RECURSIVE cte AS ( + -- Base case: specify which leaf policies we choose: those that + -- have a link with a domain that is part of the dirty domains. 
+ SELECT dirty.domain_id, policies.policy_id, parent_id + FROM policies + INNER JOIN domain_policies ON policies.policy_id = domain_policies.policy_id + INNER JOIN dirty ON domain_policies.domain_id = dirty.domain_id + UNION ALL + -- Recursive case: any poilicy that has its ID as + -- parent ID of the previous set, recursively. + SELECT cte.domain_id, policies.policy_id, policies.parent_id + FROM policies + JOIN cte ON policies.policy_id = cte.parent_id + ) + SELECT DISTINCT domain_id, policy_id FROM cte + ) AS B + ON A.domain_id = B.domain_id + GROUP BY domain_id + + UNION + + SELECT B.domain_id,GROUP_CONCAT(cert_id ORDER BY cert_id SEPARATOR '') AS cert_ids,GROUP_CONCAT(policy_id ORDER BY policy_id SEPARATOR '') AS policy_ids FROM + ( + WITH RECURSIVE cte AS ( + -- Base case: specify which leaf certs we choose: those that + -- have a link with a domain that is part of the dirty domains. + SELECT dirty.domain_id, certs.cert_id, parent_id + FROM certs + INNER JOIN domain_certs ON certs.cert_id = domain_certs.cert_id + INNER JOIN dirty ON domain_certs.domain_id = dirty.domain_id + UNION ALL + -- Recursive case: any certificate that has its ID as + -- parent ID of the previous set, recursively. + SELECT cte.domain_id, certs.cert_id, certs.parent_id + FROM certs + JOIN cte ON certs.cert_id = cte.parent_id + ) + SELECT DISTINCT domain_id, cert_id FROM cte + ) AS A + RIGHT OUTER JOIN + ( + WITH RECURSIVE cte AS ( + -- Base case: specify which leaf policies we choose: those that + -- have a link with a domain that is part of the dirty domains. + SELECT dirty.domain_id, policies.policy_id, parent_id + FROM policies + INNER JOIN domain_policies ON policies.policy_id = domain_policies.policy_id + INNER JOIN dirty ON domain_policies.domain_id = dirty.domain_id + UNION ALL + -- Recursive case: any poilicy that has its ID as + -- parent ID of the previous set, recursively. 
+ SELECT cte.domain_id, policies.policy_id, policies.parent_id + FROM policies + JOIN cte ON policies.policy_id = cte.parent_id + ) + SELECT DISTINCT domain_id, policy_id FROM cte + ) AS B + ON A.domain_id = B.domain_id + GROUP BY domain_id - SET @var_cert_ids = ''; - SET @var_policy_ids = ''; - OPEN cur_dirty; - dirty_loop: WHILE dirty_done = FALSE DO - FETCH cur_dirty INTO var_domain_id; - IF dirty_done THEN - LEAVE dirty_loop; - END IF; - SELECT HEX(var_domain_id); -- deleteme - - -- Select the concatenation of all cert IDs (sorted) of a domain. - SELECT GROUP_CONCAT(cert_id ORDER BY cert_id SEPARATOR '') AS cert_ids FROM( - -- The CTE lists all certs that are reachable by the domain_id - WITH RECURSIVE cte AS ( - -- Base case: specify which leaf certs we choose: those that - -- have a link with a domain that is part of the dirty domains. - SELECT domain_certs.domain_id, certs.cert_id, parent_id - FROM certs - INNER JOIN domain_certs ON certs.cert_id = domain_certs.cert_id - WHERE domain_certs.domain_id = var_domain_id - UNION ALL - -- Recursive case: any poilicy that has its ID as - -- parent ID of the previous set, recursively. - SELECT cte.domain_id, certs.cert_id, certs.parent_id - FROM certs - JOIN cte ON certs.cert_id = cte.parent_id - ) - SELECT DISTINCT domain_id, cert_id FROM cte - ) AS collate_cert_ids_query - INTO @var_cert_ids; - SELECT HEX(@var_cert_ids); -- deleteme - - -- Select the concatenation of all policy IDs (sorted) of a domain. - SELECT GROUP_CONCAT(policy_id ORDER BY policy_id SEPARATOR '') AS policy_ids FROM( - -- The CTE lists all policies that are reachable by the domain_id - WITH RECURSIVE cte AS ( - -- Base case: specify which leaf policies we choose: those that - -- have a link with a domain that is part of the dirty domains. 
- SELECT dirty.domain_id, policies.policy_id, parent_id - FROM policies - INNER JOIN domain_policies ON policies.policy_id = domain_policies.policy_id - INNER JOIN dirty ON domain_policies.domain_id = dirty.domain_id - WHERE dirty.domain_id = var_domain_id - UNION ALL - -- Recursive case: any poilicy that has its ID as - -- parent ID of the previous set, recursively. - SELECT cte.domain_id, policies.policy_id, policies.parent_id - FROM policies - JOIN cte ON policies.policy_id = cte.parent_id - ) - SELECT DISTINCT domain_id, policy_id FROM cte - ) AS collate_policy_ids_query - INTO @var_policy_ids; + ) AS hasher_query; - REPLACE INTO domain_payloads(domain_id, cert_ids, cert_ids_id, policy_ids, policy_ids_id) - VALUES(var_domain_id, @var_cert_ids, UNHEX(SHA2(@var_cert_ids, 256)), @var_policy_ids, UNHEX(SHA2(@var_policy_ids, 256))); - END WHILE; END$$ DELIMITER ; EOF From 1ae5fcbf7665888062710674aefb2726a2e9b375 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 2 Jun 2023 11:20:06 +0200 Subject: [PATCH 138/187] Payload functions in DB. --- pkg/db/db.go | 8 ++ pkg/db/mysql/certs.go | 34 ++++++ pkg/db/mysql/mysql_test.go | 133 ++++++++++++++++++---- pkg/db/mysql/policies.go | 35 ++++++ pkg/mapserver/responder/responder_test.go | 2 +- pkg/mapserver/updater/updater.go | 2 +- 6 files changed, 189 insertions(+), 25 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index 14eeef8e..1b235e0b 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -63,6 +63,10 @@ type certs interface { // itself, given the domain ID. RetrieveDomainCertificatesIDs(ctx context.Context, id common.SHA256Output) ( certIDsID *common.SHA256Output, certIDs []byte, err error) + + // RetrieveCertificatePayloads returns the payload for each of the certificates identified + // by the passed ID. 
+ RetrieveCertificatePayloads(ctx context.Context, IDs []*common.SHA256Output) ([][]byte, error) } type policies interface { @@ -80,6 +84,10 @@ type policies interface { // This includes the RPCs, SPs, etc. RetrieveDomainPoliciesIDs(ctx context.Context, id common.SHA256Output) ( payloadID *common.SHA256Output, payload []byte, err error) + + // RetrievePolicyPayloads returns the payload for each of the policies identified + // by the passed ID. + RetrievePolicyPayloads(ctx context.Context, IDs []*common.SHA256Output) ([][]byte, error) } type Conn interface { diff --git a/pkg/db/mysql/certs.go b/pkg/db/mysql/certs.go index cc1c6184..2848e1e1 100644 --- a/pkg/db/mysql/certs.go +++ b/pkg/db/mysql/certs.go @@ -103,6 +103,40 @@ func (c *mysqlDB) RetrieveDomainCertificatesIDs(ctx context.Context, domainID co return IDptr, certIDs, nil } +// RetrieveCertificatePayloads returns the payload for each certificate identified by the IDs +// parameter, in the same order (element i corresponds to IDs[i]). +func (c *mysqlDB) RetrieveCertificatePayloads(ctx context.Context, IDs []*common.SHA256Output, +) ([][]byte, error) { + + str := "SELECT cert_id,payload from certs WHERE cert_id IN " + repeatStmt(1, len(IDs)) + params := make([]any, len(IDs)) + for i, id := range IDs { + params[i] = id[:] + } + rows, err := c.db.QueryContext(ctx, str, params...) + if err != nil { + return nil, err + } + + m := make(map[common.SHA256Output][]byte, len(IDs)) + for rows.Next() { + var id, payload []byte + if err := rows.Scan(&id, &payload); err != nil { + return nil, err + } + idArray := (*common.SHA256Output)(id) + m[*idArray] = payload + } + + // Sort them in the same order as the IDs. 
+ payloads := make([][]byte, len(IDs)) + for i, id := range IDs { + payloads[i] = m[*id] + } + + return payloads, nil +} + // checkCertsExist should not be called with larger than ~1000 elements, the query being used // may fail with a message like: // Error 1436 (HY000): Thread stack overrun: 1028624 bytes used of a 1048576 byte stack, diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 202265d6..7d6cc09c 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -14,6 +14,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db/mysql" "github.com/netsec-ethz/fpki/pkg/mapserver/updater" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/tests/testdb" "github.com/netsec-ethz/fpki/pkg/util" @@ -88,19 +89,13 @@ func TestCoalesceForDirtyDomains(t *testing.T) { "leaf.certs.com", "example.certs.com", } - var certs []*ctx509.Certificate - var certIDs, parentCertIDs []*common.SHA256Output - var certNames [][]string - for _, leaf := range leafCerts { - // Create two mock x509 chains on top of leaf: - certs2, certIDs2, parentCertIDs2, certNames2 := random.BuildTestRandomCertHierarchy(t, leaf) - certs = append(certs, certs2...) - certIDs = append(certIDs, certIDs2...) - parentCertIDs = append(parentCertIDs, parentCertIDs2...) - certNames = append(certNames, certNames2...) - } + certs, certIDs, parentCertIDs, certNames := testCertHierarchyForLeafs(t, leafCerts) - pols := random.BuildTestRandomPolicyHierarchy(t, "domain_with_policies.com") + // Prepare a mock policy chain. + leafPols := []string{ + "domain_with_policies.com", + } + pols, polIDs := testPolicyHierarchyForLeafs(t, leafPols) // Update with certificates and policies. 
err = updater.UpdateWithKeepExisting(ctx, conn, certNames, certIDs, parentCertIDs, @@ -129,24 +124,116 @@ func TestCoalesceForDirtyDomains(t *testing.T) { } // Check policy coalescing. - policiesPerName := make(map[string][]common.PolicyObject, len(pols)) - for _, pol := range pols { - policiesPerName[pol.Domain()] = append(policiesPerName[pol.Domain()], pol) - } - for name, policies := range policiesPerName { - id := common.SHA256Hash32Bytes([]byte(name)) - gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesIDs(ctx, id) + for i, leaf := range leafPols { + domainID := common.SHA256Hash32Bytes([]byte(leaf)) + gotPolIDsID, gotPolIDs, err := conn.RetrieveDomainPoliciesIDs(ctx, domainID) require.NoError(t, err) - // For each sequence of policies, compute the ID of their JSON. - polIDs := computeIDsOfPolicies(policies) - expectedPolIDs, expectedPolIDsID := glueSortedIDsAndComputeItsID(polIDs) + expectedSize := common.SHA256Size * len(pols) / len(leafPols) + require.Len(t, gotPolIDs, expectedSize, "bad length, should be %d but it's %d", + expectedSize, len(gotPolIDs)) + // From the policy IDs, grab the IDs corresponding to this leaf: + N := len(polIDs) / len(leafPols) + expectedPolIDs, expectedPolIDsID := glueSortedIDsAndComputeItsID(polIDs[i*N : (i+1)*N]) t.Logf("Policy IDs for domain name \"%s\":\nexpected: %s\ngot: %s", - name, hex.EncodeToString(expectedPolIDs), hex.EncodeToString(gotPolIDs)) + leaf, hex.EncodeToString(expectedPolIDs), hex.EncodeToString(gotPolIDs)) require.Equal(t, expectedPolIDs, gotPolIDs) require.Equal(t, expectedPolIDsID, gotPolIDsID) } } +func TestRetrieveCertificatePayloads(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) + defer cancelF() + + // Configure a test DB. + config, removeF := testdb.ConfigureTestDB(t) + defer removeF() + + // Connect to the DB. + conn, err := testdb.Connect(config) + require.NoError(t, err) + defer conn.Close() + + // Ingest some data. 
+ leafCerts := []string{ + "leaf.certs.com", + "example.certs.com", + } + certs, certIDs, parentCertIDs, certNames := testCertHierarchyForLeafs(t, leafCerts) + pols, polIDs := testPolicyHierarchyForLeafs(t, leafCerts) + err = updater.UpdateWithKeepExisting(ctx, conn, certNames, certIDs, parentCertIDs, + certs, util.ExtractExpirations(certs), pols) + require.NoError(t, err) + // Coalescing of payloads. + err = updater.CoalescePayloadsForDirtyDomains(ctx, conn) + require.NoError(t, err) + + // I can retrieve any of the certificate by their IDs. + gotCerts, err := conn.RetrieveCertificatePayloads(ctx, certIDs) + require.NoError(t, err) + expectedCerts := make([][]byte, len(certs)) + for i, cert := range certs { + expectedCerts[i] = cert.Raw + } + require.Equal(t, expectedCerts, gotCerts) + // Do the same one by one: + for i := range expectedCerts { + gotCerts, err := conn.RetrieveCertificatePayloads(ctx, certIDs[i:i+1]) + require.NoError(t, err) + require.Equal(t, expectedCerts[i:i+1], gotCerts) + } + + // Do the same for policies. + gotPols, err := conn.RetrievePolicyPayloads(ctx, polIDs) + require.NoError(t, err) + expectedPols := make([][]byte, len(pols)) + for i, pol := range pols { + expectedPols[i] = pol.Raw() + } + require.Equal(t, expectedPols, gotPols) + // Do the same one by one: + for i := range expectedPols { + gotPols, err := conn.RetrievePolicyPayloads(ctx, polIDs[i:i+1]) + require.NoError(t, err) + require.Equal(t, expectedPols[i:i+1], gotPols) + } +} + +// testCertHierarchyForLeafs returns a hierarchy per leaf certificate. Each certificate is composed +// of two mock chains, like: leaf->c1.com->c0.com, leaf->c0.com , created using the function +// BuildTestRandomCertHierarchy. 
+func testCertHierarchyForLeafs(t tests.T, leaves []string) (certs []*ctx509.Certificate, + certIDs, parentCertIDs []*common.SHA256Output, certNames [][]string) { + + for _, leaf := range leaves { + // Create two mock x509 chains on top of leaf: + certs2, certIDs2, parentCertIDs2, certNames2 := random.BuildTestRandomCertHierarchy(t, leaf) + certs = append(certs, certs2...) + certIDs = append(certIDs, certIDs2...) + parentCertIDs = append(parentCertIDs, parentCertIDs2...) + certNames = append(certNames, certNames2...) + } + return +} + +// testPolicyHierarchyForLeafs returns simply a policy hierarchy per leaf name, created using +// the function BuildTestRandomPolicyHierarchy. +func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyObject, + polIDs []*common.SHA256Output) { + + for _, name := range leaves { + pols = append(pols, + random.BuildTestRandomPolicyHierarchy(t, name)...) + } + + polIDs = make([]*common.SHA256Output, len(pols)) + for i, pol := range pols { + id := common.SHA256Hash32Bytes(pol.Raw()) + polIDs[i] = &id + } + return +} + func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.SHA256Output) { gluedIDs := common.SortIDsAndGlue(IDs) // Compute the hash of the glued IDs. diff --git a/pkg/db/mysql/policies.go b/pkg/db/mysql/policies.go index 7c58d423..77f26f98 100644 --- a/pkg/db/mysql/policies.go +++ b/pkg/db/mysql/policies.go @@ -120,3 +120,38 @@ func (c *mysqlDB) RetrieveDomainPoliciesIDs(ctx context.Context, domainID common } return IDptr, policyIDs, nil } + +// RetrievePolicyPayloads returns the payload for each policy identified by the IDs +// parameter, in the same order (element i corresponds to IDs[i]). 
+func (c *mysqlDB) RetrievePolicyPayloads(ctx context.Context, IDs []*common.SHA256Output, +) ([][]byte, error) { + + str := "SELECT policy_id,payload from policies WHERE policy_id IN " + + repeatStmt(1, len(IDs)) + params := make([]any, len(IDs)) + for i, id := range IDs { + params[i] = id[:] + } + rows, err := c.db.QueryContext(ctx, str, params...) + if err != nil { + return nil, err + } + + m := make(map[common.SHA256Output][]byte, len(IDs)) + for rows.Next() { + var id, payload []byte + if err := rows.Scan(&id, &payload); err != nil { + return nil, err + } + idArray := (*common.SHA256Output)(id) + m[*idArray] = payload + } + + // Sort them in the same order as the IDs. + payloads := make([][]byte, len(IDs)) + for i, id := range IDs { + payloads[i] = m[*id] + } + + return payloads, nil +} diff --git a/pkg/mapserver/responder/responder_test.go b/pkg/mapserver/responder/responder_test.go index e37ecabc..dc402699 100644 --- a/pkg/mapserver/responder/responder_test.go +++ b/pkg/mapserver/responder/responder_test.go @@ -125,7 +125,7 @@ func TestProof(t *testing.T) { id = common.SHA256Hash32Bytes(policiesB[0].Raw()) checkProof(t, &id, proofChain) - // Check b.com: + // Check c.com: proofChain, err = responder.GetProof(ctx, "c.com") require.NoError(t, err) id = common.SHA256Hash32Bytes(certsC[0].Raw) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 89ddf03a..81b684f1 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -326,7 +326,7 @@ func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { return err } // Do all updates at once, in one thread/connection (faster than multiple routines). 
- if err := conn.RecomputeDirtyDomainsCertAndPolicyIDs(ctx, 0, dirtyCount-1); err != nil { + if err := conn.RecomputeDirtyDomainsCertAndPolicyIDs(ctx, 0, dirtyCount-1); err != nil { // deleteme count not needed anymore return fmt.Errorf("coalescing payloads of dirty domains: %w", err) } return nil From 63104bb9d5b5bccbc8c15e45b6c0d2da0a399568 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 2 Jun 2023 11:20:24 +0200 Subject: [PATCH 139/187] Fix test function for random policies. It now returns a non empty JSON inside each policy. --- pkg/tests/random/random.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index ac7c7bcf..25ab3b1a 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -45,6 +45,10 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic CAName: "c0.com", CASignature: RandomBytesForTest(t, 100), } + data, err := common.ToJSON(rpc) + require.NoError(t, err) + rpc.RawJSON = data + sp := &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ Subject: domainName, @@ -53,6 +57,10 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic CASignature: RandomBytesForTest(t, 100), RootCertSignature: RandomBytesForTest(t, 100), } + data, err = common.ToJSON(sp) + require.NoError(t, err) + sp.RawJSON = data + return []common.PolicyObject{rpc, sp} } From ff429610a047eea705c47e4c9a7affd61bf81a5a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 2 Jun 2023 11:33:31 +0200 Subject: [PATCH 140/187] Call to coalesce function with no parameters. 
--- pkg/db/db.go | 5 +---- pkg/db/mysql/dirty.go | 12 +----------- pkg/mapserver/updater/updater.go | 7 +------ 3 files changed, 3 insertions(+), 21 deletions(-) diff --git a/pkg/db/db.go b/pkg/db/db.go index 1b235e0b..fc6ed9df 100644 --- a/pkg/db/db.go +++ b/pkg/db/db.go @@ -39,10 +39,7 @@ type dirty interface { // computes the aggregated payload for their certificates and policies, and stores it in the DB. // The aggregated payload takes into account all policies and certificates needed for that // domain, including e.g. the trust chain. - RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context, firstRow, lastRow int) error - - //DirtyDomainsCount returns the number of domains that are still to be updated. - DirtyDomainsCount(ctx context.Context) (int, error) + RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context) error CleanupDirty(ctx context.Context) error } diff --git a/pkg/db/mysql/dirty.go b/pkg/db/mysql/dirty.go index f6744c14..a56d2571 100644 --- a/pkg/db/mysql/dirty.go +++ b/pkg/db/mysql/dirty.go @@ -7,15 +7,6 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" ) -func (c *mysqlDB) DirtyDomainsCount(ctx context.Context) (int, error) { - str := "SELECT COUNT(*) FROM dirty" - var count int - if err := c.db.QueryRowContext(ctx, str).Scan(&count); err != nil { - return 0, fmt.Errorf("querying number of dirty domains: %w", err) - } - return count, nil -} - // RetrieveDirtyDomains returns the domain IDs that are still dirty, i.e. modified certificates for // that domain, but not yet coalesced and ingested by the SMT. 
func (c *mysqlDB) RetrieveDirtyDomains(ctx context.Context) ([]*common.SHA256Output, error) { @@ -47,8 +38,7 @@ func (c *mysqlDB) CleanupDirty(ctx context.Context) error { return nil } -func (c *mysqlDB) RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context, - firstRow, lastRow int) error { // deleteme no need for parameters +func (c *mysqlDB) RecomputeDirtyDomainsCertAndPolicyIDs(ctx context.Context) error { // Call the coalescing stored procedure without parameters. str := "CALL calc_dirty_domains()" diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 81b684f1..82936350 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -320,13 +320,8 @@ func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]s } func CoalescePayloadsForDirtyDomains(ctx context.Context, conn db.Conn) error { - // How many domains to update? - dirtyCount, err := conn.DirtyDomainsCount(ctx) - if err != nil { - return err - } // Do all updates at once, in one thread/connection (faster than multiple routines). - if err := conn.RecomputeDirtyDomainsCertAndPolicyIDs(ctx, 0, dirtyCount-1); err != nil { // deleteme count not needed anymore + if err := conn.RecomputeDirtyDomainsCertAndPolicyIDs(ctx); err != nil { return fmt.Errorf("coalescing payloads of dirty domains: %w", err) } return nil From 2ca394fc4800027feaa1f6be8dc8038e6a1b0590 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 2 Jun 2023 11:38:59 +0200 Subject: [PATCH 141/187] Remove unused tables and procs from schema. 
--- tools/create_schema.sh | 261 ----------------------------------------- 1 file changed, 261 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 262e4743..4fc40734 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -149,267 +149,6 @@ EOF echo "$CMD" | $MYSQLCMD - -# CMD=$(cat < 0 DO - SET @id = LEFT(IDs, 32); - SET IDs = RIGHT(IDs, LENGTH(IDs)-32); - SET @sql_ids = CONCAT(@sql_ids, "UNHEX('", HEX(@id),"'),"); - END WHILE; - -- Remove trailing comma. - RETURN LEFT(@sql_ids, LENGTH(@sql_ids)-1); -END $$ -DELIMITER ; -EOF - ) - echo "$CMD" | $MYSQLCMD - - - CMD=$(cat < 0 DO - SET @leaves = CONCAT(@leaves, @pending); - SET @str = CONCAT( - "SELECT GROUP_CONCAT( DISTINCT parent_id SEPARATOR '' ) - INTO @pending FROM certs WHERE id IN (", IDsToSql(@pending), ");"); - PREPARE stmt FROM @str; - EXECUTE stmt; - DEALLOCATE PREPARE stmt; - END WHILE; - -- Run a last query to get only the DISTINCT IDs, from all that we have in @leaves. - SET @str = CONCAT( - "SELECT GROUP_CONCAT( DISTINCT id SEPARATOR '' ) - INTO @leaves FROM certs WHERE id IN (", IDsToSql(@leaves), ");"); - PREPARE stmt FROM @str; - EXECUTE stmt; - DEALLOCATE PREPARE stmt; - -- @leaves contains now all the certificate IDs in hexadecimal - -- that are reachable from the domain names - SET cert_ids = @leaves; -END $$ -DELIMITER ; -EOF - ) - echo "$CMD" | $MYSQLCMD - - - CMD=$(cat < 0 DO - SET @id = LEFT(domain_ids, 32); - SET domain_ids = RIGHT(domain_ids,LENGTH(domain_ids)-32); - SET @certIDs = ''; - CALL cert_IDs_for_domain(@id, @certIDs); - CALL payloads_for_certs(@certIDs, @payload); - REPLACE INTO domain_payloads(domain_id, payload_id, payload) VALUES( @id, UNHEX(SHA2(@payload, 256)), @payload ); - END WHILE; -END$$ -DELIMITER ; -EOF - ) - echo "$CMD" | $MYSQLCMD - - CMD=$(cat < Date: Fri, 2 Jun 2023 11:59:12 +0200 Subject: [PATCH 142/187] Remove unused test function. 
--- pkg/db/mysql/mysql_test.go | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 7d6cc09c..9c652e3e 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -240,18 +240,3 @@ func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.S id := common.SHA256Hash32Bytes(gluedIDs) return gluedIDs, &id } - -func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output { - set := make(map[common.SHA256Output]struct{}, len(policies)) - for _, pol := range policies { - id := common.SHA256Hash32Bytes(pol.Raw()) - set[id] = struct{}{} - } - - IDs := make([]*common.SHA256Output, 0, len(set)) - for k := range set { - k := k - IDs = append(IDs, &k) - } - return IDs -} From 6b9996c41c3f3c0f3452e6704d335663fa120323 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 2 Jun 2023 12:22:28 +0200 Subject: [PATCH 143/187] Cleaned up updater type. --- pkg/mapserver/updater/updater.go | 164 +++++++++++-------------------- 1 file changed, 56 insertions(+), 108 deletions(-) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 82936350..e2b39a6a 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -3,6 +3,7 @@ package updater import ( "bytes" "context" + "encoding/hex" "fmt" "sort" "time" @@ -97,33 +98,6 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ return UpdateWithKeepExisting(ctx, mapUpdater.dbConn, names, IDs, parentIDs, certs, expirations, nil) } -// updateCerts: update the tables and SMT (in memory) using certificates -func (mapUpdater *MapUpdater) updateCerts(ctx context.Context, certs []*ctx509.Certificate, certChains [][]*ctx509.Certificate) error { - panic("deprecated: should never be called") - // keyValuePairs, numOfUpdates, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingCerts(ctx, certs, certChains) - // if err != nil { 
- // return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingCerts | %w", err) - // } else if numOfUpdates == 0 { - // return nil - // } - - // if len(keyValuePairs) == 0 { - // return nil - // } - - // keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) - // if err != nil { - // return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) - // } - - // _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) - // if err != nil { - // return fmt.Errorf("CollectCerts | Update | %w", err) - // } - - return nil -} - // UpdateRPCAndPC: update RPC and PC from url. Currently just mock PC and RPC func (mapUpdater *MapUpdater) UpdateRPCAndPC(ctx context.Context, ctUrl string, startIdx, endIdx int64) error { // get PC and RPC first @@ -139,91 +113,24 @@ func (mapUpdater *MapUpdater) UpdateRPCAndPCLocally(ctx context.Context, spList return mapUpdater.updateRPCAndPC(ctx, spList, rpcList) } -// updateRPCAndPC: update the tables and SMT (in memory) using PC and RPC -func (mapUpdater *MapUpdater) updateRPCAndPC(ctx context.Context, pcList []*common.SP, rpcList []*common.RPC) error { - panic("deprecated: should never be called") - - // // update the domain and - // keyValuePairs, _, err := mapUpdater.DeletemeUpdateDomainEntriesTableUsingRPCAndPC(ctx, rpcList, pcList, 10) - // if err != nil { - // return fmt.Errorf("CollectCerts | UpdateDomainEntriesUsingRPCAndPC | %w", err) - // } - - // if len(keyValuePairs) == 0 { - // return nil - // } - - // keyInput, valueInput, err := keyValuePairToSMTInput(keyValuePairs) - // if err != nil { - // return fmt.Errorf("CollectCerts | keyValuePairToSMTInput | %w", err) - // } - - // // update Sparse Merkle Tree - // _, err = mapUpdater.smt.Update(ctx, keyInput, valueInput) - // if err != nil { - // return fmt.Errorf("CollectCerts | Update | %w", err) - // } - return nil -} +func (mapUpdater *MapUpdater) updateCerts( + ctx context.Context, + certs []*ctx509.Certificate, + chains [][]*ctx509.Certificate, +) error { -// 
CommitSMTChanges: commit SMT changes to DB -func (mapUpdater *MapUpdater) CommitSMTChanges(ctx context.Context) error { - err := mapUpdater.smt.Commit(ctx) - if err != nil { - return fmt.Errorf("CommitChanges | Commit | %w", err) - } + // TODO(juagargi) return nil } -// keyValuePairToSMTInput: key value pair -> SMT update input -// deleteme: this function takes the payload and computes the hash of it. The hash is already -// stored in the DB with the new design: change both the function RetrieveDomainEntries and -// remove the hashing from this keyValuePairToSMTInput function. -func keyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ([][]byte, [][]byte, error) { - type inputPair struct { - Key [32]byte - Value []byte - } - updateInput := make([]inputPair, 0, len(keyValuePair)) - for _, pair := range keyValuePair { - updateInput = append(updateInput, inputPair{ - Key: pair.Key, - Value: common.SHA256Hash(pair.Value), // Compute SHA256 of the payload. - }) - } - - // Sorting is important, as the Trie.Update function expects the keys in sorted order. - sort.Slice(updateInput, func(i, j int) bool { - return bytes.Compare(updateInput[i].Key[:], updateInput[j].Key[:]) == -1 - }) - - keyResult := make([][]byte, 0, len(updateInput)) - valueResult := make([][]byte, 0, len(updateInput)) - - for _, pair := range updateInput { - // TODO(yongzhe): strange error - // if I do : append(keyResult, pair.Key[:]), the other elements in the slice will be affected - // Looks like the slice is storing the pointer of the value. - // However, append(valueResult, pair.Value) also works. 
I will have a look later - var newKey [32]byte - copy(newKey[:], pair.Key[:]) - keyResult = append(keyResult, newKey[:]) - - valueResult = append(valueResult, pair.Value) - - } - - return keyResult, valueResult, nil -} - -// GetRoot: get current root -func (mapUpdater *MapUpdater) GetRoot() []byte { - return mapUpdater.smt.Root -} +func (mapUpdater *MapUpdater) updateRPCAndPC( + ctx context.Context, + sps []*common.SP, + rpcs []*common.RPC, +) error { -// Close: close connection -func (mapUpdater *MapUpdater) Close() error { - return mapUpdater.smt.Close() + // TODO(juagargi) + return nil } func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]string, @@ -372,9 +279,9 @@ func UpdateSMT(ctx context.Context, conn db.Conn, cacheHeight int) error { // Load SMT. smtTrie, err := trie.NewTrie(root, common.SHA256Hash, conn) if err != nil { + err = fmt.Errorf("with root \"%s\", creating NewTrie: %w", hex.EncodeToString(root), err) panic(err) } - // smtTrie.CacheHeightLimit = cacheHeight // Get the dirty domains. domains, err := conn.RetrieveDirtyDomains(ctx) @@ -477,3 +384,44 @@ func runWhenFalse(mask []bool, fcn func(to, from int)) int { } return to } + +// keyValuePairToSMTInput: key value pair -> SMT update input +// deleteme: this function takes the payload and computes the hash of it. The hash is already +// stored in the DB with the new design: change both the function RetrieveDomainEntries and +// remove the hashing from this keyValuePairToSMTInput function. +func keyValuePairToSMTInput(keyValuePair []*db.KeyValuePair) ([][]byte, [][]byte, error) { + type inputPair struct { + Key [32]byte + Value []byte + } + updateInput := make([]inputPair, 0, len(keyValuePair)) + for _, pair := range keyValuePair { + updateInput = append(updateInput, inputPair{ + Key: pair.Key, + Value: common.SHA256Hash(pair.Value), // Compute SHA256 of the payload. + }) + } + + // Sorting is important, as the Trie.Update function expects the keys in sorted order. 
+ sort.Slice(updateInput, func(i, j int) bool { + return bytes.Compare(updateInput[i].Key[:], updateInput[j].Key[:]) == -1 + }) + + keyResult := make([][]byte, 0, len(updateInput)) + valueResult := make([][]byte, 0, len(updateInput)) + + for _, pair := range updateInput { + // TODO(yongzhe): strange error + // if I do : append(keyResult, pair.Key[:]), the other elements in the slice will be affected + // Looks like the slice is storing the pointer of the value. + // However, append(valueResult, pair.Value) also works. I will have a look later + var newKey [32]byte + copy(newKey[:], pair.Key[:]) + keyResult = append(keyResult, newKey[:]) + + valueResult = append(valueResult, pair.Value) + + } + + return keyResult, valueResult, nil +} From ef9177d087a6a7d0e625d972ee8a3a335c0fa6d9 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 2 Jun 2023 12:27:25 +0200 Subject: [PATCH 144/187] Renamed logpicker to logfetcher. --- .../{logpicker/logpicker.go => logfetcher/logfetcher.go} | 2 +- .../logpicker_test.go => logfetcher/logfetcher_test.go} | 2 +- pkg/mapserver/updater/updater.go | 8 ++++---- tests/benchmark/mapserver_benchmark/download_test.go | 6 +++--- .../mapserver_benchmark/tools/certs_analyse/main.go | 4 ++-- 5 files changed, 11 insertions(+), 11 deletions(-) rename pkg/mapserver/{logpicker/logpicker.go => logfetcher/logfetcher.go} (99%) rename pkg/mapserver/{logpicker/logpicker_test.go => logfetcher/logfetcher_test.go} (99%) diff --git a/pkg/mapserver/logpicker/logpicker.go b/pkg/mapserver/logfetcher/logfetcher.go similarity index 99% rename from pkg/mapserver/logpicker/logpicker.go rename to pkg/mapserver/logfetcher/logfetcher.go index d4f7ffeb..650a7288 100644 --- a/pkg/mapserver/logpicker/logpicker.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -1,4 +1,4 @@ -package logpicker +package logfetcher import ( "bufio" diff --git a/pkg/mapserver/logpicker/logpicker_test.go b/pkg/mapserver/logfetcher/logfetcher_test.go similarity index 99% rename from 
pkg/mapserver/logpicker/logpicker_test.go rename to pkg/mapserver/logfetcher/logfetcher_test.go index 4b14b609..a03a05c4 100644 --- a/pkg/mapserver/logpicker/logpicker_test.go +++ b/pkg/mapserver/logfetcher/logfetcher_test.go @@ -1,4 +1,4 @@ -package logpicker +package logfetcher import ( "context" diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index e2b39a6a..41c925c8 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -13,7 +13,7 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/db" "github.com/netsec-ethz/fpki/pkg/db/mysql" - "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" + "github.com/netsec-ethz/fpki/pkg/mapserver/logfetcher" "github.com/netsec-ethz/fpki/pkg/mapserver/trie" "github.com/netsec-ethz/fpki/pkg/util" ) @@ -22,7 +22,7 @@ const readBatchSize = 100000 // MapUpdater: map updater. It is responsible for updating the tree, and writing to db type MapUpdater struct { - Fetcher logpicker.LogFetcher + Fetcher logfetcher.LogFetcher smt *trie.Trie dbConn db.Conn } @@ -43,7 +43,7 @@ func NewMapUpdater(config *db.Configuration, root []byte, cacheHeight int) (*Map smt.CacheHeightLimit = cacheHeight return &MapUpdater{ - Fetcher: logpicker.LogFetcher{ + Fetcher: logfetcher.LogFetcher{ WorkerCount: 16, }, smt: smt, @@ -101,7 +101,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // UpdateRPCAndPC: update RPC and PC from url. 
Currently just mock PC and RPC func (mapUpdater *MapUpdater) UpdateRPCAndPC(ctx context.Context, ctUrl string, startIdx, endIdx int64) error { // get PC and RPC first - pcList, rpcList, err := logpicker.GetPCAndRPC(ctUrl, startIdx, endIdx, 20) + pcList, rpcList, err := logfetcher.GetPCAndRPC(ctUrl, startIdx, endIdx, 20) if err != nil { return fmt.Errorf("CollectCerts | GetPCAndRPC | %w", err) } diff --git a/tests/benchmark/mapserver_benchmark/download_test.go b/tests/benchmark/mapserver_benchmark/download_test.go index 7d0da7a0..a6f70e03 100644 --- a/tests/benchmark/mapserver_benchmark/download_test.go +++ b/tests/benchmark/mapserver_benchmark/download_test.go @@ -10,7 +10,7 @@ import ( "time" "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" + "github.com/netsec-ethz/fpki/pkg/mapserver/logfetcher" "github.com/stretchr/testify/require" ) @@ -31,7 +31,7 @@ func benchmarkDownload(b *testing.B, count int) { baseSize := 2 * 1000 * 1000 // exec only once, assume perfect measuring. Because b.N is the number of iterations, // just mimic b.N executions. 
- fetcher := logpicker.LogFetcher{ + fetcher := logfetcher.LogFetcher{ URL: ctURL, Start: baseSize, End: baseSize + count, @@ -54,7 +54,7 @@ func TestCreateCerts(t *testing.T) { defer cancelF() baseSize := 2 * 1000 count := 100 * 1000 - fetcher := logpicker.LogFetcher{ + fetcher := logfetcher.LogFetcher{ URL: ctURL, Start: baseSize, End: baseSize + count - 1, diff --git a/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go b/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go index b8335a4b..ee176158 100644 --- a/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go +++ b/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go @@ -12,7 +12,7 @@ import ( "github.com/google/certificate-transparency-go/x509" "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/logpicker" + "github.com/netsec-ethz/fpki/pkg/mapserver/logfetcher" ) type uniqueStringSet map[string]struct{} @@ -35,7 +35,7 @@ func main() { const baseCTSize = 2*1000 + 1600000 const count = 500 * 1000 - fetcher := logpicker.LogFetcher{ + fetcher := logfetcher.LogFetcher{ URL: "https://ct.googleapis.com/logs/argon2021", Start: baseCTSize, End: baseCTSize + count, From dd8e7eb94c941f89225d0b5e93ca296cc8ad1562 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 5 Jun 2023 10:58:58 +0200 Subject: [PATCH 145/187] Remove standalone mapserver benchmarks. 
--- .../mapserver_benchmark/download_test.go | 103 ----- .../responder_benchmark/main.go | 117 ----- .../mapserver_benchmark/responder_test.go | 181 -------- .../tools/certs_analyse/main.go | 283 ------------ .../tools/domain_analyser/main.go | 156 ------- .../updater_benchmark/main.go | 142 ------ .../mapserver_benchmark/updater_test.go | 416 ------------------ 7 files changed, 1398 deletions(-) delete mode 100644 tests/benchmark/mapserver_benchmark/download_test.go delete mode 100644 tests/benchmark/mapserver_benchmark/responder_benchmark/main.go delete mode 100644 tests/benchmark/mapserver_benchmark/responder_test.go delete mode 100644 tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go delete mode 100644 tests/benchmark/mapserver_benchmark/tools/domain_analyser/main.go delete mode 100644 tests/benchmark/mapserver_benchmark/updater_benchmark/main.go delete mode 100644 tests/benchmark/mapserver_benchmark/updater_test.go diff --git a/tests/benchmark/mapserver_benchmark/download_test.go b/tests/benchmark/mapserver_benchmark/download_test.go deleted file mode 100644 index a6f70e03..00000000 --- a/tests/benchmark/mapserver_benchmark/download_test.go +++ /dev/null @@ -1,103 +0,0 @@ -package benchmark - -import ( - "compress/gzip" - "context" - "encoding/pem" - "os" - "sort" - "testing" - "time" - - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/logfetcher" - "github.com/stretchr/testify/require" -) - -const ctURL = "https://ct.googleapis.com/logs/argon2021" - -func BenchmarkDownload1K(b *testing.B) { - benchmarkDownload(b, 1000) -} - -func BenchmarkDownload10K(b *testing.B) { - b.Skip("download skipped") - benchmarkDownload(b, 10*1000) -} - -func benchmarkDownload(b *testing.B, count int) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Duration(count)*time.Millisecond) - defer cancelF() - baseSize := 2 * 1000 * 1000 - // exec only once, assume perfect measuring. 
Because b.N is the number of iterations, - // just mimic b.N executions. - fetcher := logfetcher.LogFetcher{ - URL: ctURL, - Start: baseSize, - End: baseSize + count, - WorkerCount: 20, - } - t0 := time.Now() - _, err := fetcher.FetchAllCertificates(ctx) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -func TestCreateCerts(t *testing.T) { - if os.Getenv("FPKI_TESTS_GENCERTS") == "" { - t.Skip("not generating new certificates") - } - ctx, cancelF := context.WithTimeout(context.Background(), 3*time.Minute) - defer cancelF() - baseSize := 2 * 1000 - count := 100 * 1000 - fetcher := logfetcher.LogFetcher{ - URL: ctURL, - Start: baseSize, - End: baseSize + count - 1, - WorkerCount: 32, - } - certs, err := fetcher.FetchAllCertificates(ctx) - require.NoError(t, err) - require.Len(t, certs, count, "we have %d certificates", len(certs)) - - f, err := os.Create("../../testdata/certs.pem.gz") - require.NoError(t, err) - z, err := gzip.NewWriterLevel(f, gzip.BestCompression) - require.NoError(t, err) - uniqueNames := make(map[string]struct{}) - for _, c := range certs { - require.NotNil(t, c.RawTBSCertificate) - err = pem.Encode(z, &pem.Block{ - Type: "CERTIFICATE", - Bytes: c.RawTBSCertificate, - }) - require.NoError(t, err) - - uniqueNames[c.Subject.CommonName] = struct{}{} - } - err = z.Close() - require.NoError(t, err) - err = f.Close() - require.NoError(t, err) - - // write the list of unique names - names := make([]string, 0, len(uniqueNames)) - for n := range uniqueNames { - if domain.IsValidDomain(n) { - names = append(names, n) - } - } - sort.Strings(names) - f, err = os.Create("../../testdata/uniqueNames.txt") - require.NoError(t, err) - for _, n := range names { - _, err = f.WriteString(n + "\n") - require.NoError(t, err) - } - err = f.Close() - require.NoError(t, err) -} diff --git a/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go 
b/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go deleted file mode 100644 index 5dd0f3dd..00000000 --- a/tests/benchmark/mapserver_benchmark/responder_benchmark/main.go +++ /dev/null @@ -1,117 +0,0 @@ -package main - -import ( - "compress/gzip" - "context" - "encoding/pem" - "fmt" - "io" - "math/rand" - "os" - "sync" - "time" - - _ "github.com/go-sql-driver/mysql" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/responder" -) - -func main() { - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - - // 10M queries with 1K workers use ~ 20.3 seconds - const totalQueries = 10 * 1000 * 1000 - const numOfWorkers = 1000 - names := getNames() // only use the first 100K names, as the updater benchmark - _ = 0 // is limited to 100K certificates - fmt.Printf("%d names available\n", len(names)) - - fmt.Println("Loading responder ...") - // only use one responder - root, err := os.ReadFile("root") - if err != nil { - panic(err) - } - responder, err := responder.NewOldMapResponder(ctx, root, 233, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - fmt.Printf("requesting now (%d each worker, %d workers) ...\n", - totalQueries/numOfWorkers, numOfWorkers) - responderStartTime := time.Now() - - proofTypes := make([]map[common.ProofType]int, numOfWorkers) - var wg sync.WaitGroup - for w := 0; w < numOfWorkers; w++ { - w := w - wg.Add(1) - go func(queryCount int) { - defer wg.Done() - proofTypes[w] = make(map[common.ProofType]int) - for i := 0; i < queryCount; i++ { - name := names[rand.Intn(len(names))] - proofs, err := responder.GetProof(ctx, name) - if err != nil && err != domain.ErrInvalidDomainName { - panic(err) - } - for _, p := range proofs { - proofTypes[w][p.PoI.ProofType]++ - } - } - }(totalQueries / 
numOfWorkers) - } - wg.Wait() - - var presences, absences int - for _, types := range proofTypes { - presences += types[common.PoP] - absences += types[common.PoA] - } - fmt.Printf("Presences: %d Absences: %d\n", presences, absences) - responderDuration := time.Since(responderStartTime) - - fmt.Printf("time to fetch %d proofs: %s. 100K ~= %s\n", totalQueries, - responderDuration, responderDuration*time.Duration(100000)/time.Duration(totalQueries)) -} - -func getNames() []string { - f, err := os.Open("tests/benchmark/mapserver_benchmark/testdata/certs.pem.gz") - if err != nil { - panic(err) - } - z, err := gzip.NewReader(f) - if err != nil { - panic(err) - } - raw, err := io.ReadAll(z) - if err != nil { - panic(err) - } - - certs := make([]*ctx509.Certificate, 0) - for len(raw) > 0 { - var block *pem.Block - block, raw = pem.Decode(raw) - if block.Type != "CERTIFICATE" { - continue - } - c, err := ctx509.ParseTBSCertificate(block.Bytes) - if err != nil { - panic(err) - } - certs = append(certs, c) - } - err = f.Close() - if err != nil { - panic(err) - } - - names := make([]string, len(certs)) - for i, cert := range certs { - names[i] = cert.Subject.CommonName - } - return names -} diff --git a/tests/benchmark/mapserver_benchmark/responder_test.go b/tests/benchmark/mapserver_benchmark/responder_test.go deleted file mode 100644 index 7bf26272..00000000 --- a/tests/benchmark/mapserver_benchmark/responder_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package benchmark - -import ( - "bufio" - "context" - "fmt" - "io/ioutil" - "math/rand" - "os" - "os/exec" - "runtime/pprof" - "sync" - "sync/atomic" - "testing" - "time" - - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/stretchr/testify/require" -) - -// BenchmarkResponderGetProof1M uses 5.6s -func BenchmarkResponderGetProof1M(b *testing.B) { - benchmarkResponderGetProof(b, 1000*1000) -} - -func BenchmarkResponderGetProof2M(b *testing.B) { - 
benchmarkResponderGetProof(b, 1000*1000) -} - -// BenchmarkResponderGetProof10M uses: -// Parallel req. Time -// 64 53.75s -// 2000 55.17s -// 20000 63.90s -func BenchmarkResponderGetProof10M(b *testing.B) { - benchmarkResponderGetProof(b, 10*1000*1000) -} - -func benchmarkResponderGetProof(b *testing.B, count int) { - fmt.Println("Recreating updated DB ...") - t0 := time.Now() - swapBack := swapDBs(b) - defer swapBack() - resetDB(b) - fmt.Printf("used %s\n", time.Since(t0)) - - fmt.Println("Loading names ...") - t0 = time.Now() - names := make([]string, 0) - f, err := os.Open("testdata/uniqueNames.txt") - require.NoError(b, err) - s := bufio.NewScanner(f) - for s.Scan() { - names = append(names, s.Text()) - } - err = f.Close() - require.NoError(b, err) - fmt.Printf("used %s\n", time.Since(t0)) - - // create responder and request proof for those names - ctx, cancelF := context.WithTimeout(context.Background(), 15*time.Minute) - defer cancelF() - root, err := ioutil.ReadFile("testdata/root100K.bin") - require.NoError(b, err) - require.NotEmpty(b, root) - responder, err := responder.NewMapResponder(ctx, root, 233, "./config/mapserver_config.json") - require.NoError(b, err) - - fmt.Println("Requesting ...") - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. 
- t0 = time.Now() - parallelRequestLimit := 2000 // 2K requests simultaneously - wg := &sync.WaitGroup{} - var numRequests int64 = 0 - var domainData int64 = 0 - work := func(count int, names []string) { - defer wg.Done() - for i := 0; i < count; i++ { - name := names[rand.Intn(len(names))] - responses, err := responder.GetProof(ctx, name) - require.NoError(b, err) - atomic.AddInt64(&numRequests, 1) - for _, p := range responses { - atomic.AddInt64(&domainData, int64(len(p.DomainEntryBytes))) - } - } - } - wg.Add(parallelRequestLimit) - i := 0 - for ; i < count%parallelRequestLimit; i++ { - go work(count/parallelRequestLimit+1, names) - } - for ; i < parallelRequestLimit; i++ { - go work(count/parallelRequestLimit, names) - } - wg.Wait() - fmt.Printf("done %d requests, transferred %dMb, used %s\n", - numRequests, domainData/1024/1024, time.Since(t0)) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// BenchmarkResponderGetProofNoPrepareDB10K uses 541ms -func BenchmarkResponderGetProofNoPrepareDB10K(b *testing.B) { - benchmarkResponderGetProofNoPrepareDB(b, 10*1000) -} - -// BenchmarkResponderGetProofNoPrepareDB100K uses 6286ms -func BenchmarkResponderGetProofNoPrepareDB100K(b *testing.B) { - benchmarkResponderGetProofNoPrepareDB(b, 100*1000) -} - -// benchmarkResponderGetProofNoPrepareDB runs a micro benchmark, making count requests -// for random domains in uniqueNames.txt, and writing a cpu profile for the -// request part. It assumes the DB contains the data for the first 100K entries -// from testdata/dump100K.sql. 
-// The created cpu profile can be visualized with: -// go tool pprof -http=localhost:8000 tests/benchmark/mapserver_benchmark/cpuprofile.pprof -func benchmarkResponderGetProofNoPrepareDB(b *testing.B, count int) { - pprof.StopCPUProfile() - names := make([]string, 0) - f, err := os.Open("testdata/uniqueNames.txt") - require.NoError(b, err) - s := bufio.NewScanner(f) - for s.Scan() { - names = append(names, s.Text()) - } - err = f.Close() - require.NoError(b, err) - - // create responder and request proof for those names - ctx, cancelF := context.WithTimeout(context.Background(), 15*time.Minute) - defer cancelF() - root, err := ioutil.ReadFile("testdata/root100K.bin") - require.NoError(b, err) - require.NotEmpty(b, root) - responder, err := responder.NewMapResponder(ctx, root, 233, "./config/mapserver_config.json") - require.NoError(b, err) - - profileF, err := os.Create("cpuprofile.pprof") - require.NoError(b, err) - defer profileF.Close() - - b.ResetTimer() - // runtime.SetCPUProfileRate(1000000) - err = pprof.StartCPUProfile(profileF) - require.NoError(b, err) - defer pprof.StopCPUProfile() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. 
- t0 := time.Now() - work := func(count int, names []string) { - for i := 0; i < count; i++ { - name := names[rand.Intn(len(names))] - responses, err := responder.GetProof(ctx, name) - _ = responses - _ = err - } - } - work(count, names) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -func resetDB(t require.TestingT) { - db.TruncateAllTablesForTest(t) - - err := exec.Command("bash", "-c", "zcat < testdata/dump100K.sql.gz | mysql -u root fpki").Run() - require.NoError(t, err) -} diff --git a/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go b/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go deleted file mode 100644 index ee176158..00000000 --- a/tests/benchmark/mapserver_benchmark/tools/certs_analyse/main.go +++ /dev/null @@ -1,283 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "context" - "encoding/gob" - "fmt" - "io/ioutil" - "os" - "time" - - "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/mapserver/logfetcher" -) - -type uniqueStringSet map[string]struct{} - -func main() { - content, err := ioutil.ReadFile("domainCount") - - var readMap map[string]int - bytes := bytes.NewBuffer(content) - - d := gob.NewDecoder(bytes) - - // Decoding the serialized data - err = d.Decode(&readMap) - if err != nil { - panic(err) - } - - fmt.Println(readMap["dotcms-tridv.hcahealthcare.com"]) - const baseCTSize = 2*1000 + 1600000 - const count = 500 * 1000 - - fetcher := logfetcher.LogFetcher{ - URL: "https://ct.googleapis.com/logs/argon2021", - Start: baseCTSize, - End: baseCTSize + count, - WorkerCount: 16, - } - - fetcher.StartFetching() - - testSet6 := make(map[string]byte) - testSet10 := make(map[string]byte) - testSet20 := make(map[string]byte) - testSet50 := make(map[string]byte) - testSet100 := make(map[string]byte) - testSet200 := make(map[string]byte) - testSet500 
:= make(map[string]byte) - testSet1000 := make(map[string]byte) - - getCerts := 0 - - ctx, cancelF := context.WithTimeout(context.Background(), 200*time.Minute) - defer cancelF() - - certNum := 0 - - for certNum < 500*1000 { - certs, err := fetcher.NextBatch(ctx) - certNum = certNum + len(certs) - if err != nil { - panic(err) - } - getCerts = getCerts + len(certs) - - for _, cert := range certs { - - if len(cert.Subject.CommonName) == 0 { - continue - } - - domainNames, err := domain.ParseDomainName(cert.Subject.CommonName) - if err != nil { - continue - } - - totalNum := 0 - for _, name := range domainNames { - totalNum = totalNum + readMap[name] - } - - if totalNum < 5 { - testSet6[cert.Subject.CommonName] = 1 - } else if totalNum < 10 { - testSet10[cert.Subject.CommonName] = 1 - } else if totalNum < 20 { - testSet20[cert.Subject.CommonName] = 1 - } else if totalNum < 50 { - testSet50[cert.Subject.CommonName] = 1 - } else if totalNum < 100 { - testSet100[cert.Subject.CommonName] = 1 - } else if totalNum < 200 { - testSet200[cert.Subject.CommonName] = 1 - } else if totalNum < 500 { - testSet500[cert.Subject.CommonName] = 1 - } else if totalNum < 1000 { - testSet1000[cert.Subject.CommonName] = 1 - } - } - - fmt.Println("--------------------------------------") - fmt.Println("*", len(testSet6)) - fmt.Println("*", len(testSet10)) - fmt.Println("*", len(testSet20)) - fmt.Println("*", len(testSet50)) - fmt.Println("*", len(testSet100)) - fmt.Println("*", len(testSet200)) - fmt.Println("*", len(testSet500)) - fmt.Println("*", len(testSet1000)) - } - - fmt.Println(getCerts) - fmt.Println("done") - - for name := range testSet6 { - domainNames, err := domain.ParseDomainName(name) - if err != nil { - continue - } - - for _, n := range domainNames { - if readMap[n] > 4 { - panic("length error") - } - } - } - - outputFile, err := os.OpenFile("testData6.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter := 
bufio.NewWriter(outputFile) - - for domainName := range testSet6 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData10.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet10 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData20.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet20 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData50.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet50 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData100.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet100 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData200.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet200 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData500.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet500 { - _, _ = datawriter.WriteString(domainName 
+ "\n") - } - - datawriter.Flush() - outputFile.Close() - - outputFile, err = os.OpenFile("testData1000.txt", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - panic(err) - } - - datawriter = bufio.NewWriter(outputFile) - - for domainName := range testSet1000 { - _, _ = datawriter.WriteString(domainName + "\n") - } - - datawriter.Flush() - outputFile.Close() - - /* - fmt.Println() - fmt.Println("***********************************") - for k, v := range readMap { - if v > 20 { - fmt.Println(k, " ", v) - } - } - fmt.Println("***********************************") - */ - -} - -// extractCertDomains: get domain from cert: {Common Name, SANs} -func extractCertDomains(cert *x509.Certificate) []string { - domains := make(uniqueStringSet) - if len(cert.Subject.CommonName) != 0 { - domains[cert.Subject.CommonName] = struct{}{} - } - - for _, dnsName := range cert.DNSNames { - domains[dnsName] = struct{}{} - } - - result := []string{} - for k := range domains { - result = append(result, k) - } - return result -} - -func contains(s []string, e string) bool { - for _, a := range s { - if a == e { - return true - } - } - return false -} diff --git a/tests/benchmark/mapserver_benchmark/tools/domain_analyser/main.go b/tests/benchmark/mapserver_benchmark/tools/domain_analyser/main.go deleted file mode 100644 index 1dcf010c..00000000 --- a/tests/benchmark/mapserver_benchmark/tools/domain_analyser/main.go +++ /dev/null @@ -1,156 +0,0 @@ -package main - -import ( - "bytes" - "encoding/csv" - "encoding/gob" - "fmt" - "io/ioutil" - "os" - "strconv" - - "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/domain" -) - -type uniqueStringSet map[string]struct{} - -func main() { - //countDomainDepth() - countDomainCertsNum() -} - -func countDomainCertsNum() { - content, err := ioutil.ReadFile("domainCount") - - var readMap map[string]int - bytes := bytes.NewBuffer(content) - - d := gob.NewDecoder(bytes) - - // Decoding the 
serialized data - err = d.Decode(&readMap) - if err != nil { - panic(err) - } - - fmt.Println(len(readMap)) - - domainCertNum := make(map[int]int) - - for _, v := range readMap { - if v == 1 { - domainCertNum[1]++ - } else if v == 2 { - domainCertNum[2]++ - } else if v == 3 { - domainCertNum[3]++ - } else if v == 4 { - domainCertNum[4]++ - } else if v == 5 { - domainCertNum[5]++ - } else if v == 6 { - domainCertNum[6]++ - } else if v == 7 { - domainCertNum[7]++ - } else if v == 8 { - domainCertNum[8]++ - } else if v == 9 { - domainCertNum[9]++ - } else if v == 10 { - domainCertNum[10]++ - } else if v <= 15 { - domainCertNum[15]++ - } else if v <= 20 { - domainCertNum[20]++ - } else if v <= 50 { - domainCertNum[50]++ - } else if v <= 100 { - domainCertNum[100]++ - } else if v <= 200 { - domainCertNum[200]++ - } else if v <= 500 { - domainCertNum[500]++ - } else if v <= 1000 { - domainCertNum[1000]++ - } else if v <= 2000 { - domainCertNum[2000]++ - } else if v <= 5000 { - domainCertNum[5000]++ - } else if v <= 10000 { - domainCertNum[10000]++ - } - - } - - csvFile, err := os.Create("domain_certs_num.csv") - csvWriter := csv.NewWriter(csvFile) - - for k, v := range domainCertNum { - csvWriter.Write([]string{strconv.Itoa(k), strconv.Itoa(v)}) - csvWriter.Flush() - } -} - -func countDomainDepth() { - content, err := ioutil.ReadFile("domainCount") - - var readMap map[string]int - bytes := bytes.NewBuffer(content) - - d := gob.NewDecoder(bytes) - - // Decoding the serialized data - err = d.Decode(&readMap) - if err != nil { - panic(err) - } - - levelDepthMap := make(map[int]int) - levelDepthMap[1] = 0 - levelDepthMap[2] = 0 - levelDepthMap[3] = 0 - levelDepthMap[4] = 0 - levelDepthMap[5] = 0 - levelDepthMap[6] = 0 - levelDepthMap[7] = 0 - levelDepthMap[8] = 0 - levelDepthMap[9] = 0 - - for domainName := range readMap { - domainNames, err := domain.ParseDomainName(domainName) - if err != nil { - continue - } - levelDepthMap[len(domainNames)]++ - - } - - 
fmt.Println(levelDepthMap) - - csvFile, err := os.Create("domain_depth.csv") - csvWriter := csv.NewWriter(csvFile) - - for k, v := range levelDepthMap { - csvWriter.Write([]string{strconv.Itoa(k), strconv.Itoa(v)}) - csvWriter.Flush() - } -} - -// extractCertDomains: get domain from cert: {Common Name, SANs} -func extractCertDomains(cert *x509.Certificate) []string { - domains := make(uniqueStringSet) - if len(cert.Subject.CommonName) != 0 { - domains[cert.Subject.CommonName] = struct{}{} - } - - for _, dnsName := range cert.DNSNames { - domains[dnsName] = struct{}{} - } - - result := []string{} - for k := range domains { - result = append(result, k) - } - return result -} diff --git a/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go b/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go deleted file mode 100644 index af4d2011..00000000 --- a/tests/benchmark/mapserver_benchmark/updater_benchmark/main.go +++ /dev/null @@ -1,142 +0,0 @@ -package main - -import ( - "context" - "encoding/csv" - "fmt" - "math" - "os" - "strconv" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - "github.com/netsec-ethz/fpki/pkg/tests/testdb" -) - -var domainCount int - -// collect 1M certs, and update them -func main() { - - domainCount = 0 - testdb.TruncateAllTablesWithoutTestObject() - - csvFile, err := os.Create("result.csv") - - if err != nil { - panic(err) - } - - csvwriter := csv.NewWriter(csvFile) - - // new updater - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } - ctx, cancelF := context.WithTimeout(context.Background(), 200*time.Minute) - defer cancelF() - - // collect 100K certs - mapUpdater.Fetcher.BatchSize = 40000 - const baseCTSize = 2*1000 + 1600000 - const count = 2000 * 1000 - 
mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", - baseCTSize, baseCTSize+count-1) - - updateStart := time.Now() - names := []string{} - for i := 0; ; i++ { - fmt.Println() - fmt.Println() - fmt.Println(" ---------------------- batch ", i, " ---------------------------") - - n, timeList, newNames, err, writePair, readPair, smtSize := mapUpdater.UpdateNextBatchReturnTimeList(ctx) - if err != nil { - panic(err) - } - fmt.Println("number of certs: ", n) - if n == 0 { - break - } - - names = append(names, newNames...) - - start := time.Now() - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - fmt.Println("time to commit the changes: ", time.Since(start)) - timeToUpdateSMT := time.Since(start) - - domainCount = dbtest.GetDomainCountWithoutTestObject() - fmt.Println("total domains: ", domainCount) - - err = csvwriter.Write(append(append([]string{strconv.Itoa(i), timeToUpdateSMT.String()}, timeList...), - strconv.Itoa(domainCount), strconv.Itoa(countDBWriteSize(writePair)), strconv.Itoa(countDBWriteSize(readPair)), strconv.Itoa(smtSize), strconv.Itoa(len(readPair)))) - if err != nil { - panic(err) - } - - csvwriter.Flush() - } - fmt.Println("************************ Update finished ******************************") - fmt.Printf("time to get and update %d certs: %s\n", count, time.Since(updateStart)) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - err = os.WriteFile("root", root, 0644) - if err != nil { - panic(err) - } -} - -func getTreeDepth() int { - treeDepth := int(math.Log2(float64(domainCount))) - fmt.Println("tree depth before: ", treeDepth) - - return 255 - treeDepth -} - -func getUniqueName(names []string) []string { - uniqueSet := make(map[string]struct{}) - for _, name := range names { - uniqueSet[name] = struct{}{} - } - - result := []string{} - - for k := range uniqueSet { - result = append(result, k) - } - return result -} - -func checkPoP(input 
[]*common.MapServerResponse) bool { - for _, pair := range input { - if pair.PoI.ProofType == common.PoP { - if len(pair.DomainEntryBytes) == 0 { - panic("result error") - } - return true - } - } - return false -} - -func countDBWriteSize(keyValuePairs []*db.KeyValuePair) int { - totalSize := 0 - for _, pair := range keyValuePairs { - totalSize = totalSize + len(pair.Value) - totalSize = totalSize + len(pair.Key) - } - return totalSize -} diff --git a/tests/benchmark/mapserver_benchmark/updater_test.go b/tests/benchmark/mapserver_benchmark/updater_test.go deleted file mode 100644 index e3359052..00000000 --- a/tests/benchmark/mapserver_benchmark/updater_test.go +++ /dev/null @@ -1,416 +0,0 @@ -package benchmark - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "os/exec" - "testing" - "time" - - _ "github.com/go-sql-driver/mysql" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - "github.com/netsec-ethz/fpki/pkg/util" - "github.com/stretchr/testify/require" -) - -func BenchmarkFullUpdate1K(b *testing.B) { - benchmarkFullUpdate(b, 1000) -} - -// BenchmarkFullUpdate10K uses ~ 1438 ms -// Target is updating 17M certs in 2 hours = (linear) = 7200s/17M = 0.424ms per certificate => -// Target for this test is 0.42 ms * 10K = 4200 ms. -// Linear regression (with 6 points) -// y= 0.1161x + 542.6176 milliseconds -// Linear correlation coefficient is 0.9966 -// prediction is y(17M) = 1974243 ms = 1974.243 s = 33 minutes -// -// n log n fitting ( m*x*log(m*x) + c ) (use MSE e.g. 
mycurvefit.com) -// y= 0.02722022x log(0.02722022x) + 1348.432 -// prediction is y(17M) = 2722347 ms = 2722.348 s = 45 minutes -// -// Reproduce and print milliseconds: -// run: go test -run=XXX -bench=FullUpdate ./tests/benchmark/mapserver_benchmark/ -// and pipe it to: grep ^BenchmarkFullUpdate | awk '{printf("%30s, %013.6f\n",$1,$3/1000000) }' | \ -// sed 's/BenchmarkFullUpdate//'|sed 's/-24//'|sed 's/K/000/' -func BenchmarkFullUpdate10K(b *testing.B) { - benchmarkFullUpdate(b, 10*1000) -} - -func BenchmarkFullUpdate20K(b *testing.B) { - benchmarkFullUpdate(b, 20*1000) -} - -func BenchmarkFullUpdate30K(b *testing.B) { - benchmarkFullUpdate(b, 30*1000) -} - -func BenchmarkFullUpdate40K(b *testing.B) { - benchmarkFullUpdate(b, 40*1000) -} - -func BenchmarkFullUpdate50K(b *testing.B) { - benchmarkFullUpdate(b, 50*1000) -} - -func BenchmarkFullUpdate60K(b *testing.B) { - benchmarkFullUpdate(b, 60*1000) -} - -func BenchmarkFullUpdate70K(b *testing.B) { - benchmarkFullUpdate(b, 70*1000) -} - -func BenchmarkFullUpdate80K(b *testing.B) { - benchmarkFullUpdate(b, 80*1000) -} - -func BenchmarkFullUpdate90K(b *testing.B) { - benchmarkFullUpdate(b, 90*1000) -} - -func BenchmarkFullUpdate100K(b *testing.B) { - benchmarkFullUpdate(b, 100*1000) -} - -func BenchmarkFullUpdate200K(b *testing.B) { - benchmarkFullUpdate(b, 200*1000) -} - -func benchmarkFullUpdate(b *testing.B, count int) { - expensiveBenchmark(b, count) - swapBack := swapDBs(b) - defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") - require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) - - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - - // Create empty chains: - certChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. 
Because b.N is the number of iterations, - // just mimic b.N executions. - t0 := time.Now() - err = up.UpdateCerts(ctx, certs, certChains) - elapsed := time.Since(t0) - require.NoError(b, err) - err = up.Close() - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// TestDoUpdatesFromTestDataCerts replaces the DB with an updated DB -// from all certificates in the testdata/certs.pem.gz file. -func TestDoUpdatesFromTestDataCerts(t *testing.T) { - if os.Getenv("FPKI_TESTS_GENCERTS") == "" { - t.Skip("not generating new certificates") - } - ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancelF() - swapBack := swapDBs(t) - defer swapBack() - fmt.Println("Loading certs ...") - r, err := util.NewGzipReader("../../testdata/certs.pem.gz") - require.NoError(t, err) - certs, err := util.LoadCertsWithPEMReader(r) - require.NoError(t, err) - require.NoError(t, r.Close()) - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - db.TruncateAllTablesForTest(t) - - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(t, err) - - batchSize := 10 * 1000 - for i := 0; i < len(certs); i += batchSize { - certs := certs[i : i+batchSize] - err = up.UpdateCerts(ctx, certs, emptyChains[:len(certs)]) - require.NoError(t, err) - err = up.CommitSMTChanges(ctx) - require.NoError(t, err) - fmt.Printf("Updated %d certs ...\n", i) - } - root := up.GetRoot() - err = up.Close() - require.NoError(t, err) - err = ioutil.WriteFile("../../testdata/root100K.bin", root, 0664) - require.NoError(t, err) - - // dump contents using mysqldump - err = exec.Command("bash", "-c", "mysqldump -u root fpki |gzip - "+ - ">../../testdata/dump100K.sql.gz").Run() - require.NoError(t, err) -} - -// BenchmarkUpdateDomainEntriesUsingCerts10K uses ~ 1246 ms -func BenchmarkUpdateDomainEntriesUsingCerts10K(b *testing.B) { - benchmarkUpdateDomainEntriesUsingCerts(b, 10*1000) -} - -func benchmarkUpdateDomainEntriesUsingCerts(b *testing.B, 
count int) { - swapBack := swapDBs(b) - defer swapBack() - r, err := util.NewGzipReader("../../testdata/certs.pem.gz") - require.NoError(b, err) - certs, err := util.LoadCertsWithPEMReader(r) - require.NoError(b, err) - require.NoError(t, r.Close()) - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. - t0 := time.Now() - _, _, err = up.UpdateDomainEntriesUsingCerts(ctx, certs, emptyChains, 10) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// BenchmarkFetchUpdatedDomainHash10K uses ~ 31 ms -func BenchmarkFetchUpdatedDomainHash10K(b *testing.B) { - benchmarkFetchUpdatedDomainHash(b, 10*1000) -} - -func benchmarkFetchUpdatedDomainHash(b *testing.B, count int) { - swapBack := swapDBs(b) - defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") - require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - _, _, err = up.UpdateDomainEntriesUsingCerts(ctx, certs, emptyChains, 10) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. 
- t0 := time.Now() - _, err = up.FetchUpdatedDomainHash(ctx) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// BenchmarkRetrieveDomainEntries10K uses ~ 114 ms -func BenchmarkRetrieveDomainEntries10K(b *testing.B) { - benchmarkRetrieveDomainEntries(b, 10*1000) -} - -func benchmarkRetrieveDomainEntries(b *testing.B, count int) { - swapBack := swapDBs(b) - defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") - require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - _, _, err = up.UpdateDomainEntriesUsingCerts(ctx, certs, emptyChains, 10) - require.NoError(b, err) - updatedDomainHash, err := up.FetchUpdatedDomainHash(ctx) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. 
- t0 := time.Now() - _, err = up.Conn().RetrieveDomainEntries(ctx, updatedDomainHash) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// BenchmarkKeyValuePairToSMTInput10K uses ~ 21 ms -func BenchmarkKeyValuePairToSMTInput10K(b *testing.B) { - benchmarkKeyValuePairToSMTInput(b, 10*1000) -} - -func benchmarkKeyValuePairToSMTInput(b *testing.B, count int) { - swapBack := swapDBs(b) - defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") - require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - _, _, err = up.UpdateDomainEntriesUsingCerts(ctx, certs, emptyChains, 10) - require.NoError(b, err) - updatedDomainHash, err := up.FetchUpdatedDomainHash(ctx) - require.NoError(b, err) - keyValuePairs, err := up.Conn(). - RetrieveDomainEntries(ctx, updatedDomainHash) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. 
- t0 := time.Now() - _, _, err = up.KeyValuePairToSMTInput(keyValuePairs) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// BenchmarkSmtUpdate10K uses ~ 30 ms -func BenchmarkSmtUpdate10K(b *testing.B) { - benchmarkSmtUpdate(b, 10*1000) -} - -func benchmarkSmtUpdate(b *testing.B, count int) { - swapBack := swapDBs(b) - defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") - require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - - _, _, err = up.UpdateDomainEntriesUsingCerts(ctx, certs, emptyChains, 10) - require.NoError(b, err) - updatedDomainHash, err := up.FetchUpdatedDomainHash(ctx) - require.NoError(b, err) - keyValuePairs, err := up.Conn(). - RetrieveDomainEntries(ctx, updatedDomainHash) - require.NoError(b, err) - k, v, err := up.KeyValuePairToSMTInput(keyValuePairs) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. 
- t0 := time.Now() - _, err = up.SMT().Update(ctx, k, v) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// BenchmarkCommitChanges10K uses ~ 356 ms -func BenchmarkCommitChanges10K(b *testing.B) { - benchmarkCommitChanges(b, 10*1000) -} - -func benchmarkCommitChanges(b *testing.B, count int) { - swapBack := swapDBs(b) - defer swapBack() - raw, err := gunzip(b, "../../testdata/certs.pem.gz") - require.NoError(b, err) - certs := loadCertsFromPEM(b, raw) - require.GreaterOrEqual(b, len(certs), count) - certs = certs[:count] - emptyChains := make([][]*ctx509.Certificate, len(certs)) - - ctx, cancelF := context.WithTimeout(context.Background(), 20*time.Second) - defer cancelF() - up, err := updater.NewMapTestUpdater(nil, 233) - require.NoError(b, err) - - _, _, err = up.UpdateDomainEntriesUsingCerts(ctx, certs, emptyChains, 10) - require.NoError(b, err) - updatedDomainHash, err := up.FetchUpdatedDomainHash(ctx) - require.NoError(b, err) - keyValuePairs, err := up.Conn().RetrieveDomainEntries(ctx, updatedDomainHash) - require.NoError(b, err) - k, v, err := up.KeyValuePairToSMTInput(keyValuePairs) - require.NoError(b, err) - _, err = up.SMT().Update(ctx, k, v) - require.NoError(b, err) - - b.ResetTimer() - - // exec only once, assume perfect measuring. Because b.N is the number of iterations, - // just mimic b.N executions. - t0 := time.Now() - err = up.CommitSMTChanges(ctx) - elapsed := time.Since(t0) - require.NoError(b, err) - for i := 1; i < b.N; i++ { - time.Sleep(elapsed) - } -} - -// swapDBs swaps a possibly existing production DB with a new one, to be able to perform a test -// TODO(juagargi) ensure that the data from the DB is preserved. Thoughts about this are below: -// IMO this is hard to do with a function because if it may be called from different -// processes we can't just use e.g. sync.Once, and also returning the DB to the previous state -// even when the test panics would be hard. 
-// If we just not use a real DB but a mock, this follows better the spirit of a unit test or -// a benchmark, and would not affect any global data. -func swapDBs(t require.TestingT) func() { - swapBack := func() { - // this will swap the DB back to its original state - } - // prepare the DB for the benchmark - db.TruncateAllTablesForTest(t) - return swapBack -} - -func expensiveBenchmark(b *testing.B, count int) { - if count > 30000 && os.Getenv("FPKI_BENCH") == "" { - b.Skip("benchmark is expensive. Skipping") - } -} From c069de00ca562d0ceb551eaa4e11b49bc7a70a46 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 5 Jun 2023 11:01:42 +0200 Subject: [PATCH 146/187] Remove old unused integration tests. --- tests/integration/grpc_test/main.go | 205 --------------------- tests/integration/old_mapserver/main.go | 234 ------------------------ 2 files changed, 439 deletions(-) delete mode 100644 tests/integration/grpc_test/main.go delete mode 100644 tests/integration/old_mapserver/main.go diff --git a/tests/integration/grpc_test/main.go b/tests/integration/grpc_test/main.go deleted file mode 100644 index 2e8dee0b..00000000 --- a/tests/integration/grpc_test/main.go +++ /dev/null @@ -1,205 +0,0 @@ -package main - -import ( - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "strings" - "sync" - "time" - - "github.com/netsec-ethz/fpki/pkg/domain" - "github.com/netsec-ethz/fpki/pkg/grpc/grpcclient" - "github.com/netsec-ethz/fpki/pkg/grpc/grpcserver" - "github.com/netsec-ethz/fpki/pkg/mapserver/prover" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - dbtest "github.com/netsec-ethz/fpki/tests/pkg/db" - - ct "github.com/google/certificate-transparency-go" - ctTls "github.com/google/certificate-transparency-go/tls" - ctX509 "github.com/google/certificate-transparency-go/x509" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" -) - -var wg 
sync.WaitGroup - -func main() { - dbtest.TruncateAllTablesWithoutTestObject() - - // new map updator - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } - - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - mapUpdater.Fetcher.BatchSize = 10000 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", 1120000, 1120999) - _, err = mapUpdater.UpdateNextBatch(ctx) - if err != nil { - panic(err) - } - - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - // get a new responder, and load an existing tree - mapResponder, err := grpcserver.NewGRPCServer(ctx, root, 233, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - closeChan := make(chan byte) - go mapResponder.StartWork(closeChan, 50051) - - // re-collect the added certs - collectedCertMap := []ctX509.Certificate{} - for i := 0; i < 50; i++ { - certList, err := getCerts("https://ct.googleapis.com/logs/argon2021", int64(1120000+i*20), int64(1120000+i*20+19)) - //fmt.Println("downloading : ", int64(1120000+i*20), " - ", int64(1120000+i*20+19)) - if err != nil { - panic(err) - } - collectedCertMap = append(collectedCertMap, certList...) 
- } - - numberOfWorker := 15 - wg.Add(numberOfWorker) - step := len(collectedCertMap) / numberOfWorker - - for i := 0; i < numberOfWorker; i++ { - worker(ctx, collectedCertMap[i*step:i*step+step-1]) - } - - wg.Wait() - - closeChan <- 1 - - fmt.Println("map server succeed!") -} - -func worker(ctx context.Context, certs []ctX509.Certificate) { - for _, cert := range certs { - if cert.Subject.CommonName != "" { - proofs, err := grpcclient.GetProofs(cert.Subject.CommonName, 50051) - if err != nil { - if err == domain.ErrInvalidDomainName { - continue - } - panic(err) - } - if !checkProof(cert, proofs) { - panic("certs not found") - } - } - } - wg.Done() -} - -func checkProof(cert ctX509.Certificate, proofs []mapCommon.MapServerResponse) bool { - caName := cert.Issuer.String() - for _, proof := range proofs { - if !strings.Contains(cert.Subject.CommonName, proof.Domain) { - panic("wrong domain proofs") - } - proofType, isCorrect, err := prover.VerifyProofByDomain(proof) - if err != nil { - panic(err) - } - - if !isCorrect { - panic("wrong proof") - } - - if proofType == mapCommon.PoA { - if len(proof.DomainEntryBytes) != 0 { - panic("domain entry bytes not empty for PoA") - } - } - if proofType == mapCommon.PoP { - domainEntry, err := mapCommon.DeserializeDomainEntry(proof.DomainEntryBytes) - if err != nil { - panic(err) - } - // get the correct CA entry - for _, caEntry := range domainEntry.Entries { - if caEntry.CAName == caName { - // check if the cert is in the CA entry - for _, certRaw := range caEntry.DomainCerts { - if bytes.Equal(certRaw, cert.Raw) { - return true - } - } - } - } - } - } - return false -} - -// CertData: merkle tree leaf -type CertData struct { - LeafInput string `json:"leaf_input"` - ExtraData string `json:"extra_data"` -} - -// CertLog: Data from CT log -type CertLog struct { - Entries []CertData -} - -// copy of function from logpicker_worker.go -func getCerts(ctURL string, start int64, end int64) ([]ctX509.Certificate, error) { - url := 
fmt.Sprintf(ctURL+"/ct/v1/get-entries?start=%d&end=%d"", start, end) - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("http.Get %w", err) - } - - buf := new(bytes.Buffer) - buf.ReadFrom(resp.Body) - - var resultsCerLog CertLog - json.Unmarshal(buf.Bytes(), &resultsCerLog) - - certList := []ctX509.Certificate{} - - // parse merkle leaves and append it to the result -parse_cert_loop: - for _, entry := range resultsCerLog.Entries { - leafBytes, _ := base64.RawStdEncoding.DecodeString(entry.LeafInput) - var merkleLeaf ct.MerkleTreeLeaf - ctTls.Unmarshal(leafBytes, &merkleLeaf) - - var certificate *ctX509.Certificate - switch entryType := merkleLeaf.TimestampedEntry.EntryType; entryType { - case ct.X509LogEntryType: - certificate, err = ctX509.ParseCertificate(merkleLeaf.TimestampedEntry.X509Entry.Data) - if err != nil { - fmt.Println("ERROR: ParseCertificate ", err) - continue parse_cert_loop - } - case ct.PrecertLogEntryType: - certificate, err = ctX509.ParseTBSCertificate(merkleLeaf.TimestampedEntry.PrecertEntry.TBSCertificate) - if err != nil { - fmt.Println("ERROR: ParseTBSCertificate ", err) - continue parse_cert_loop - } - } - certList = append(certList, *certificate) - } - return certList, nil -} diff --git a/tests/integration/old_mapserver/main.go b/tests/integration/old_mapserver/main.go deleted file mode 100644 index 8f013998..00000000 --- a/tests/integration/old_mapserver/main.go +++ /dev/null @@ -1,234 +0,0 @@ -package main - -import ( - "bytes" - "context" - "database/sql" - "encoding/base64" - "encoding/json" - "fmt" - "net/http" - "strings" - "sync" - - ct "github.com/google/certificate-transparency-go" - ctTls "github.com/google/certificate-transparency-go/tls" - ctX509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/domain" - mapCommon "github.com/netsec-ethz/fpki/pkg/mapserver/common" - "github.com/netsec-ethz/fpki/pkg/mapserver/prover" - 
"github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - - "time" -) - -var wg sync.WaitGroup - -type result struct { - Proofs [][]mapCommon.MapServerResponse - Err error -} - -// "https://ct.googleapis.com/logs/argon2021" - -// TestUpdaterAndResponder: store a list of domain entries -> fetch inclusion -> verify inclusion -func main() { - // truncate tables - db, err := sql.Open("mysql", "root:@tcp(127.0.0.1:3306)/fpki?maxAllowedPacket=1073741824") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE domainEntries;") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE updates;") - if err != nil { - panic(err) - } - - _, err = db.Exec("TRUNCATE tree;") - if err != nil { - panic(err) - } - - // new map updator - mapUpdater, err := updater.NewMapUpdater(nil, 233) - if err != nil { - panic(err) - } - - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - //start := time.Now() - // download the certs and update the domain entries - mapUpdater.Fetcher.BatchSize = 10000 - mapUpdater.StartFetching("https://ct.googleapis.com/logs/argon2021", 1120000, 1120999) - _, err = mapUpdater.UpdateNextBatch(ctx) - if err != nil { - panic(err) - } - - //end := time.Now() - //fmt.Println("time to get 10000 certs: ", end.Sub(start)) - - //start = time.Now() - err = mapUpdater.CommitSMTChanges(ctx) - if err != nil { - panic(err) - } - //end = time.Now() - //fmt.Println("time to commit changes: ", end.Sub(start)) - - root := mapUpdater.GetRoot() - err = mapUpdater.Close() - if err != nil { - panic(err) - } - - // get a new responder, and load an existing tree - mapResponder, err := responder.NewOldMapResponder(ctx, root, 233, "./config/mapserver_config.json") - if err != nil { - panic(err) - } - - // re-collect the added certs - collectedCertList := []ctX509.Certificate{} - for i := 0; i < 50; i++ { - certList, err := 
getCerts("https://ct.googleapis.com/logs/argon2021", int64(1120000+i*20), int64(1120000+i*20+19)) - //fmt.Println("downloading : ", int64(1120000+i*20), " - ", int64(1120000+i*20+19)) - if err != nil { - panic(err) - } - collectedCertList = append(collectedCertList, certList...) - } - - numberOfWorker := 15 - wg.Add(numberOfWorker) - step := len(collectedCertList) / numberOfWorker - - for i := 0; i < numberOfWorker; i++ { - worker(ctx, collectedCertList[i*step:i*step+step-1], mapResponder) - } - - wg.Wait() - - fmt.Println("map server succeed!") -} - -func worker(ctx context.Context, certs []ctX509.Certificate, mapResponder *responder.OldMapResponder) { - for _, cert := range certs { - if cert.Subject.CommonName != "" { - proofs, err := mapResponder.GetProof(ctx, cert.Subject.CommonName) - if err != nil { - if err == domain.ErrInvalidDomainName { - continue - } - panic(err) - } - if !checkProof(cert, proofs) { - panic("certs not found") - } - } - } - wg.Done() -} - -func checkProof(cert ctX509.Certificate, proofs []mapCommon.MapServerResponse) bool { - caName := cert.Issuer.String() - for _, proof := range proofs { - if !strings.Contains(cert.Subject.CommonName, proof.Domain) { - panic("wrong domain proofs") - } - proofType, isCorrect, err := prover.VerifyProofByDomain(proof) - if err != nil { - panic(err) - } - - if !isCorrect { - panic("wrong proof") - } - - if proofType == mapCommon.PoA { - if len(proof.DomainEntryBytes) != 0 { - panic("domain entry bytes not empty for PoA") - } - } - if proofType == mapCommon.PoP { - domainEntry, err := mapCommon.DeserializeDomainEntry(proof.DomainEntryBytes) - if err != nil { - panic(err) - } - // get the correct CA entry - for _, caEntry := range domainEntry.Entries { - if caEntry.CAName == caName { - // check if the cert is in the CA entry - for _, certRaw := range caEntry.DomainCerts { - if bytes.Equal(certRaw, cert.Raw) { - return true - } - } - } - } - } - } - return false -} - -// CertData: merkle tree leaf -type 
CertData struct { - LeafInput string `json:"leaf_input"` - ExtraData string `json:"extra_data"` -} - -// CertLog: Data from CT log -type CertLog struct { - Entries []CertData -} - -// copy of function from logpicker_worker.go -func getCerts(ctURL string, start int64, end int64) ([]ctX509.Certificate, error) { - url := fmt.Sprintf(ctURL+"/ct/v1/get-entries?start=%d&end=%d"", start, end) - resp, err := http.Get(url) - if err != nil { - return nil, fmt.Errorf("http.Get %w", err) - } - - buf := new(bytes.Buffer) - buf.ReadFrom(resp.Body) - - var resultsCerLog CertLog - json.Unmarshal(buf.Bytes(), &resultsCerLog) - - certList := []ctX509.Certificate{} - - // parse merkle leaves and append it to the result -parse_cert_loop: - for _, entry := range resultsCerLog.Entries { - leafBytes, _ := base64.RawStdEncoding.DecodeString(entry.LeafInput) - var merkleLeaf ct.MerkleTreeLeaf - ctTls.Unmarshal(leafBytes, &merkleLeaf) - - var certificate *ctX509.Certificate - switch entryType := merkleLeaf.TimestampedEntry.EntryType; entryType { - case ct.X509LogEntryType: - certificate, err = ctX509.ParseCertificate(merkleLeaf.TimestampedEntry.X509Entry.Data) - if err != nil { - fmt.Println("ERROR: ParseCertificate ", err) - continue parse_cert_loop - } - case ct.PrecertLogEntryType: - certificate, err = ctX509.ParseTBSCertificate(merkleLeaf.TimestampedEntry.PrecertEntry.TBSCertificate) - if err != nil { - fmt.Println("ERROR: ParseTBSCertificate ", err) - continue parse_cert_loop - } - } - certList = append(certList, *certificate) - } - return certList, nil -} From ee5a5363f5cafc2a78d045869b92cdace1e29b16 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 8 Jun 2023 19:43:44 +0200 Subject: [PATCH 147/187] New log fetcher based on google's certificate-transparency client. 
--- pkg/mapserver/logfetcher/logfetcher.go | 393 ++++++++++++-------- pkg/mapserver/logfetcher/logfetcher_test.go | 231 +++++++----- pkg/mapserver/updater/updater.go | 33 +- 3 files changed, 394 insertions(+), 263 deletions(-) diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 650a7288..335b0523 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -2,226 +2,299 @@ package logfetcher import ( "bufio" - "bytes" "context" "crypto/rand" - "encoding/base64" - "encoding/json" + "crypto/tls" "fmt" "net/http" "os" - "sync" "time" ct "github.com/google/certificate-transparency-go" - ctTls "github.com/google/certificate-transparency-go/tls" + "github.com/google/certificate-transparency-go/client" + "github.com/google/certificate-transparency-go/jsonclient" ctx509 "github.com/google/certificate-transparency-go/x509" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/domain" ) -// LogFetcher is used to download batches of certificates. It has state and keeps some routines -// downloading certificates in the background, trying to prefetch the next two batches. -// By default it uses 16 workers, and batches of size 1K. +const defaultServerBatchSize = 128 +const defaultProcessBatchSize = defaultServerBatchSize * 128 + +const preloadCount = 2 // Number of batches the LogFetcher tries to preload. + +// LogFetcher is used to download CT TBS certificates. It has state and keeps some routines +// downloading certificates in the background, trying to prefetch preloadCount batches. +// LogFetcher uses the certificate-transparency-go/client from google to do the heavy lifting. +// The default size of the server side batch is 128, i.e. the server expects queries in blocks +// of 128 entries. 
+// TODO(juagargi) Use lists of CT log servers: check certificate-transparency-go/ctutil/sctcheck +// or ct/client/ctclient for a full and standard list that may already implement this. type LogFetcher struct { - URL string - Start int - End int - WorkerCount int - BatchSize int - resultChan chan []*ctx509.Certificate // batches of certificates - errChan chan error - stopChan chan struct{} // tells the workers to stop fetching + url string + start int64 // TODO(juagargi) start & end should go into fetch() and not as part as the type. + end int64 + + serverBatchSize int64 // The server requires queries in blocks of this size. + processBatchSize int64 // We unblock NextBatch in batches of this size. + ctClient *client.LogClient + chanResults chan *result + chanStop chan struct{} // tells the workers to stop fetching + stopping bool // Set at the same time than sending to chanStop } -// StartFetching will start fetching certificates in the background, so that there is -// at most two batches ready to be immediately read by NextBatch. 
-func (f *LogFetcher) StartFetching() { - if f.BatchSize == 0 { - f.BatchSize = 1000 +type result struct { + certs []*ctx509.Certificate + chains [][]*ctx509.Certificate + err error +} + +func NewLogFetcher(url string) (*LogFetcher, error) { + httpClient := &http.Client{ + Timeout: 10 * time.Second, + Transport: &http.Transport{ + TLSHandshakeTimeout: 30 * time.Second, + ResponseHeaderTimeout: 30 * time.Second, + MaxIdleConnsPerHost: 10, + DisableKeepAlives: false, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, } - if f.WorkerCount == 0 { - f.WorkerCount = 16 + opts := jsonclient.Options{UserAgent: "ct-go-ctclient/1.0"} + ctClient, err := client.New(url, httpClient, opts) + if err != nil { + return nil, err } - f.resultChan = make(chan []*ctx509.Certificate, 2) - f.errChan = make(chan error) - f.stopChan = make(chan struct{}) + return &LogFetcher{ + url: url, + + serverBatchSize: defaultServerBatchSize, + processBatchSize: defaultProcessBatchSize, + ctClient: ctClient, + chanResults: make(chan *result, preloadCount), + chanStop: make(chan struct{}), + }, nil +} + +// StartFetching will start fetching certificates in the background, so that there is +// at most two batches ready to be immediately read by NextBatch. +func (f *LogFetcher) StartFetching(start, end int64) { + f.start = start + f.end = end go f.fetch() } func (f *LogFetcher) StopFetching() { - f.stopChan <- struct{}{} + f.stopping = true + f.chanStop <- struct{}{} } // NextBatch returns the next batch of certificates as if it were a channel. // The call blocks until a whole batch is available. The last batch may have less elements. // Returns nil when there is no more batches, i.e. all certificates have been fetched. 
-func (f *LogFetcher) NextBatch(ctx context.Context) ([]*ctx509.Certificate, error) { +func (f *LogFetcher) NextBatch( + ctx context.Context, +) ( + certs []*ctx509.Certificate, + chains [][]*ctx509.Certificate, + err error) { + select { case <-ctx.Done(): f.StopFetching() - return nil, fmt.Errorf("NextBatch %w", ctx.Err()) - case certs := <-f.resultChan: - return certs, nil - case err := <-f.errChan: - return nil, err + err = fmt.Errorf("NextBatch %w", ctx.Err()) + return + case res, ok := <-f.chanResults: + if !ok { + // Channel is closed. + return + } + if err = res.err; err != nil { + return + } + certs = res.certs + chains = res.chains + return } } -// FetchAllCertificates will block until all certificates [start,end] have been fetched. -func (f *LogFetcher) FetchAllCertificates(ctx context.Context) ([]*ctx509.Certificate, error) { - f.StartFetching() - certs := make([]*ctx509.Certificate, 0, f.End-f.Start+1) +// FetchAllCertificates will block until all certificates and chains [start,end] have been fetched. +func (f *LogFetcher) FetchAllCertificates( + ctx context.Context, + start, + end int64, +) ( + certs []*ctx509.Certificate, + chains [][]*ctx509.Certificate, + err error, +) { + + f.StartFetching(start, end) + certs = make([]*ctx509.Certificate, 0, f.end-f.start+1) + chains = make([][]*ctx509.Certificate, 0, f.end-f.start+1) for { - batch, err := f.NextBatch(ctx) - if err != nil { - return nil, err + bCerts, bChains, bErr := f.NextBatch(ctx) + if bErr != nil { + err = bErr + return } - if len(batch) == 0 { + if len(bCerts) == 0 { break } - certs = append(certs, batch...) + certs = append(certs, bCerts...) + chains = append(chains, bChains...) 
} - return certs, nil + return } func (f *LogFetcher) fetch() { - for start, end := f.Start, min(f.End, f.Start+f.BatchSize-1); start <= f.End; start, end = - start+f.BatchSize, min(end+f.BatchSize, f.End) { + defer close(f.chanResults) + defer close(f.chanStop) + + if f.start > f.end { + return + } + // Taking into account the batchSize, compute the # of calls to getEntriesInBatches. + // It calls count-1 times with the batchSize, and 1 time with the remainder. + count := 1 + (f.end-f.start)/f.processBatchSize - certs, err := getCertificates(f.URL, start, end, f.WorkerCount, f.stopChan) + leafEntries := make([]*ct.LeafEntry, f.processBatchSize) + for i := int64(0); i < count; i++ { + start := f.start + i*f.processBatchSize + end := min(f.end, start+f.processBatchSize-1) + n, err := f.getRawEntriesInBatches(leafEntries, start, end) if err != nil { - f.errChan <- err - return + f.chanResults <- &result{ + err: err, + } + return // Don't continue processing when errors. + } + certEntries := make([]*ctx509.Certificate, n) + chainEntries := make([][]*ctx509.Certificate, n) + // Parse each entry to certificates and chains. + for i, leaf := range leafEntries[:n] { + index := f.start + int64(i) + raw, err := ct.RawLogEntryFromLeaf(index, leaf) + if err != nil { + f.chanResults <- &result{ + err: err, + } + return + } + // Certificate. + cert, err := ctx509.ParseCertificate(raw.Cert.Data) + if err != nil { + f.chanResults <- &result{ + err: err, + } + return + } + certEntries[i] = cert + // Chain. + chainEntries[i] = make([]*ctx509.Certificate, len(raw.Chain)) + for j, c := range raw.Chain { + chainEntries[i][j], err = ctx509.ParseCertificate(c.Data) + if err != nil { + f.chanResults <- &result{ + err: err, + } + return + } + } + } + // Send the result. + f.chanResults <- &result{ + certs: certEntries, + chains: chainEntries, } - f.resultChan <- certs } - close(f.errChan) - close(f.resultChan) - close(f.stopChan) } -// getCertificates fetches certificates from CT log. 
-// It will download end - start + 1 certificates, starting at start, and finishing with end. -func getCertificates(ctURL string, startIndex, endIndex, numOfWorker int, stopChan chan struct{}) ( - []*ctx509.Certificate, error) { +// streamRawEntries fetches certificates from CT log using getCerts. +// streamRawEntries repeats a call to getCerts as many times as necessary in batches of +// serverBatchSize. +// streamRawEntries will download end - start + 1 certificates, +// starting at start, and finishing with end. +func (f *LogFetcher) getRawEntriesInBatches(leafEntries []*ct.LeafEntry, start, end int64) ( + int64, error) { - count := endIndex - startIndex + 1 - if count < numOfWorker { - numOfWorker = count - } - if numOfWorker == 0 { - return nil, nil - } - - certsCol := make([][]*ctx509.Certificate, numOfWorker) - errs := make([]error, numOfWorker) - wg := sync.WaitGroup{} - wg.Add(numOfWorker) + assert(end >= start, "logic error: call to getRawEntriesInBatches with %d and %d", start, end) + _ = leafEntries[end-start] // Fail early if wrong size. - stride := count / numOfWorker - rem := count % numOfWorker + // TODO(juagargi) should we align the calls to serverBatchSize + batchCount := (end - start + 1) / f.serverBatchSize - for i := 0; i < rem; i++ { - go func(start, end int, certsPtr *[]*ctx509.Certificate, errPtr *error) { - defer wg.Done() - *certsPtr, *errPtr = getCerts(ctURL, start, end, stopChan) - }(startIndex+i*stride, startIndex+(i+1)*stride, &certsCol[i], &errs[i]) - } - for i := rem; i < numOfWorker; i++ { - go func(start, end int, certsPtr *[]*ctx509.Certificate, errPtr *error) { - defer wg.Done() - *certsPtr, *errPtr = getCerts(ctURL, start, end, stopChan) - }(startIndex+i*stride, startIndex+(i+1)*stride-1, &certsCol[i], &errs[i]) - } + // Do batches. 
+ for i := int64(0); !f.stopping && i < batchCount; i++ { + bStart := start + i*f.serverBatchSize + bEnd := bStart + f.serverBatchSize - 1 + entries := leafEntries[i*f.serverBatchSize : (i+1)*f.serverBatchSize] - certs := make([]*ctx509.Certificate, 0, count) - wg.Wait() - for i := 0; i < numOfWorker; i++ { - if errs[i] != nil { - return nil, errs[i] + n, err := f.getRawEntries(entries, bStart, bEnd) + if err != nil { + return i * f.serverBatchSize, err + } + if f.stopping { + return bEnd - start + 1, nil } - certs = append(certs, certsCol[i]...) + assert(n == f.serverBatchSize, "bad size in getRawEntriesInBatches") } - return certs, nil -} -// getCerts gets certificates from CT log. It will request all certs in [start,end] (including both) -func getCerts(ctURL string, start, end int, stopChan chan struct{}) ([]*ctx509.Certificate, error) { - allCerts := make([]*ctx509.Certificate, 0, end-start+1) - for end >= start { - url := fmt.Sprintf(ctURL+"/ct/v1/get-entries?start=%d&end=%d"", start, end) - resp, err := http.Get(url) + // Do remainder of batches. + remStart := batchCount*f.serverBatchSize + start + remEnd := end + if remEnd >= remStart { + // There is a remainder todo. + entries := leafEntries[batchCount*f.serverBatchSize : end-start+1] + n, err := f.getRawEntries(entries, remStart, remEnd) if err != nil { - return nil, fmt.Errorf("getCerts | http.Get %w", err) + return batchCount * f.serverBatchSize, err } - select { - case <-stopChan: // requested to stop - return nil, nil - default: + if f.stopping { + return remStart + n, nil } - newCerts, err := parseCertificatesFromCTLogServerResponse(resp) - if err != nil { - return nil, err - } - start += len(newCerts) - allCerts = append(allCerts, newCerts...) 
+ assert(n == remEnd-remStart+1, "bad remainder size in getRawEntriesInBatches") } - return allCerts, nil + + return end - start + 1, nil } -// parseCertificatesFromCTLogServerResponse iteratively gets all requested certificates, -// with as many HTTP requests as necessary. -func parseCertificatesFromCTLogServerResponse(resp *http.Response) ([]*ctx509.Certificate, error) { - type CertLog struct { - Entries []struct { - LeafInput string `json:"leaf_input"` - ExtraData string `json:"extra_data"` - } - } - buf := new(bytes.Buffer) - buf.ReadFrom(resp.Body) - var ctCerts CertLog - err := json.Unmarshal(buf.Bytes(), &ctCerts) - if err != nil { - return nil, fmt.Errorf("getCerts | json unmarshal %w\n%s", err, buf.String()) - } +// getRawEntries downloads raw entries. It doesn't have a concept of batch size, and will +// re-query the server if it didn't return as many entries as requested. +// The function returns all entries [start,end], both inclusive. +// It returns the number of retrieved entries, plus maybe an error. +// The rawEntries must be at least of size end-start+1, or panic. 
+func (f *LogFetcher) getRawEntries( + leafEntries []*ct.LeafEntry, + start, + end int64, +) (int64, error) { - certs := make([]*ctx509.Certificate, len(ctCerts.Entries)) - // parse merkle leaves and append them to the result - for i, entry := range ctCerts.Entries { - leafBytes, _ := base64.RawStdEncoding.DecodeString(entry.LeafInput) - var merkleLeaf ct.MerkleTreeLeaf - ctTls.Unmarshal(leafBytes, &merkleLeaf) - - var certificate *ctx509.Certificate - switch entryType := merkleLeaf.TimestampedEntry.EntryType; entryType { - case ct.X509LogEntryType: - certificate, err = ctx509.ParseCertificate(merkleLeaf.TimestampedEntry.X509Entry.Data) - if err != nil { - switch err.(type) { - case ctx509.NonFatalErrors: - fmt.Println(err.Error()) - default: - return nil, fmt.Errorf("getCerts | ParseCertificate %w", err) - } - } - case ct.PrecertLogEntryType: - certificate, err = ctx509.ParseTBSCertificate(merkleLeaf.TimestampedEntry.PrecertEntry.TBSCertificate) - if err != nil { - switch err.(type) { - case ctx509.NonFatalErrors: - fmt.Println(err.Error()) - default: - return nil, fmt.Errorf("getCerts | ParseTBSCertificate %w", err) - } - } + _ = leafEntries[end-start] // Fail early if the slice is too small. 
+ + for offset := int64(0); offset < end-start+1; { + select { + case <-f.chanStop: // requested to stop + return 0, nil default: - return nil, fmt.Errorf("getCerts | CT type unknown %v", entryType) } - certs[i] = certificate + rsp, err := f.ctClient.GetRawEntries(context.Background(), start+offset, end) + if err != nil { + return offset, err + } + for i := int64(0); i < int64(len(rsp.Entries)); i++ { + e := rsp.Entries[i] + leafEntries[offset+i] = &e + } + offset += int64(len(rsp.Entries)) } - return certs, nil + return end - start + 1, nil } // GetPCAndRPC: get PC and RPC from url @@ -273,9 +346,15 @@ func generateRandomBytes() []byte { return token } -func min(a, b int) int { +func min(a, b int64) int64 { if b < a { return b } return a } + +func assert(cond bool, format string, params ...any) { + if !cond { + panic(fmt.Errorf(format, params...)) + } +} diff --git a/pkg/mapserver/logfetcher/logfetcher_test.go b/pkg/mapserver/logfetcher/logfetcher_test.go index a03a05c4..689667bc 100644 --- a/pkg/mapserver/logfetcher/logfetcher_test.go +++ b/pkg/mapserver/logfetcher/logfetcher_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + ct "github.com/google/certificate-transparency-go" ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/require" ) @@ -13,137 +14,190 @@ import ( const ctURL = "https://ct.googleapis.com/logs/argon2021" -func TestGetCerts(t *testing.T) { - stopChan := make(chan struct{}) - start := 1 * 1000 * 1000 - count := 100 - certs, err := getCerts(ctURL, start, start+count-1, stopChan) - require.NoError(t, err) - require.Len(t, certs, 100, "got %d", len(certs)) +func TestGetRawEntries(t *testing.T) { + cases := map[string]struct { + start int64 + end int64 + }{ + "simple": { + start: 0, + end: 0, + }, + "long": { + start: 0, + end: 511, + }, + "non_aligned": { + start: 0, + end: 1, + }, + "middle": { + start: 2000, + end: 2001, + }, + "longer": { + start: 2000, + end: 2201, + }, + } + for 
name, tc := range cases { + name, tc := name, tc + t.Run(name, func(t *testing.T) { + t.Parallel() + + f, err := NewLogFetcher(ctURL) + require.NoError(t, err) + rawEntries := make([]*ct.LeafEntry, tc.end-tc.start+1) + n, err := f.getRawEntries(rawEntries, tc.start, tc.end) + require.NoError(t, err) + require.Equal(t, n, tc.end-tc.start+1) + }) + } } -func TestDownloadCertSize(t *testing.T) { +func TestGetRawEntriesInBatches(t *testing.T) { cases := map[string]struct { - start int - end int - numWorkers int + start int64 + end int64 + batchSize int64 }{ - "0": { - start: 2000, - end: 2000 - 1, - numWorkers: 1, - }, + "1": { - start: 2000, - end: 2000, - numWorkers: 1, + start: 2000, + end: 2000, }, "2": { - start: 2000, - end: 2001, - numWorkers: 1, + start: 2000, + end: 2001, + }, + "2_2": { + start: 2000, + end: 2001, + batchSize: 2, + }, + "3_2": { + start: 2000, + end: 2002, + batchSize: 2, + }, + "100": { + start: 2000, + end: 2100 - 1, + }, + "100_2": { + start: 2000, + end: 2100 - 1, + batchSize: 2, }, - "100_1": { - start: 2000, - end: 2100 - 1, - numWorkers: 1, + "100_13": { + start: 2000, + end: 2100 - 1, + batchSize: 13, }, - "100_3": { - start: 2000, - end: 2100 - 1, - numWorkers: 3, + "long": { + start: 0, + end: 128, + batchSize: 1, }, } for name, tc := range cases { name, tc := name, tc t.Run(name, func(t *testing.T) { t.Parallel() - stopChan := make(chan struct{}) - certs, err := getCertificates(ctURL, tc.start, tc.end, tc.numWorkers, stopChan) + + f, err := NewLogFetcher(ctURL) require.NoError(t, err) - require.Len(t, certs, tc.end-tc.start+1, - "got %d instead of %d", len(certs), tc.end-tc.start+1) + if tc.batchSize != 0 { + f.serverBatchSize = tc.batchSize + f.processBatchSize = f.serverBatchSize * 128 + } + + entries := make([]*ct.LeafEntry, tc.end-tc.start+1) + n, err := f.getRawEntriesInBatches(entries, tc.start, tc.end) + require.NoError(t, err) + expected := tc.end - tc.start + 1 + require.Equal(t, expected, n) }) } } func TestLogFetcher(t 
*testing.T) { cases := map[string]struct { - start int - end int - numWorkers int - batchSize int + start int + end int + batchSize int }{ "0": { - start: 2000, - end: 2000 - 1, - numWorkers: 1, + start: 2000, + end: 2000 - 1, }, "1": { - start: 2000, - end: 2000, - numWorkers: 1, + start: 2000, + end: 2000, }, "2": { - start: 2000, - end: 2001, - numWorkers: 1, - }, - "100_1_0": { - start: 2000, - end: 2100 - 1, - numWorkers: 1, - }, - "100_1_2": { - start: 2000, - end: 2100 - 1, - numWorkers: 1, - batchSize: 2, - }, - "100_3_0": { - start: 2000, - end: 2100 - 1, - numWorkers: 3, - }, - "100_7_13": { - start: 2000, - end: 2100 - 1, - numWorkers: 7, - batchSize: 13, + start: 2000, + end: 2001, + }, + "6_1": { + start: 2000, + end: 2005, + batchSize: 1, + }, + "100": { + start: 2000, + end: 2100 - 1, + }, + "100_2": { + start: 2000, + end: 2100 - 1, + batchSize: 2, + }, + "100_13": { + start: 2000, + end: 2100 - 1, + batchSize: 13, }, } for name, tc := range cases { name, tc := name, tc t.Run(name, func(t *testing.T) { t.Parallel() + ctx, cancelF := context.WithTimeout(context.Background(), 10*time.Second) defer cancelF() - f := &LogFetcher{ - URL: ctURL, - Start: tc.start, - End: tc.end, - WorkerCount: tc.numWorkers, - BatchSize: tc.batchSize, + f, err := NewLogFetcher(ctURL) + require.NoError(t, err) + if tc.batchSize > 0 { + f.serverBatchSize = int64(tc.batchSize) + f.processBatchSize = 128 * f.serverBatchSize } - f.StartFetching() + f.StartFetching(int64(tc.start), int64(tc.end)) allCerts := make([]*ctx509.Certificate, 0) + allChains := make([][]*ctx509.Certificate, 0) for { - certs, err := f.NextBatch(ctx) + certs, chains, err := f.NextBatch(ctx) + t.Logf("batch with %d elems", len(certs)) require.NoError(t, err) - require.LessOrEqual(t, len(certs), f.BatchSize, - "got %d instead of %d", len(certs), tc.batchSize) + require.LessOrEqual(t, len(certs), int(f.processBatchSize), + "%d is not <= than %d", len(certs), f.processBatchSize) allCerts = append(allCerts, 
certs...) + allChains = append(allChains, chains...) if certs == nil && err == nil { break } } - require.Len(t, allCerts, tc.end-tc.start+1, "got %d instead of %d", len(allCerts), tc.end-tc.start+1) - certs, err := f.NextBatch(ctx) + require.Len(t, allChains, tc.end-tc.start+1, + "got %d instead of %d", len(allChains), tc.end-tc.start+1) + + // Again. It should return empty. + certs, chains, err := f.NextBatch(ctx) require.NoError(t, err) require.Nil(t, certs) + require.Nil(t, chains) }) } } @@ -151,15 +205,14 @@ func TestLogFetcher(t *testing.T) { func TestTimeoutLogFetcher(t *testing.T) { ctx, cancelF := context.WithTimeout(context.Background(), time.Nanosecond) defer cancelF() - f := &LogFetcher{ - URL: ctURL, - Start: 2000, - End: 2000 * 2000, - BatchSize: 1, - WorkerCount: 1, - } - certs, err := f.FetchAllCertificates(ctx) + f, err := NewLogFetcher(ctURL) + require.NoError(t, err) + f.StartFetching(2000, 2000*2000) + + // Attempt to fetch something really big that would need more than 1 nanosec. + certs, chains, err := f.FetchAllCertificates(ctx, 2000, 2000*2000) require.Error(t, err) require.ErrorIs(t, err, context.DeadlineExceeded) require.Len(t, certs, 0) + require.Len(t, chains, 0) } diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 41c925c8..153ef16e 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -28,48 +28,47 @@ type MapUpdater struct { } // NewMapUpdater: return a new map updater. 
-func NewMapUpdater(config *db.Configuration, root []byte, cacheHeight int) (*MapUpdater, error) { +func NewMapUpdater(config *db.Configuration, url string) (*MapUpdater, error) { // db conn for map updater dbConn, err := mysql.Connect(config) if err != nil { return nil, fmt.Errorf("NewMapUpdater | db.Connect | %w", err) } + // deleteme // SMT - smt, err := trie.NewTrie(root, common.SHA256Hash, dbConn) + smt, err := trie.NewTrie(nil, common.SHA256Hash, dbConn) if err != nil { return nil, fmt.Errorf("NewMapServer | NewTrie | %w", err) } - smt.CacheHeightLimit = cacheHeight + smt.CacheHeightLimit = 32 + + fetcher, err := logfetcher.NewLogFetcher(url) + if err != nil { + return nil, err + } return &MapUpdater{ - Fetcher: logfetcher.LogFetcher{ - WorkerCount: 16, - }, - smt: smt, - dbConn: dbConn, + Fetcher: *fetcher, + smt: smt, + dbConn: dbConn, }, nil } // StartFetching will initiate the CT logs fetching process in the background, trying to // obtain the next batch of certificates and have it ready for the next update. -func (u *MapUpdater) StartFetching(ctURL string, startIndex, endIndex int) { - u.Fetcher.URL = ctURL - u.Fetcher.Start = startIndex - u.Fetcher.End = endIndex - u.Fetcher.StartFetching() +func (u *MapUpdater) StartFetching(startIndex, endIndex int64) { + u.Fetcher.StartFetching(startIndex, endIndex) } // UpdateNextBatch downloads the next batch from the CT log server and updates the domain and // Updates tables. Also the SMT. 
func (u *MapUpdater) UpdateNextBatch(ctx context.Context) (int, error) { - certs, err := u.Fetcher.NextBatch(ctx) + certs, chains, err := u.Fetcher.NextBatch(ctx) if err != nil { return 0, fmt.Errorf("CollectCerts | GetCertMultiThread | %w", err) } - // TODO(cyrill): parse and add certificate chains from CT log server - emptyCertChains := make([][]*ctx509.Certificate, len(certs)) - return len(certs), u.updateCerts(ctx, certs, emptyCertChains) + return len(certs), u.updateCerts(ctx, certs, chains) } // UpdateCertsLocally: add certs (in the form of asn.1 encoded byte arrays) directly without querying log From 071e7156cb55d1e2ffb761d9460734310dca9fc0 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 8 Jun 2023 22:09:53 +0200 Subject: [PATCH 148/187] Add speed test. --- pkg/mapserver/logfetcher/logfetcher_test.go | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/pkg/mapserver/logfetcher/logfetcher_test.go b/pkg/mapserver/logfetcher/logfetcher_test.go index 689667bc..75057eb6 100644 --- a/pkg/mapserver/logfetcher/logfetcher_test.go +++ b/pkg/mapserver/logfetcher/logfetcher_test.go @@ -2,6 +2,7 @@ package logfetcher import ( "context" + "fmt" "testing" "time" @@ -207,7 +208,6 @@ func TestTimeoutLogFetcher(t *testing.T) { defer cancelF() f, err := NewLogFetcher(ctURL) require.NoError(t, err) - f.StartFetching(2000, 2000*2000) // Attempt to fetch something really big that would need more than 1 nanosec. 
certs, chains, err := f.FetchAllCertificates(ctx, 2000, 2000*2000) @@ -216,3 +216,19 @@ func TestTimeoutLogFetcher(t *testing.T) { require.Len(t, certs, 0) require.Len(t, chains, 0) } + +func TestSpeed(t *testing.T) { + ctx, cancelF := context.WithTimeout(context.Background(), 5*time.Minute) + defer cancelF() + f, err := NewLogFetcher(ctURL) + require.NoError(t, err) + t0 := time.Now() + start := int64(8000) + end := start + 10000 - 1 + _, _, err = f.FetchAllCertificates(ctx, start, end) + t1 := time.Now() + elapsed := t1.Sub(t0) + fmt.Printf("Elapsed: %s, %f certs / minute\n", + elapsed, float64(end-start+1)/elapsed.Minutes()) + require.NoError(t, err) +} From 9a725d5e22bb0895ac73db48db0ac01ce494ace6 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 9 Jun 2023 13:52:32 +0200 Subject: [PATCH 149/187] Fix bug with stopping. --- pkg/mapserver/logfetcher/logfetcher.go | 17 ++++-- pkg/mapserver/logfetcher/logfetcher_test.go | 64 +++++++++++++++++++-- pkg/mapserver/updater/updater.go | 2 - 3 files changed, 70 insertions(+), 13 deletions(-) diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 335b0523..ae7bfe74 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -136,8 +136,8 @@ func (f *LogFetcher) FetchAllCertificates( ) { f.StartFetching(start, end) - certs = make([]*ctx509.Certificate, 0, f.end-f.start+1) - chains = make([][]*ctx509.Certificate, 0, f.end-f.start+1) + certs = make([]*ctx509.Certificate, 0, end-start+1) + chains = make([][]*ctx509.Certificate, 0, end-start+1) for { bCerts, bChains, bErr := f.NextBatch(ctx) if bErr != nil { @@ -175,11 +175,15 @@ func (f *LogFetcher) fetch() { } return // Don't continue processing when errors. } + if f.stopping { + f.stopping = false // We are handling the stop now. 
+ return + } certEntries := make([]*ctx509.Certificate, n) chainEntries := make([][]*ctx509.Certificate, n) // Parse each entry to certificates and chains. for i, leaf := range leafEntries[:n] { - index := f.start + int64(i) + index := start + int64(i) raw, err := ct.RawLogEntryFromLeaf(index, leaf) if err != nil { f.chanResults <- &result{ @@ -230,8 +234,11 @@ func (f *LogFetcher) getRawEntriesInBatches(leafEntries []*ct.LeafEntry, start, // TODO(juagargi) should we align the calls to serverBatchSize batchCount := (end - start + 1) / f.serverBatchSize + if f.stopping { + return 0, nil + } // Do batches. - for i := int64(0); !f.stopping && i < batchCount; i++ { + for i := int64(0); i < batchCount; i++ { bStart := start + i*f.serverBatchSize bEnd := bStart + f.serverBatchSize - 1 entries := leafEntries[i*f.serverBatchSize : (i+1)*f.serverBatchSize] @@ -241,7 +248,7 @@ func (f *LogFetcher) getRawEntriesInBatches(leafEntries []*ct.LeafEntry, start, return i * f.serverBatchSize, err } if f.stopping { - return bEnd - start + 1, nil + return i*f.serverBatchSize + n, nil } assert(n == f.serverBatchSize, "bad size in getRawEntriesInBatches") } diff --git a/pkg/mapserver/logfetcher/logfetcher_test.go b/pkg/mapserver/logfetcher/logfetcher_test.go index 75057eb6..4b765ba8 100644 --- a/pkg/mapserver/logfetcher/logfetcher_test.go +++ b/pkg/mapserver/logfetcher/logfetcher_test.go @@ -51,11 +51,29 @@ func TestGetRawEntries(t *testing.T) { rawEntries := make([]*ct.LeafEntry, tc.end-tc.start+1) n, err := f.getRawEntries(rawEntries, tc.start, tc.end) require.NoError(t, err) - require.Equal(t, n, tc.end-tc.start+1) + require.Equal(t, tc.end-tc.start+1, n) }) } } +func TestStoppingGetRawEntries(t *testing.T) { + f, err := NewLogFetcher(ctURL) + require.NoError(t, err) + f.start = 3000 + f.end = f.start + 10000 + + // In 1 second, trigger a stop signal. + go func() { + time.Sleep(time.Second) + f.StopFetching() + }() + // Manually call getRawEntries as if called from the parent. 
+ leafEntries := make([]*ct.LeafEntry, f.end-f.start+1) + n, err := f.getRawEntries(leafEntries, f.start, f.end) + require.NoError(t, err) + require.Equal(t, int64(0), n) +} + func TestGetRawEntriesInBatches(t *testing.T) { cases := map[string]struct { start int64 @@ -122,6 +140,41 @@ func TestGetRawEntriesInBatches(t *testing.T) { } } +func TestStoppingGetRawEntriesInBatches(t *testing.T) { + // Prepare a test case for getRawEntriesInBatches, where the server responds in batches of 1 + // element, and we process in batches of 100K. + // This means that getRawEntriesInBatches will have to make 100K calls to getRawEntries, + // and in the middle of them, we will request to stop the fetcher. + f, err := NewLogFetcher(ctURL) + require.NoError(t, err) + f.serverBatchSize = 1 + f.processBatchSize = 100000 + f.start = 3000 + f.end = f.start + 3000*3000 // Whatever but larger than processBatchSize + + // Trigger a stop signal after 1 sec. Fast enough for getRawEntriesInBatches to not be done yet + go func() { + time.Sleep(1 * time.Second) + f.StopFetching() + }() + + // Manually call getRawEntriesInBatches as if called from "fetch()". + leafEntries := make([]*ct.LeafEntry, f.processBatchSize) + start := f.start + end := f.start + f.processBatchSize - 1 + n, err := f.getRawEntriesInBatches(leafEntries, start, end) + require.NoError(t, err) + // Some leaves where downloaded. + require.Greater(t, n, int64(0)) + // But not all. + require.Less(t, n, f.processBatchSize) + + // Check that all leaves returned are non nil. 
+ for i := range leafEntries[:n] { + require.NotNil(t, leafEntries[i], "nil leaf at %d", i) + } +} + func TestLogFetcher(t *testing.T) { cases := map[string]struct { start int @@ -179,7 +232,6 @@ func TestLogFetcher(t *testing.T) { allChains := make([][]*ctx509.Certificate, 0) for { certs, chains, err := f.NextBatch(ctx) - t.Logf("batch with %d elems", len(certs)) require.NoError(t, err) require.LessOrEqual(t, len(certs), int(f.processBatchSize), "%d is not <= than %d", len(certs), f.processBatchSize) @@ -204,13 +256,13 @@ func TestLogFetcher(t *testing.T) { } func TestTimeoutLogFetcher(t *testing.T) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Nanosecond) + ctx, cancelF := context.WithTimeout(context.Background(), time.Second) defer cancelF() f, err := NewLogFetcher(ctURL) require.NoError(t, err) - // Attempt to fetch something really big that would need more than 1 nanosec. - certs, chains, err := f.FetchAllCertificates(ctx, 2000, 2000*2000) + // Attempt to fetch something really big that would need more than 1 sec. + certs, chains, err := f.FetchAllCertificates(ctx, 2000, 666000000) require.Error(t, err) require.ErrorIs(t, err, context.DeadlineExceeded) require.Len(t, certs, 0) @@ -223,7 +275,7 @@ func TestSpeed(t *testing.T) { f, err := NewLogFetcher(ctURL) require.NoError(t, err) t0 := time.Now() - start := int64(8000) + start := int64(18000) end := start + 10000 - 1 _, _, err = f.FetchAllCertificates(ctx, start, end) t1 := time.Now() diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 153ef16e..a531a9fb 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -18,8 +18,6 @@ import ( "github.com/netsec-ethz/fpki/pkg/util" ) -const readBatchSize = 100000 - // MapUpdater: map updater. 
It is responsible for updating the tree, and writing to db type MapUpdater struct { Fetcher logfetcher.LogFetcher From 8d3b6e72c0c1a11ff0abb759c10ec7c0f95d972a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 13 Jun 2023 13:34:32 +0200 Subject: [PATCH 150/187] Remove unused function, and simplify _if_ condition. --- pkg/common/structure.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/pkg/common/structure.go b/pkg/common/structure.go index b4bac0c7..f2c7a16e 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -9,7 +9,6 @@ import ( // of "policy objects". A policy object is that one that represents functionality of policies // for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. type PolicyObject interface { - __PolicyObjectMarkerMethod() Raw() []byte Domain() string } @@ -19,9 +18,8 @@ type PolicyObjectBase struct { Subject string `json:",omitempty"` } -func (PolicyObjectBase) __PolicyObjectMarkerMethod() {} -func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } -func (o PolicyObjectBase) Domain() string { return o.Subject } +func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } +func (o PolicyObjectBase) Domain() string { return o.Subject } // root certificate signing request type RCSR struct { @@ -177,8 +175,7 @@ func (rpc *RPC) Equal(rpc_ *RPC) bool { } func (sprt *SPRT) Equal(sprt_ *SPRT) bool { - return true && - sprt.SPT.Equal(sprt_.SPT) && + return sprt.SPT.Equal(sprt_.SPT) && sprt.Reason == sprt_.Reason } From 99127a35487007ba25a43693a1e8431b86d7cc1f Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 13 Jun 2023 17:21:40 +0200 Subject: [PATCH 151/187] Fix Check*Exist db functions. Because MySQL doesn't guarantee the order of rows when using UNION ALL, we have to ensure that the rows are returned in the expected order, which is that of the list of parameters. 
Because we haven't been able to make it fail in tests or real life, there is no tests checking for this situation. --- pkg/db/mysql/certs.go | 14 ++++++++++---- pkg/db/mysql/policies.go | 14 ++++++++++---- tools/create_schema.sh | 9 ++++++++- 3 files changed, 28 insertions(+), 9 deletions(-) diff --git a/pkg/db/mysql/certs.go b/pkg/db/mysql/certs.go index 2848e1e1..08765a60 100644 --- a/pkg/db/mysql/certs.go +++ b/pkg/db/mysql/certs.go @@ -151,9 +151,13 @@ func (c *mysqlDB) checkCertsExist(ctx context.Context, ids []*common.SHA256Outpu } // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. - elems := make([]string, len(data)) - for i := range elems { - elems[i] = "SELECT ? AS cert_id" + + // The id_placeholders list contains strings that allow an ID to be placed with a sequential + // number, so that the IDs are returned in the same order in the DB engine as they are present + // in the list parameter here. + id_placeholders := make([]string, len(data)) + for i := range id_placeholders { + id_placeholders[i] = fmt.Sprintf("SELECT ? AS cert_id, %d AS list_seq", i) } // The query means: join two tables, one with the values I am passing as arguments (those @@ -161,7 +165,9 @@ func (c *mysqlDB) checkCertsExist(ctx context.Context, ids []*common.SHA256Outpu // Finally, group_concat all rows into just one field of type string. 
str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + "SELECT (CASE WHEN certs.cert_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + - strings.Join(elems, " UNION ALL ") + + "SELECT cert_id FROM(" + + strings.Join(id_placeholders, " UNION ALL ") + + ") AS sorted_by_list_seq ORDER BY list_seq" + ") AS request LEFT JOIN ( SELECT cert_id FROM certs ) AS certs ON " + "certs.cert_id = request.cert_id) AS t" diff --git a/pkg/db/mysql/policies.go b/pkg/db/mysql/policies.go index 77f26f98..2371edf3 100644 --- a/pkg/db/mysql/policies.go +++ b/pkg/db/mysql/policies.go @@ -26,9 +26,13 @@ func (c *mysqlDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Ou } // Prepare a query that returns a vector of bits, 1 means ID is present, 0 means is not. - elems := make([]string, len(data)) - for i := range elems { - elems[i] = "SELECT ? AS policy_id" + + // The id_placeholders list contains strings that allow an ID to be placed with a sequential + // number, so that the IDs are returned in the same order in the DB engine as they are present + // in the list parameter here. + id_placeholders := make([]string, len(data)) + for i := range id_placeholders { + id_placeholders[i] = fmt.Sprintf("SELECT ? AS policy_id, %d AS list_seq", i) } // The query means: join two tables, one with the values I am passing as arguments (those @@ -36,7 +40,9 @@ func (c *mysqlDB) CheckPoliciesExist(ctx context.Context, ids []*common.SHA256Ou // Finally, group_concat all rows into just one field of type string. 
str := "SELECT GROUP_CONCAT(presence SEPARATOR '') FROM (" + "SELECT (CASE WHEN policies.policy_id IS NOT NULL THEN 1 ELSE 0 END) AS presence FROM (" + - strings.Join(elems, " UNION ALL ") + + "SELECT policy_id FROM(" + + strings.Join(id_placeholders, " UNION ALL ") + + ") AS sorted_by_list_seq ORDER BY list_seq" + ") AS request LEFT JOIN ( SELECT policy_id FROM policies ) AS policies ON " + "policies.policy_id = request.policy_id) AS t" diff --git a/tools/create_schema.sh b/tools/create_schema.sh index 4fc40734..c53dbea9 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -153,7 +153,13 @@ EOF USE $DBNAME; DROP PROCEDURE IF EXISTS calc_dirty_domains; DELIMITER $$ --- Because MySQL doesn't support FULL OUTER JOIN, we have to emulate it with: +-- Because MySQL doesn't support FULL OUTER JOIN, we have to emulate it. +-- We want: +-- SELECT * FROM t1 +-- FULL OUTER JOIN +-- SELECT * FROM t2 +-- ------------------------------------ +-- We emulate is with: -- SELECT * FROM t1 -- LEFT JOIN t2 ON t1.id = t2.id -- UNION @@ -163,6 +169,7 @@ DELIMITER $$ -- -- The table t1 is a CTE that retrieves the certificates. -- The table t2 is a CTE that retrieves the policies. +-- ------------------------------------ -- This SP needs ~ 5 seconds per 20K dirty domains. CREATE PROCEDURE calc_dirty_domains() BEGIN From 7283036fe90388af16dd7b3e2e2422134b72c474 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 13 Jun 2023 17:42:39 +0200 Subject: [PATCH 152/187] Remove unused, wrong apps as tests under tests/. 
--- tests/benchmark/db_benchmark/db.go | 136 --------------- tests/benchmark/logserver_benchmark/main.go | 92 ---------- tests/benchmark/smt_benchmark/main.go | 137 --------------- tests/integration/mapserver/main.go | 183 -------------------- tests/smt_demo/README.md | 63 ------- tests/smt_demo/main.go | 180 ------------------- 6 files changed, 791 deletions(-) delete mode 100644 tests/benchmark/db_benchmark/db.go delete mode 100644 tests/benchmark/logserver_benchmark/main.go delete mode 100644 tests/benchmark/smt_benchmark/main.go delete mode 100644 tests/integration/mapserver/main.go delete mode 100644 tests/smt_demo/README.md delete mode 100644 tests/smt_demo/main.go diff --git a/tests/benchmark/db_benchmark/db.go b/tests/benchmark/db_benchmark/db.go deleted file mode 100644 index 94e3ba89..00000000 --- a/tests/benchmark/db_benchmark/db.go +++ /dev/null @@ -1,136 +0,0 @@ -package main - -import ( - "context" - "encoding/hex" - "fmt" - "math/rand" - "strconv" - "time" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" - "github.com/netsec-ethz/fpki/pkg/tests/testdb" -) - -func main() { - testdb.TruncateAllTablesWithoutTestObject() - // ***************************************************************** - // open a db connection - // ***************************************************************** - - conn, err := mysql.Connect(nil) - if err != nil { - panic(err) - } - - // ***************************************************************** - // insert 1M node first - // ***************************************************************** - for i := 0; i < 100; i++ { - newKVPair := getKeyValuePair(i*45000, i*45000+49999, generateRandomBytes()) - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - start := time.Now() - _, err = conn.UpdateTreeNodes(ctx, newKVPair) - if err != nil { - panic(err) - } - - end := time.Now() - 
fmt.Println("iteration ", i, " current iteration: ", i, ", time ", end.Sub(start)) - } - - // ***************************************************************** - // read one value, single-threaded - // ***************************************************************** - for i := 0; i < 100; i++ { - keys := getKeys(i*1000, i*1000+999) - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - start := time.Now() - for _, k := range keys { - value, err := conn.RetrieveTreeNode(ctx, k) - if err != nil { - panic(err) - } - if value == nil { - panic("no result") - } - } - end := time.Now() - fmt.Println("Single-thread READ for 1000 read: index: ", i*1000, "time: ", end.Sub(start)) - } - - // ***************************************************************** - // delete entries - // ***************************************************************** - for i := 0; i < 1000; i++ { - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - keys := getKeys(i*1000, i*1000+999) - - start := time.Now() - _, err := conn.DeleteTreeNodes(ctx, keys) - if err != nil { - panic(err) - } - - end := time.Now() - fmt.Println("DELETE ", i*1000, "time ", end.Sub(start)) - } -} - -func generateRandomBytes() []byte { - token := make([]byte, 1024*10) - rand.Read(token) - return token -} - -func getRandomKeys() []string { - result := []string{} - for i := 0; i < 1000; i++ { - keyHash := common.SHA256Hash([]byte(strconv.Itoa(rand.Intn(900000)))) - keyString := hex.EncodeToString(keyHash) - result = append(result, keyString) - } - return result -} - -func getKeys(startIdx, endIdx int) []common.SHA256Output { - result := []common.SHA256Output{} - for i := startIdx; i <= endIdx; i++ { - keyHash := common.SHA256Hash([]byte(strconv.Itoa(i))) - keyHash32Bytes := [32]byte{} - copy(keyHash32Bytes[:], keyHash) - result = append(result, keyHash32Bytes) - } - return result -} - -func getKeyValuePair(startIdx, endIdx int, content []byte) 
[]*db.KeyValuePair { - result := []*db.KeyValuePair{} - for i := startIdx; i <= endIdx; i++ { - keyHash := common.SHA256Hash([]byte(strconv.Itoa(i))) - keyHash32Bytes := [32]byte{} - copy(keyHash32Bytes[:], keyHash) - result = append(result, &db.KeyValuePair{Key: keyHash32Bytes, Value: content}) - } - return result -} - -func getRandomIndex(min, max int) []int { - result := make(map[int]struct{}) - for len(result) < 5000 { - newRand := rand.Intn(max-min) + min - result[newRand] = struct{}{} - } - - resultList := []int{} - for k := range result { - resultList = append(resultList, k) - } - return resultList -} diff --git a/tests/benchmark/logserver_benchmark/main.go b/tests/benchmark/logserver_benchmark/main.go deleted file mode 100644 index 2eefe5f9..00000000 --- a/tests/benchmark/logserver_benchmark/main.go +++ /dev/null @@ -1,92 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "math/rand" - "os" - "time" - - "github.com/netsec-ethz/fpki/pkg/policylog/client" -) - -//TestCreateTreeAddLeafThenGetPoI: Add leaves to tree -> get Proof of Inclusion -// Used to measure the time to add leaves -func main() { - flag.Parse() - err := os.MkdirAll("./file_exchange/policylog/trees_config", os.ModePerm) - if err != nil { - panic(err) - } - - // init admin adminClient - adminClient, err := client.GetAdminClient("config/adminclient_config.json") - if err != nil { - panic(err) - } - - // create new tree - tree, err := adminClient.CreateNewTree() - if err != nil { - panic(err) - } - - // init log client - logClient, err := client.NewLogClient("config/logclient_config.json", tree.TreeId) - if err != nil { - panic(err) - } - - // prepare 20 leaves - leaves := [][]byte{} - for i := 1; i < 20; i++ { - leaves = append(leaves, generateRandomBytes()) - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*time.Duration(1000)) - defer cancel() - - start := time.Now() - - // add 20 leaves to log - addLeavesResult := logClient.AddLeaves(ctx, 
leaves) - if len(addLeavesResult.Errs) != 0 { - panic("add leaves error") - } - - elapsed := time.Since(start) - fmt.Println("queue leaves succeed!") - fmt.Println(elapsed) - - // wait some time for the policy log to actually add the leaves - // in final implementation, more elegant method is applied. - time.Sleep(2000 * time.Millisecond) - - // update the tree size of the policy log - err = logClient.UpdateTreeSize(ctx) - if err != nil { - panic(err) - } - - start = time.Now() - - // fetch PoI - incResult := logClient.FetchInclusions(ctx, leaves) - if len(incResult.Errs) != 0 { - panic("fetch inclusion error") - } - - elapsed = time.Since(start) - fmt.Println("fetch proofs succeed!") - fmt.Println(elapsed) - - os.RemoveAll("./file_exchange") - fmt.Println("test succeed!") -} - -func generateRandomBytes() []byte { - token := make([]byte, 300) - rand.Read(token) - return token -} diff --git a/tests/benchmark/smt_benchmark/main.go b/tests/benchmark/smt_benchmark/main.go deleted file mode 100644 index ed6de447..00000000 --- a/tests/benchmark/smt_benchmark/main.go +++ /dev/null @@ -1,137 +0,0 @@ -package main - -import ( - "context" - "crypto/rand" - "encoding/csv" - "fmt" - "os" - "runtime" - "sort" - "sync" - "time" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" - "github.com/netsec-ethz/fpki/pkg/tests/testdb" -) - -var wg sync.WaitGroup - -// benchmark for sparse merkle tree -func main() { - testdb.TruncateAllTablesWithoutTestObject() - - conn, err := db.Connect(nil) - if err != nil { - panic(err) - } - - smt, err := trie.NewTrie(nil, common.SHA256Hash, conn) - if err != nil { - panic(err) - } - - smt.CacheHeightLimit = 200 - - csvFile, err := os.Create("smt_update.csv") - - if err != nil { - panic(err) - } - - csvwriter := csv.NewWriter(csvFile) - - proof_csvFile, err := os.Create("smt_proof.csv") - - if err != nil { - panic(err) - } - - 
proof_csvwriter := csv.NewWriter(proof_csvFile) - - benchmark10MAccounts10Ktps(smt, csvwriter, proof_csvwriter) - -} - -func benchmark10MAccounts10Ktps(smt *trie.Trie, update_writer *csv.Writer, proof_writer *csv.Writer) { - for i := 0; i < 50; i++ { - fmt.Println("Iteration ", i, " ------------------------------") - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - newKeys := getFreshData(100000, 32) - newValues := getFreshData(100000, 32) - - start := time.Now() - smt.Update(ctx, newKeys, newValues) - end := time.Now() - - err := smt.Commit(ctx) - if err != nil { - panic(err) - } - end2 := time.Now() - - elapsed := end.Sub(start) - elapsed2 := end2.Sub(end) - - var m runtime.MemStats - runtime.ReadMemStats(&m) - fmt.Println("update time for 100,000 leaves in memory: ", elapsed, - "\ntime to commit changes to db : ", elapsed2, - "\ncache size : ", smt.GetLiveCacheSize(), - "\nRAM : ", m.Sys/1024/1024, " MiB") - fmt.Println() - fmt.Println() - - update_writer.Write([]string{elapsed.String(), elapsed2.String()}) - update_writer.Flush() - - benchmark200KProofs(newKeys, smt, proof_writer) - } -} - -func benchmark200KProofs(allKeys [][]byte, smt *trie.Trie, proof_writer *csv.Writer) { - fmt.Println("length of keys: ", len(allKeys)) - - wg.Add(1000) - start := time.Now() - for i := 0; i < 1000; i++ { - go worker(allKeys[i*100:i*100+99], smt) - } - wg.Wait() - end := time.Now() - fmt.Println("time to retrieve 100,000 proofs: ", end.Sub(start)) - proof_writer.Write([]string{end.Sub(start).String()}) - proof_writer.Flush() -} - -func worker(input [][]byte, smt *trie.Trie) { - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - for _, key := range input { - _, _, _, _, err := smt.MerkleProof(ctx, key) - if err != nil { - panic(err) - } - } - - wg.Done() -} - -func getFreshData(size, length int) [][]byte { - var data [][]byte - for i := 0; i < size; i++ { - key := make([]byte, 32) - _, err := 
rand.Read(key) - if err != nil { - panic(err) - } - data = append(data, common.SHA256Hash(key)[:length]) - } - sort.Sort(trie.DataArray(data)) - return data -} diff --git a/tests/integration/mapserver/main.go b/tests/integration/mapserver/main.go deleted file mode 100644 index 66cf6b70..00000000 --- a/tests/integration/mapserver/main.go +++ /dev/null @@ -1,183 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "time" - - ctx509 "github.com/google/certificate-transparency-go/x509" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/db/mysql" - "github.com/netsec-ethz/fpki/pkg/mapserver/responder" - "github.com/netsec-ethz/fpki/pkg/mapserver/updater" - "github.com/netsec-ethz/fpki/pkg/util" -) - -const ( - BatchSize = 1000 - DBName = "mapServerIT" -) - -func main() { - os.Exit(mainFunc()) -} - -func mainFunc() int { - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - - config := db.NewConfig(mysql.WithDefaults(), db.WithDB(DBName)) - - // // Create an empty test DB - // err := tests.CreateTestDB(ctx, DBName) - // panicIfError(err) - // defer func() { - // err := tests.RemoveTestDB(ctx, config) - // panicIfError(err) - // }() - // fmt.Printf("created DB %s.\n", DBName) - - // // Test connect several times. - // conn, err := mysql.Connect(config) - // panicIfError(err) - // panicIfError(conn.Close()) - // conn, err = mysql.Connect(config) - // panicIfError(err) - // panicIfError(conn.Close()) - // fmt.Println("done testing the DB connection.") - - // // Ingest data. - // ingestData(ctx, config) - // fmt.Println("done ingesting test data.") - - // Get a responder. - res := getResponder(ctx, config) - fmt.Println("done loading a responder.") - - // Compare proofs against expected results. 
- data := getSomeDataPointsToTest(ctx, config) - errors := false - for _, d := range data { - fmt.Printf("checking %s ... ", d.Name) - proof, err := res.GetProof(ctx, d.Name) - panicIfError(err) - fmt.Printf("has %d steps\n", len(proof)) - // Present domains will surely have certificates. - for _, c := range d.Certs { - err = util.CheckProof(proof, d.Name, c) - if err != nil { - errors = true - fmt.Printf("error found with %s: %s\n", d.Name, err) - } - } - } - if errors { - return 1 - } - - return 0 -} - -func ingestData(ctx context.Context, config *db.Configuration) { - // Connect to the test DB - conn, err := mysql.Connect(config) - panicIfError(err) - defer func() { - panicIfError(conn.Close()) - }() - - // Ingest the testdata. - raw, err := util.ReadAllGzippedFile("./tests/testdata/2-xenon2023.csv.gz") - panicIfError(err) - payloads, IDs, parentIDs, names, err := util.LoadCertsAndChainsFromCSV(raw) - panicIfError(err) - - // Insert the certificates into the test DB in batches. - expirations := util.ExtractExpirations(payloads) - for i := 0; i < (len(names) / BatchSize); i++ { - b := i * BatchSize // begin - e := (i + 1) * BatchSize // end - err = updater.UpdateCertsWithKeepExisting(ctx, conn, names[b:e], expirations[b:e], - payloads[b:e], IDs[b:e], parentIDs[b:e]) - panicIfError(err) - } - // Remainder of the certificates - b := (len(names) / BatchSize) * BatchSize - err = updater.UpdateCertsWithKeepExisting(ctx, conn, names[b:], expirations[b:], - payloads[b:], IDs[b:], parentIDs[b:]) - panicIfError(err) - - // Build the domain_payloads entries from dirty. - err = updater.CoalescePayloadsForDirtyDomains(ctx, conn, 2) - panicIfError(err) - - // Do the SMT update. 
- err = updater.UpdateSMT(ctx, conn, 32) - panicIfError(err) -} - -func getResponder(ctx context.Context, config *db.Configuration) *responder.MapResponder { - // Connect to the test DB - conn, err := mysql.Connect(config) - panicIfError(err) - - // Retrieve some domains - res, err := responder.NewMapResponder( - ctx, - "./tests/integration/mapserver/config/mapserver_config.json", - conn) - panicIfError(err) - return res -} - -type DataPoint struct { - Name string - Certs []*ctx509.Certificate -} - -func getSomeDataPointsToTest(ctx context.Context, config *db.Configuration) []DataPoint { - // Connect to the test DB - conn, err := mysql.Connect(config) - panicIfError(err) - defer func() { - panicIfError(conn.Close()) - }() - - // Some names from the test DB. - names := []string{ - // (4568 certs), - "*.us-west-2.es.amazonaws.com", - - // (2198 certs), - "flowers-to-the-world.com", - - // (1 cert), - "vg01.sjc006.ix.nflxvideo.net", - - // (0 certs), - "doesnnotexist.iamsure.of.that.ch", - } - - // Find certificates for these names. - data := make([]DataPoint, len(names)) - for i, name := range names { - data[i].Name = name - ID := common.SHA256Hash32Bytes([]byte(name)) - _, payload, err := conn.RetrieveDomainCertificatesPayload(ctx, ID) - panicIfError(err) - // payload contains several certificates. - data[i].Certs, err = ctx509.ParseCertificates(payload) - panicIfError(err) - fmt.Printf("found %d certs for %s\n", len(data[i].Certs), name) - } - return data -} - -func panicIfError(err error) { - if err != nil { - panic(err) - } -} diff --git a/tests/smt_demo/README.md b/tests/smt_demo/README.md deleted file mode 100644 index 9f83a310..00000000 --- a/tests/smt_demo/README.md +++ /dev/null @@ -1,63 +0,0 @@ -# SMT Demo -This is a demo for the efficient and database-supported SMT implementation. - -The key and value stored in the SMT should be 32 bytes(you can use SHA256). 
Before using the SMT library, you should hash the key and values, and sort the key-value pairs by hashed keys from low to high, before adding the pairs into SMT(more details on the demo). One pair should be (32bytes, 32bytes), and the order should be (000, XXX), (001, XXX), (100, XXX), (111, XXX) - -You can use Update() to update one batch of key-value pairs. Commit() is optional if you don't want to store the SMT permanently. - -If you want to persist the SMT, you need: -1. Commit() after every changes (after Update(){}). All changes after Commit() will not be stored in the database but memory. If the program crashes, the uncommitted changes will be lost. -2. Store the Tree Head before terminating the program. You need the Tree Head to reload the SMT - -If you only want to run SMT in memory, Commit() is not useful. - -## Functions -``` -// NewSMT creates a new SMT given a root and a hash function. -// for an empty SMT, root can be nil -func NewTrie(root []byte, hash func(data ...[]byte) []byte, store db.Conn) (*Trie, error) -``` - -``` -// Adds or update a sorted list of keys and their values to the SMT -// To delete, set the value to DefaultLeaf([]byte{0}). -func (s *Trie) Update(ctx context.Context, keys, values [][]byte) ([]byte, error) -``` -``` -// commit changes to database -// SMT lib has some internal lists to record the changes to the database. So no parameter is needed. You just need to call commit() once you want to persist the changes. -// Call Commit() before terminating, or periodically commit the changes, to avoid data loss and memory exhaustion. -(s *Trie) Commit(ctx context.Context) error -``` -``` -// LoadCache loads the first several layers of the merkle tree into memory. Depth is configured by "CacheHeightLimit" -// This is called after a SMT restarts so that it doesn't become slow with db reads -// LoadCache also updates the Root with the given root. 
-(s *Trie) LoadCache(ctx context.Context, root []byte) error -``` -``` -// MerkleProofPast generates a Merkle proof of inclusion or non-inclusion -// for a given SMT root -// returns the audit path, bool (if key is included), key, value, error -// for PoP, key-value pair will be the key-value pair of queried key -// for PoA, key-value pair will be the key-value pair of leaf on the path of the non-included key -(s *Trie) MerkleProof(ctx context.Context, key, root []byte) ([][]byte, bool, []byte, []byte, error) -``` -``` -// VerifyInclusion verifies that key/value is included in the SMT with latest root -VerifyInclusion(root []byte, auditPath [][]byte, key, value []byte) bool -``` -``` -// VerifyInclusion verifies that key/value is included in the SMT with latest root -// "key" is the non-included key -// "proofValue", "proofKey" is returned from MerkleProof() -VerifyNonInclusion(root []byte, ap [][]byte, key, proofValue, proofKey []byte) bool -``` -## How to run? -You need to install MySQL, and MySQL should not require a password for the root user. If you encounter password isseus, this might help: https://stackoverflow.com/questions/3032054/how-to-remove-mysql-root-password - -``` -cd .. -./tools/create_schema.sh (WARNING!!! make sure you don't have a db schema called "fpki", otherwise the existing "fpki" schema will be overwritten) -go run smt_demo/main.go -``` \ No newline at end of file diff --git a/tests/smt_demo/main.go b/tests/smt_demo/main.go deleted file mode 100644 index 46929ce9..00000000 --- a/tests/smt_demo/main.go +++ /dev/null @@ -1,180 +0,0 @@ -package main - -import ( - "bytes" - "context" - "crypto/rand" - "fmt" - "sort" - "time" - - _ "github.com/go-sql-driver/mysql" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/db" - "github.com/netsec-ethz/fpki/pkg/mapserver/trie" -) - -// PLEASE take a look at getFreshData(){}. You need to sort the key-value pairs before adding them to SMT. 
-// If you want to review the source code, the package is in pkg/mapserver/tire - -func main() { - ctx, cancelF := context.WithTimeout(context.Background(), time.Minute) - defer cancelF() - //*************************************************************** - // create a new db conn - //*************************************************************** - dbConn, err := db.Connect(nil) - if err != nil { - panic(err) - } - - //*************************************************************** - // get a new SMT - //*************************************************************** - smt, err := trie.NewTrie(nil, common.SHA256Hash, dbConn) - if err != nil { - panic(err) - } - - // depth of layers which are cached in memory - // 255 means no cache, and 0 means caching the whole tree in memory - // used to speed-up the SMT. - // 0: best performance, but large memory is required(depend on how many leaves you want to cache) - // num of layer is roughly log_2(num of inserted leaves) - smt.CacheHeightLimit = 233 - - //*************************************************************** - // update SMT with random key-value pairs - //*************************************************************** - // Add data to empty SMT - keys := getFreshData(100, 32) - values := getFreshData(100, 32) - smt.Update(ctx, keys, values) - - //*************************************************************** - // generate Proof of Presence, and verify them - //*************************************************************** - for i, key := range keys { - ap, isIncluded, k, v, _ := smt.MerkleProof(ctx, key) - if !isIncluded { - panic("proof type error") - } - if !trie.VerifyInclusion(smt.Root, ap, key, values[i]) { - panic("failed to verify inclusion proof") - } - if !bytes.Equal(key, k) && !bytes.Equal(values[i], v) { - panic("merkle proof didn't return the correct key-value pair") - } - } - - //*************************************************************** - // generate Proof of Absence, and verify them - 
//*************************************************************** - emptyKey := common.SHA256Hash([]byte("non-memvqbdqwdqwdqber")) - ap_, included_, proofKey_, proofValue_, _ := smt.MerkleProof(ctx, emptyKey) - if included_ { - panic("failed to verify non inclusion proof") - } - if !trie.VerifyNonInclusion(smt.Root, ap_, emptyKey, proofValue_, proofKey_) { - panic("failed to verify non inclusion proof") - } - - //*************************************************************** - // commit changes to db - //*************************************************************** - err = smt.Commit(ctx) - if err != nil { - panic(err) - } - - //*************************************************************** - // create a new db conn - //*************************************************************** - dbConn1, err := db.Connect(nil) - if err != nil { - panic(err) - } - //*************************************************************** - // start a new SMT - //*************************************************************** - // NOTE!!!: to load a existing SMT, previous Tree Root is needed - smt1, err := trie.NewTrie(smt.Root, common.SHA256Hash, dbConn1) - - //*************************************************************** - // reload cache - //*************************************************************** - // Optional. During proof-fetching, library will also gradually load the leaves. 
- err = smt1.LoadCache(ctx, smt.Root) - if err != nil { - panic(err) - } - - //*************************************************************** - // verify PoP - //*************************************************************** - for i, key_ := range keys { - ap_, included_, k_, v_, _ := smt1.MerkleProof(ctx, key_) - if !trie.VerifyInclusion(smt1.Root, ap_, key_, values[i]) { - panic("failed to verify new inclusion proof") - } - if !included_ { - panic("PoP failed") - } - if !bytes.Equal(key_, k_) && !bytes.Equal(values[i], v_) { - panic("new merkle proof didn't return the correct key-value pair") - } - } - - //*************************************************************** - // verify PoA - //*************************************************************** - emptyKey = common.SHA256Hash([]byte("non-member")) - ap_, included_, proofKey_, proofValue_, _ = smt1.MerkleProof(ctx, emptyKey) - if included_ { - panic("failed to verify new non inclusion proof") - } - if !trie.VerifyNonInclusion(smt1.Root, ap_, emptyKey, proofValue_, proofKey_) { - panic("failed to verify new non inclusion proof") - } - - //*************************************************************** - // delete some key-value pairs - //*************************************************************** - defaultValues := make([][]byte, 50) - modifiedKeys := make([][]byte, 50) - for i := 0; i < 50; i++ { - defaultValues[i] = []byte{0} - modifiedKeys[i] = keys[i] - } - - smt1.Update(ctx, modifiedKeys, defaultValues) - - //*************************************************************** - // verify PoA of deleted keys - //*************************************************************** - for _, key := range modifiedKeys { - ap, included, proofKey, proofValue, _ := smt1.MerkleProof(ctx, key) - if included { - panic("PoP failed") - } - if !trie.VerifyNonInclusion(smt1.Root, ap, key, proofValue, proofKey) { - panic("failed to verify new inclusion proof") - } - } - - fmt.Println("succeed!") -} - -func 
getFreshData(size, length int) [][]byte { - var data [][]byte - for i := 0; i < size; i++ { - key := make([]byte, 32) - _, err := rand.Read(key) - if err != nil { - panic(err) - } - data = append(data, common.SHA256Hash(key)[:length]) - } - sort.Sort(trie.DataArray(data)) - return data -} From 686c49d103e2503f74431f7775462b5b7c3c5e6b Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 13 Jun 2023 17:44:03 +0200 Subject: [PATCH 153/187] Rename Domain() to Subject() in PolicyObject. --- pkg/common/crypto/crypto.go | 4 ++-- pkg/common/crypto/crypto_test.go | 6 +++--- pkg/common/structure.go | 18 +++++++++--------- pkg/common/structure_test.go | 24 ++++++++++++------------ pkg/domainowner/domainowner.go | 2 +- pkg/logverifier/logverifier_test.go | 6 +++--- pkg/mapserver/logfetcher/logfetcher.go | 4 ++-- pkg/mapserver/updater/updater.go | 4 ++-- pkg/mapserver/updater/updater_test.go | 2 +- pkg/pca/pca.go | 4 ++-- pkg/tests/random/random.go | 4 ++-- pkg/util/types_test.go | 6 +++--- 12 files changed, 42 insertions(+), 42 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 70e64b87..25b714c4 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -113,7 +113,7 @@ func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, rpc := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: rcsr.Subject, + RawSubject: rcsr.RawSubject, }, Version: rcsr.Version, PublicKeyAlgorithm: rcsr.PublicKeyAlgorithm, @@ -208,7 +208,7 @@ func CASignSP(psr *common.PSR, caPrivKey *rsa.PrivateKey, caName string, serialN sp := &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: psr.DomainName, + RawSubject: psr.DomainName, }, Policies: psr.Policies, RootCertSignature: psr.RootCertSignature, diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 93cbb39b..ca9a2514 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ 
-20,7 +20,7 @@ func TestSignatureOfRCSR(t *testing.T) { test := &common.RCSR{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "this is a test", + RawSubject: "this is a test", }, Version: 44, TimeStamp: time.Now(), @@ -52,7 +52,7 @@ func TestIssuanceOfRPC(t *testing.T) { rcsr := &common.RCSR{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "this is a test", + RawSubject: "this is a test", }, Version: 44, TimeStamp: time.Now(), @@ -106,7 +106,7 @@ func TestIssuanceOfSP(t *testing.T) { rcsr := &common.RCSR{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "this is a test", + RawSubject: "this is a test", }, Version: 44, TimeStamp: time.Now(), diff --git a/pkg/common/structure.go b/pkg/common/structure.go index f2c7a16e..c946baca 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -10,16 +10,16 @@ import ( // for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. type PolicyObject interface { Raw() []byte - Domain() string + Subject() string } type PolicyObjectBase struct { - RawJSON []byte `json:"-"` // omit from JSON (un)marshaling - Subject string `json:",omitempty"` + RawJSON []byte `json:"-"` // omit from JSON (un)marshaling + RawSubject string `json:"Subject,omitempty"` } -func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } -func (o PolicyObjectBase) Domain() string { return o.Subject } +func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } +func (o PolicyObjectBase) Subject() string { return o.RawSubject } // root certificate signing request type RCSR struct { @@ -109,7 +109,7 @@ type Policy struct { // listed funcs are Equal() func for each structure func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { return true && - rcsr.Subject == rcsr_.Subject && + rcsr.RawSubject == rcsr_.RawSubject && rcsr.Version == rcsr_.Version && rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && @@ -122,7 +122,7 @@ func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { func (s SPT) Equal(o SPT) bool { 
return true && s.Version == o.Version && - s.Subject == o.Subject && + s.RawSubject == o.RawSubject && s.CAName == o.CAName && s.LogID == o.LogID && s.CertType == o.CertType && @@ -148,7 +148,7 @@ func (s Policy) Equal(o Policy) bool { func (s SP) Equal(o SP) bool { return true && s.TimeStamp.Equal(o.TimeStamp) && - s.Subject == o.Subject && + s.RawSubject == o.RawSubject && s.CAName == o.CAName && s.SerialNumber == o.SerialNumber && bytes.Equal(s.CASignature, o.CASignature) && @@ -160,7 +160,7 @@ func (s SP) Equal(o SP) bool { func (rpc *RPC) Equal(rpc_ *RPC) bool { return true && rpc.SerialNumber == rpc_.SerialNumber && - rpc.Subject == rpc_.Subject && + rpc.RawSubject == rpc_.RawSubject && rpc.Version == rpc_.Version && rpc.PublicKeyAlgorithm == rpc_.PublicKeyAlgorithm && bytes.Equal(rpc.PublicKey, rpc_.PublicKey) && diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index cef1807b..baafbeac 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -31,7 +31,7 @@ func TestGenerateGoldenFiles(t *testing.T) { func TestEqual(t *testing.T) { rcsr := &common.RCSR{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "bandqhvdbdlwnd", + RawSubject: "bandqhvdbdlwnd", }, Version: 6789, TimeStamp: time.Now(), @@ -47,7 +47,7 @@ func TestEqual(t *testing.T) { spt1 := common.SPT{ Version: 12313, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "hihihihihhi", + RawSubject: "hihihihihhi", }, CAName: "I'm honest CA, nice to meet you", LogID: 1231323, @@ -62,7 +62,7 @@ func TestEqual(t *testing.T) { spt2 := common.SPT{ Version: 12368713, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "hohohoho", + RawSubject: "hohohoho", }, CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, @@ -80,7 +80,7 @@ func TestEqual(t *testing.T) { SPT: common.SPT{ Version: 12314, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "bad domain", + RawSubject: "bad domain", }, CAName: "I'm malicious CA, nice to meet you", LogID: 
1729381, @@ -99,7 +99,7 @@ func TestEqual(t *testing.T) { rpc := &common.RPC{ SerialNumber: 1729381, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "bad domain", + RawSubject: "bad domain", }, Version: 1729381, PublicKeyAlgorithm: common.RSA, @@ -122,7 +122,7 @@ func TestJsonReadWrite(t *testing.T) { spt1 := &common.SPT{ Version: 12313, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "hihihihihhi", + RawSubject: "hihihihihhi", }, CAName: "I'm honest CA, nice to meet you", LogID: 1231323, @@ -137,7 +137,7 @@ func TestJsonReadWrite(t *testing.T) { spt2 := &common.SPT{ Version: 12368713, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "hohohoho", + RawSubject: "hohohoho", }, CAName: "I'm malicious CA, nice to meet you", LogID: 1324123, @@ -152,7 +152,7 @@ func TestJsonReadWrite(t *testing.T) { rpc := &common.RPC{ SerialNumber: 1729381, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "bad domain", + RawSubject: "bad domain", }, Version: 1729381, PublicKeyAlgorithm: common.RSA, @@ -182,7 +182,7 @@ func randomRPC(t tests.T) *common.RPC { return &common.RPC{ SerialNumber: 1729381, PolicyObjectBase: common.PolicyObjectBase{ - Subject: "RPC CA", + RawSubject: "RPC CA", }, Version: 1729381, PublicKeyAlgorithm: common.RSA, @@ -201,7 +201,7 @@ func randomRPC(t tests.T) *common.RPC { func randomRCSR(t tests.T) *common.RCSR { return &common.RCSR{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "subject", + RawSubject: "subject", }, Version: 6789, TimeStamp: nowWithoutMonotonic(), @@ -220,7 +220,7 @@ func randomSP(t tests.T) *common.SP { }, TimeStamp: nowWithoutMonotonic(), PolicyObjectBase: common.PolicyObjectBase{ - Subject: "domainname.com", + RawSubject: "domainname.com", }, CAName: "ca1", SerialNumber: rand.Int(), @@ -237,7 +237,7 @@ func randomSP(t tests.T) *common.SP { func randomSPT(t tests.T) *common.SPT { return &common.SPT{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "hohohoho", + RawSubject: "hohohoho", }, Version: 12368713, 
CAName: "I'm malicious CA, nice to meet you", diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index b392ffcd..3518318b 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -44,7 +44,7 @@ func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCS // generate rcsr rcsr := &common.RCSR{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, + RawSubject: domainName, }, Version: version, TimeStamp: time.Now(), diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 86f9c9a5..992c48a7 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -37,7 +37,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Mock up a RPC. rpc := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "fpki.com", + RawSubject: "fpki.com", }, SerialNumber: 2, Version: 1, @@ -117,7 +117,7 @@ func TestCheckRPC(t *testing.T) { // Mock a RPC. rpc := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "fpki.com", + RawSubject: "fpki.com", }, SerialNumber: 2, Version: 1, @@ -170,7 +170,7 @@ func TestCheckSP(t *testing.T) { // Mock an SP. 
sp := &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "fpki.com", + RawSubject: "fpki.com", }, Policies: common.Policy{ TrustedCA: []string{"US CA"}, diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index ae7bfe74..043766cb 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -327,7 +327,7 @@ func GetPCAndRPC(ctURL string, startIndex int64, endIndex int64, numOfWorker int } resultPC = append(resultPC, &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, + RawSubject: domainName, }, TimeStamp: time.Now(), CASignature: generateRandomBytes(), @@ -335,7 +335,7 @@ func GetPCAndRPC(ctURL string, startIndex int64, endIndex int64, numOfWorker int resultRPC = append(resultRPC, &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, + RawSubject: domainName, }, NotBefore: time.Now(), }) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index a531a9fb..6785ca90 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -155,7 +155,7 @@ func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]stri payloads[i] = pol.Raw() id := common.SHA256Hash32Bytes(pol.Raw()) policyIDs[i] = &id - policySubjects[i] = pol.Domain() + policySubjects[i] = pol.Subject() } err = insertPolicies(ctx, conn, policySubjects, policyIDs, payloads) @@ -202,7 +202,7 @@ func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]s payloads[i] = pol.Raw() id := common.SHA256Hash32Bytes(pol.Raw()) policyIDs[i] = &id - policySubjects[i] = pol.Domain() + policySubjects[i] = pol.Subject() } // Check which policies are already present in the DB. 
maskPols, err := conn.CheckPoliciesExist(ctx, policyIDs) diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 5e02aa0d..a923c644 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -97,7 +97,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { // Check policy coalescing. policiesPerName := make(map[string][]common.PolicyObject, len(pols)) for _, pol := range pols { - policiesPerName[pol.Domain()] = append(policiesPerName[pol.Domain()], pol) + policiesPerName[pol.Subject()] = append(policiesPerName[pol.Subject()], pol) } for name, policies := range policiesPerName { id := common.SHA256Hash32Bytes([]byte(name)) diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index a9be36e9..1d472641 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -95,7 +95,7 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { // move the rpc from pre-rpc to valid-rpc delete(pca.preRPCByDomains, k) - pca.validRPCsByDomains[v.Subject] = v + pca.validRPCsByDomains[v.RawSubject] = v } else { return fmt.Errorf("Fail to verify one SPT RPC") } @@ -117,7 +117,7 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { // move the rpc from pre-rpc to valid-rpc delete(pca.preRPCByDomains, k) - pca.validSPsByDomains[v.Subject] = v + pca.validSPsByDomains[v.RawSubject] = v } else { return fmt.Errorf("Fail to verify one SPT SP") } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 25ab3b1a..ab72a062 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -37,7 +37,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic // Create one RPC and one SP for that name. 
rpc := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, + RawSubject: domainName, }, SerialNumber: 1, Version: 1, @@ -51,7 +51,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic sp := &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: domainName, + RawSubject: domainName, }, CAName: "c0.com", CASignature: RandomBytesForTest(t, 100), diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index 82f9bc6f..3d7d5323 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -21,13 +21,13 @@ func TestToTypedSlice(t *testing.T) { orig := []*common.RPC{ { PolicyObjectBase: common.PolicyObjectBase{ - Subject: "a.com", + RawSubject: "a.com", }, Version: 1, }, { PolicyObjectBase: common.PolicyObjectBase{ - Subject: "b.com", + RawSubject: "b.com", }, Version: 1, }, @@ -47,7 +47,7 @@ func TestToType(t *testing.T) { { orig := &common.RPC{ PolicyObjectBase: common.PolicyObjectBase{ - Subject: "a.com", + RawSubject: "a.com", }, Version: 1, } From 297dec98dc935c431528817ccfc131118f89fdb6 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 15 Jun 2023 09:04:55 +0200 Subject: [PATCH 154/187] Add creation functions for policy objects. This will ease the process of refactoring these types laterAdd creation functions for policy objects. This will ease the process of refactoring these types later.. 
--- pkg/common/crypto/crypto.go | 54 +++-- pkg/common/crypto/crypto_test.go | 81 +++---- pkg/common/json_test.go | 49 ++-- pkg/common/structure.go | 148 +++++++++++- pkg/common/structure_test.go | 245 ++------------------ pkg/domainowner/domainowner.go | 25 +- pkg/logverifier/logverifier_test.go | 70 ++---- pkg/logverifier/verifier.go | 14 +- pkg/mapserver/logfetcher/logfetcher.go | 22 +- pkg/mapserver/logfetcher/logfetcher_test.go | 2 + pkg/mapserver/updater/updater.go | 2 +- pkg/pca/sign_and_log.go | 2 +- pkg/tests/random/random.go | 108 ++++++++- pkg/util/types_test.go | 23 +- 14 files changed, 402 insertions(+), 443 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 25b714c4..76c6fec7 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -111,23 +111,21 @@ func RCSRVerifyRPCSignature(rcsr *common.RCSR, rpc *common.RPC) error { func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, caPrivKey *rsa.PrivateKey, caName string) (*common.RPC, error) { - rpc := &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: rcsr.RawSubject, - }, - Version: rcsr.Version, - PublicKeyAlgorithm: rcsr.PublicKeyAlgorithm, - PublicKey: rcsr.PublicKey, - CAName: caName, - SignatureAlgorithm: common.SHA256, - TimeStamp: time.Now(), - PRCSignature: rcsr.PRCSignature, - NotBefore: notBefore, - NotAfter: time.Now().AddDate(0, 0, 90), - SerialNumber: serialNumber, - CASignature: []byte{}, - SPTs: []common.SPT{}, - } + rpc := common.NewRPC( + rcsr.RawSubject, + serialNumber, + rcsr.Version, + rcsr.PublicKeyAlgorithm, + rcsr.PublicKey, + notBefore, + time.Now().AddDate(0, 0, 90), + caName, + common.SHA256, + time.Now(), + rcsr.PRCSignature, + []byte{}, + nil, + ) signature, err := signStructRSASHA256(rpc, caPrivKey) if err != nil { @@ -206,20 +204,20 @@ func VerifyPSRUsingRPC(psr *common.PSR, rpc *common.RPC) error { func CASignSP(psr *common.PSR, caPrivKey *rsa.PrivateKey, caName string, 
serialNum int) ( *common.SP, error) { - sp := &common.SP{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: psr.DomainName, - }, - Policies: psr.Policies, - RootCertSignature: psr.RootCertSignature, - TimeStamp: time.Now(), - CAName: caName, - SerialNumber: serialNum, - } + sp := common.NewSP( + psr.SubjectRaw, + psr.Policy, + time.Now(), + caName, + serialNum, + nil, + psr.RootCertSignature, + nil, + ) caSignature, err := signStructRSASHA256(sp, caPrivKey) if err != nil { - return &common.SP{}, fmt.Errorf("CASignSP | SignStructRSASHA256 | %w", err) + return nil, fmt.Errorf("CASignSP | SignStructRSASHA256 | %w", err) } sp.CASignature = caSignature diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index ca9a2514..00811559 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -18,22 +18,17 @@ func TestSignatureOfRCSR(t *testing.T) { privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "load RSA key error") - test := &common.RCSR{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "this is a test", - }, - Version: 44, - TimeStamp: time.Now(), - PublicKeyAlgorithm: common.RSA, - SignatureAlgorithm: common.SHA256, - PRCSignature: random.RandomBytesForTest(t, 32), - Signature: random.RandomBytesForTest(t, 32), - } - pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "RSA key to bytes error") - - test.PublicKey = pubKeyBytes + test := common.NewRCSR("this is a test", + 44, + time.Now(), + common.RSA, + pubKeyBytes, + common.SHA256, + random.RandomBytesForTest(t, 32), + random.RandomBytesForTest(t, 32), + ) err = crypto.RCSRCreateSignature(privKey, test) require.NoError(t, err, "RCSR sign signature error") @@ -50,23 +45,17 @@ func TestIssuanceOfRPC(t *testing.T) { privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") - rcsr := &common.RCSR{ - 
PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "this is a test", - }, - Version: 44, - TimeStamp: time.Now(), - PublicKeyAlgorithm: common.RSA, - SignatureAlgorithm: common.SHA256, - PRCSignature: random.RandomBytesForTest(t, 32), - Signature: random.RandomBytesForTest(t, 32), - } - - // add public key pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") - - rcsr.PublicKey = pubKeyBytes + rcsr := common.NewRCSR("this is a test", + 44, + time.Now(), + common.RSA, + pubKeyBytes, + common.SHA256, + random.RandomBytesForTest(t, 32), + random.RandomBytesForTest(t, 32), + ) // generate signature for rcsr err = crypto.RCSRCreateSignature(privKey, rcsr) @@ -80,6 +69,7 @@ func TestIssuanceOfRPC(t *testing.T) { require.NoError(t, err, "RCSR Verify Signature error") pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") + require.NoError(t, err) rpc, err := crypto.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") require.NoError(t, err, "RCSR Generate RPC error") @@ -104,22 +94,17 @@ func TestIssuanceOfSP(t *testing.T) { privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") - rcsr := &common.RCSR{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "this is a test", - }, - Version: 44, - TimeStamp: time.Now(), - PublicKeyAlgorithm: common.RSA, - SignatureAlgorithm: common.SHA256, - PRCSignature: random.RandomBytesForTest(t, 32), - } - - // add public key pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") - - rcsr.PublicKey = pubKeyBytes + rcsr := common.NewRCSR("this is a test", + 44, + time.Now(), + common.RSA, + pubKeyBytes, + common.SHA256, + random.RandomBytesForTest(t, 32), + random.RandomBytesForTest(t, 32), + ) // generate signature for rcsr err = crypto.RCSRCreateSignature(privKey, rcsr) @@ -142,10 +127,12 @@ func 
TestIssuanceOfSP(t *testing.T) { // ------------------------------------- // phase 3: domain owner generate SP // ------------------------------------- - psr := &common.PSR{ - TimeStamp: time.Now(), - DomainName: "test_SP", - } + psr := common.NewPSR( + "test_SP", + common.Policy{}, + time.Now(), + nil, + ) err = crypto.DomainOwnerSignPSR(privKey, psr) require.NoError(t, err, "DomainOwnerSignPSR error") diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 3e88af87..98a119d0 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/tests/random" ) // TestPolicyObjects checks that the structure types in the test cases can be converted to JSON and @@ -18,37 +19,37 @@ func TestPolicyObjects(t *testing.T) { data any }{ "rpcPtr": { - data: randomRPC(t), + data: random.RandomRPC(t), }, "rpcValue": { - data: *randomRPC(t), + data: *random.RandomRPC(t), }, "rcsr": { - data: randomRCSR(t), + data: random.RandomRCSR(t), }, "sp": { - data: randomSP(t), + data: random.RandomSP(t), }, "spt": { - data: *randomSPT(t), + data: *random.RandomSPT(t), }, "list": { data: []any{ - randomRPC(t), - randomRCSR(t), - randomSP(t), - randomSPRT(t), - randomPSR(t), + random.RandomRPC(t), + random.RandomRCSR(t), + random.RandomSP(t), + random.RandomSPRT(t), + random.RandomPSR(t), randomTrillianProof(t), randomLogRootV1(t), }, }, "list_embedded": { data: []any{ - randomRPC(t), + random.RandomRPC(t), []any{ - randomSP(t), - randomSPT(t), + random.RandomSP(t), + random.RandomSPT(t), }, []any{ randomTrillianProof(t), @@ -58,14 +59,14 @@ func TestPolicyObjects(t *testing.T) { }, "multiListPtr": { data: &[]any{ - randomRPC(t), - *randomRPC(t), + random.RandomRPC(t), + *random.RandomRPC(t), []any{ - randomSP(t), - *randomSP(t), + random.RandomSP(t), + *random.RandomSP(t), &[]any{ - randomSPT(t), - *randomSPT(t), + 
random.RandomSPT(t), + *random.RandomSPT(t), }, }, }, @@ -97,7 +98,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { getRawElemsFcn func(obj any) [][]byte // Return the Raw components of this thing. }{ "rpc": { - obj: randomRPC(t), + obj: random.RandomRPC(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { rpc := obj.(*common.RPC) @@ -105,7 +106,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { }, }, "spPtr": { - obj: randomSP(t), + obj: random.RandomSP(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { sp := obj.(*common.SP) @@ -113,7 +114,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { }, }, "spValue": { - obj: *randomSP(t), + obj: *random.RandomSP(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { sp := obj.(common.SP) @@ -122,8 +123,8 @@ func TestPolicyObjectBaseRaw(t *testing.T) { }, "list": { obj: []any{ - randomSP(t), - randomRPC(t), + random.RandomSP(t), + random.RandomRPC(t), }, rawElemsCount: 2, getRawElemsFcn: func(obj any) [][]byte { diff --git a/pkg/common/structure.go b/pkg/common/structure.go index c946baca..a27e3a57 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -33,6 +33,31 @@ type RCSR struct { Signature []byte `json:",omitempty"` } +func NewRCSR( + Subject string, + Version int, + TimeStamp time.Time, + PublicKeyAlgo PublicKeyAlgorithm, + PublicKey []byte, + SignatureAlgo SignatureAlgorithm, + PRCSignature []byte, + Signature []byte, +) *RCSR { + + return &RCSR{ + PolicyObjectBase: PolicyObjectBase{ + RawSubject: Subject, + }, + Version: Version, + TimeStamp: TimeStamp, + PublicKeyAlgorithm: PublicKeyAlgo, + PublicKey: PublicKey, + SignatureAlgorithm: SignatureAlgo, + PRCSignature: PRCSignature, + Signature: Signature, + } +} + // root policy certificate type RPC struct { PolicyObjectBase @@ -50,12 +75,55 @@ type RPC struct { SPTs []SPT `json:",omitempty"` } +func NewRPC( + Subject string, + SerialNumber int, + Version int, + PublicKeyAlgorithm PublicKeyAlgorithm, + PublicKey []byte, + 
NotBefore time.Time, + NotAfter time.Time, + CAName string, + SignatureAlgorithm SignatureAlgorithm, + TimeStamp time.Time, + PRCSignature []byte, + CASignature []byte, + SPTs []SPT, +) *RPC { + + return &RPC{ + PolicyObjectBase: PolicyObjectBase{ + RawSubject: Subject, + }, + SerialNumber: SerialNumber, + Version: Version, + PublicKeyAlgorithm: PublicKeyAlgorithm, + PublicKey: PublicKey, + NotBefore: NotBefore, + NotAfter: NotAfter, + CAName: CAName, + SignatureAlgorithm: SignatureAlgorithm, + TimeStamp: TimeStamp, + PRCSignature: PRCSignature, + CASignature: CASignature, + SPTs: SPTs, + } +} + // PCRevocation is for now empty. type PCRevocation struct { PolicyObjectBase // TODO(juagargi) define the revocation. } +func NewPCRevocation(subject string) *PCRevocation { + return &PCRevocation{ + PolicyObjectBase{ + RawSubject: subject, + }, + } +} + // signed policy timestamp type SPT struct { PolicyObjectBase @@ -70,12 +138,48 @@ type SPT struct { Signature []byte `json:",omitempty"` } +func NewSPT( + Subject string, + Version int, + CAName string, + LogID int, + CertType uint8, + AddedTS time.Time, + STH []byte, + PoI []byte, + STHSerialNumber int, + Signature []byte, +) *SPT { + + return &SPT{ + PolicyObjectBase: PolicyObjectBase{ + RawSubject: Subject, + }, + Version: Version, + CAName: CAName, + LogID: LogID, + CertType: CertType, + AddedTS: AddedTS, + STH: STH, + PoI: PoI, + STHSerialNumber: STHSerialNumber, + Signature: Signature, + } +} + // signed policy revocation timestamp type SPRT struct { SPT Reason int `json:",omitempty"` } +func NewSPRT(SPT *SPT, Reason int) *SPRT { + return &SPRT{ + SPT: *SPT, + Reason: Reason, + } +} + // Signed Policy type SP struct { PolicyObjectBase @@ -88,14 +192,54 @@ type SP struct { SPTs []SPT `json:",omitempty"` } +func NewSP( + Subject string, + Policies Policy, + TimeStamp time.Time, + CAName string, + SerialNumber int, + CASignature []byte, + RootCertSignature []byte, + SPTs []SPT, +) *SP { + + return &SP{ + 
PolicyObjectBase: PolicyObjectBase{ + RawSubject: Subject, + }, + Policies: Policies, + TimeStamp: TimeStamp, + CAName: CAName, + SerialNumber: SerialNumber, + CASignature: CASignature, + RootCertSignature: RootCertSignature, + SPTs: SPTs, + } +} + // Policy Signing Request type PSR struct { - Policies Policy `json:",omitempty"` + SubjectRaw string `json:",omitempty"` + Policy Policy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` - DomainName string `json:",omitempty"` RootCertSignature []byte `json:",omitempty"` } +func NewPSR( + Subject string, + Policy Policy, + TimeStamp time.Time, + RootCertSignature []byte, +) *PSR { + + return &PSR{ + SubjectRaw: Subject, + Policy: Policy, + TimeStamp: TimeStamp, + RootCertSignature: RootCertSignature, + } +} + // Domain policy type Policy struct { TrustedCA []string `json:",omitempty"` diff --git a/pkg/common/structure_test.go b/pkg/common/structure_test.go index baafbeac..dd23b26f 100644 --- a/pkg/common/structure_test.go +++ b/pkg/common/structure_test.go @@ -1,11 +1,9 @@ package common_test import ( - "math/rand" "os" "path" "testing" - "time" "github.com/google/trillian" trilliantypes "github.com/google/trillian/types" @@ -21,7 +19,7 @@ var update = tests.UpdateGoldenFiles() func TestGenerateGoldenFiles(t *testing.T) { // Update the JSON files in tests/testdata if *update { - obj := []any{randomSP(t), randomSP(t)} + obj := []any{random.RandomSP(t), random.RandomSP(t)} err := common.ToJSONFile(obj, "../../tests/testdata/2-SPs.json") require.NoError(t, err) } @@ -29,142 +27,29 @@ func TestGenerateGoldenFiles(t *testing.T) { // TestEqual: Equal funcs for every structure func TestEqual(t *testing.T) { - rcsr := &common.RCSR{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "bandqhvdbdlwnd", - }, - Version: 6789, - TimeStamp: time.Now(), - PublicKeyAlgorithm: common.RSA, - PublicKey: random.RandomBytesForTest(t, 32), - SignatureAlgorithm: common.SHA256, - PRCSignature: 
random.RandomBytesForTest(t, 32), - Signature: random.RandomBytesForTest(t, 32), - } - - require.True(t, rcsr.Equal(rcsr), "RCSR Equal() error") + rcsr := random.RandomRCSR(t) + require.True(t, rcsr.Equal(rcsr)) - spt1 := common.SPT{ - Version: 12313, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "hihihihihhi", - }, - CAName: "I'm honest CA, nice to meet you", - LogID: 1231323, - CertType: 0x11, - AddedTS: time.Now(), - STH: random.RandomBytesForTest(t, 32), - PoI: random.RandomBytesForTest(t, 32), - STHSerialNumber: 131678, - Signature: random.RandomBytesForTest(t, 32), - } - - spt2 := common.SPT{ - Version: 12368713, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "hohohoho", - }, - CAName: "I'm malicious CA, nice to meet you", - LogID: 1324123, - CertType: 0x21, - AddedTS: time.Now(), - STH: random.RandomBytesForTest(t, 32), - PoI: random.RandomBytesForTest(t, 32), - STHSerialNumber: 114378, - Signature: random.RandomBytesForTest(t, 32), - } + spt1 := *random.RandomSPT(t) + spt2 := *random.RandomSPT(t) + require.True(t, spt1.Equal(spt1)) + require.True(t, spt2.Equal(spt2)) + require.False(t, spt1.Equal(spt2)) + require.False(t, spt2.Equal(spt1)) - require.True(t, spt1.Equal(spt1) && spt2.Equal(spt2) && !spt1.Equal(spt2) && !spt2.Equal(spt1), "SPT Equal() error") + sprt := random.RandomSPRT(t) + require.True(t, sprt.Equal(sprt)) - sprt := &common.SPRT{ - SPT: common.SPT{ - Version: 12314, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "bad domain", - }, - CAName: "I'm malicious CA, nice to meet you", - LogID: 1729381, - CertType: 0x21, - AddedTS: time.Now(), - STH: random.RandomBytesForTest(t, 32), - PoI: random.RandomBytesForTest(t, 32), - STHSerialNumber: 1729381, - Signature: random.RandomBytesForTest(t, 32), - }, - Reason: 1729381, - } - - require.True(t, sprt.Equal(sprt), "SPRT Equal() error") - - rpc := &common.RPC{ - SerialNumber: 1729381, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "bad domain", - }, - 
Version: 1729381, - PublicKeyAlgorithm: common.RSA, - PublicKey: random.RandomBytesForTest(t, 32), - NotBefore: time.Now(), - NotAfter: time.Now(), - CAName: "bad domain", - SignatureAlgorithm: common.SHA256, - TimeStamp: time.Now(), - PRCSignature: random.RandomBytesForTest(t, 32), - CASignature: random.RandomBytesForTest(t, 32), - SPTs: []common.SPT{spt1, spt2}, - } - - require.True(t, rpc.Equal(rpc), "RPC Equal() error") + rpc := random.RandomRPC(t) + require.True(t, rpc.Equal(rpc)) } // TestJsonReadWrite: RPC -> file -> RPC, then RPC.Equal(RPC) func TestJsonReadWrite(t *testing.T) { - spt1 := &common.SPT{ - Version: 12313, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "hihihihihhi", - }, - CAName: "I'm honest CA, nice to meet you", - LogID: 1231323, - CertType: 0x11, - AddedTS: time.Now(), - STH: random.RandomBytesForTest(t, 32), - PoI: random.RandomBytesForTest(t, 32), - STHSerialNumber: 131678, - Signature: random.RandomBytesForTest(t, 32), - } - - spt2 := &common.SPT{ - Version: 12368713, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "hohohoho", - }, - CAName: "I'm malicious CA, nice to meet you", - LogID: 1324123, - CertType: 0x21, - AddedTS: time.Now(), - STH: random.RandomBytesForTest(t, 32), - PoI: random.RandomBytesForTest(t, 32), - STHSerialNumber: 114378, - Signature: random.RandomBytesForTest(t, 32), - } - - rpc := &common.RPC{ - SerialNumber: 1729381, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "bad domain", - }, - Version: 1729381, - PublicKeyAlgorithm: common.RSA, - PublicKey: random.RandomBytesForTest(t, 32), - NotBefore: time.Now(), - NotAfter: time.Now(), - CAName: "bad domain", - SignatureAlgorithm: common.SHA256, - TimeStamp: time.Now(), - PRCSignature: random.RandomBytesForTest(t, 32), - CASignature: random.RandomBytesForTest(t, 32), - SPTs: []common.SPT{*spt1, *spt2}, + rpc := random.RandomRPC(t) + rpc.SPTs = []common.SPT{ + *random.RandomSPT(t), + *random.RandomSPT(t), } tempFile := 
path.Join(os.TempDir(), "rpctest.json") @@ -178,98 +63,6 @@ func TestJsonReadWrite(t *testing.T) { require.True(t, rpc.Equal(rpc1), "Json error") } -func randomRPC(t tests.T) *common.RPC { - return &common.RPC{ - SerialNumber: 1729381, - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "RPC CA", - }, - Version: 1729381, - PublicKeyAlgorithm: common.RSA, - PublicKey: random.RandomBytesForTest(t, 32), - NotBefore: nowWithoutMonotonic(), - NotAfter: nowWithoutMonotonic(), - CAName: "RPC CA", - SignatureAlgorithm: common.SHA256, - TimeStamp: nowWithoutMonotonic(), - PRCSignature: random.RandomBytesForTest(t, 32), - CASignature: random.RandomBytesForTest(t, 32), - SPTs: []common.SPT{*randomSPT(t), *randomSPT(t)}, - } -} - -func randomRCSR(t tests.T) *common.RCSR { - return &common.RCSR{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "subject", - }, - Version: 6789, - TimeStamp: nowWithoutMonotonic(), - PublicKeyAlgorithm: common.RSA, - PublicKey: random.RandomBytesForTest(t, 32), - SignatureAlgorithm: common.SHA256, - PRCSignature: random.RandomBytesForTest(t, 32), - Signature: random.RandomBytesForTest(t, 32), - } -} - -func randomSP(t tests.T) *common.SP { - return &common.SP{ - Policies: common.Policy{ - TrustedCA: []string{"ca1", "ca2"}, - }, - TimeStamp: nowWithoutMonotonic(), - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "domainname.com", - }, - CAName: "ca1", - SerialNumber: rand.Int(), - CASignature: random.RandomBytesForTest(t, 32), - RootCertSignature: random.RandomBytesForTest(t, 32), - SPTs: []common.SPT{ - *randomSPT(t), - *randomSPT(t), - *randomSPT(t), - }, - } -} - -func randomSPT(t tests.T) *common.SPT { - return &common.SPT{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "hohohoho", - }, - Version: 12368713, - CAName: "I'm malicious CA, nice to meet you", - LogID: 1324123, - CertType: 0x21, - AddedTS: nowWithoutMonotonic(), - STH: random.RandomBytesForTest(t, 32), - PoI: random.RandomBytesForTest(t, 32), - 
STHSerialNumber: 114378, - Signature: random.RandomBytesForTest(t, 32), - } -} - -func randomSPRT(t tests.T) *common.SPRT { - return &common.SPRT{ - SPT: *randomSPT(t), - Reason: 1729381, - } -} - -func randomPSR(t tests.T) *common.PSR { - return &common.PSR{ - Policies: common.Policy{ - TrustedCA: []string{"one CA", "another CA"}, - AllowedSubdomains: []string{"sub1.com", "sub2.com"}, - }, - TimeStamp: nowWithoutMonotonic(), - DomainName: "domain_name.com", - RootCertSignature: random.RandomBytesForTest(t, 32), - } -} - func randomTrillianProof(t tests.T) *trillian.Proof { return &trillian.Proof{ LeafIndex: 1, @@ -286,7 +79,3 @@ func randomLogRootV1(t tests.T) *trilliantypes.LogRootV1 { Metadata: random.RandomBytesForTest(t, 40), } } - -func nowWithoutMonotonic() time.Time { - return time.Unix(time.Now().Unix(), 0) -} diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 3518318b..d2470740 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -41,17 +41,16 @@ func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCS return nil, fmt.Errorf("GenerateRCSR | RsaPublicKeyToPemBytes | %w", err) } - // generate rcsr - rcsr := &common.RCSR{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: domainName, - }, - Version: version, - TimeStamp: time.Now(), - PublicKeyAlgorithm: common.RSA, - PublicKey: pubKeyBytes, - SignatureAlgorithm: common.SHA256, - } + rcsr := common.NewRCSR( + domainName, + version, + time.Now(), + common.RSA, + pubKeyBytes, + common.SHA256, + nil, + nil, + ) // if domain owner still have the private key of the previous RPC -> can avoid cool-off period if prevKey, ok := do.privKeyByDomainName[domainName]; ok { @@ -80,9 +79,9 @@ func (do *DomainOwner) GeneratePSR(domainName string, policy common.Policy) (*co } psr := &common.PSR{ - Policies: policy, + SubjectRaw: domainName, + Policy: policy, TimeStamp: time.Now(), - DomainName: domainName, } err := 
crypto.DomainOwnerSignPSR(rpcKeyPair, psr) diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 992c48a7..1c3a1124 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -25,29 +25,17 @@ func TestVerifyInclusionByHash(t *testing.T) { }, } - // Create a mock STH with the correct root hash. + // Create a mock STH with the correct root hash to pass the test. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "BSH/yAK1xdSSNMxzNbBD4pdAsqUin8L3st6w9su+nRk="), + RootHash: tests.MustDecodeBase64(t, "/Pk2HUaMxp2JDmKrEw8H/vqhjs3xsUcU2JUDaDD+bDE="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, } // Mock up a RPC. - rpc := &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "fpki.com", - }, - SerialNumber: 2, - Version: 1, - PublicKey: random.RandomBytesForTest(t, 32), - NotBefore: util.TimeFromSecs(42), - NotAfter: util.TimeFromSecs(142), - CAName: "pca", - TimeStamp: util.TimeFromSecs(100), - CASignature: random.RandomBytesForTest(t, 32), - } + rpc := random.RandomRPC(t) // Serialize it without SPTs. serializedRPC, err := common.ToJSON(rpc) @@ -96,7 +84,7 @@ func TestCheckRPC(t *testing.T) { // Mock a STH with the right root hash. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "qtkcR3q27tgl90D5Wl1yCRYPEcvXcDvqEi1HH1mnffg="), + RootHash: tests.MustDecodeBase64(t, "QxOQbyfff8Hi5UWqpLC0abhJzpQC3a+6kMgD5nepfCA="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -115,24 +103,12 @@ func TestCheckRPC(t *testing.T) { require.NoError(t, err) // Mock a RPC. 
- rpc := &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "fpki.com", - }, - SerialNumber: 2, - Version: 1, - PublicKey: random.RandomBytesForTest(t, 32), - NotBefore: util.TimeFromSecs(42), - NotAfter: util.TimeFromSecs(142), - CAName: "pca", - TimeStamp: util.TimeFromSecs(100), - CASignature: random.RandomBytesForTest(t, 32), - SPTs: []common.SPT{ - { - AddedTS: util.TimeFromSecs(99), - STH: serializedSTH, - PoI: serializedPoI, - }, + rpc := random.RandomRPC(t) + rpc.SPTs = []common.SPT{ + { + AddedTS: util.TimeFromSecs(99), + STH: serializedSTH, + PoI: serializedPoI, }, } @@ -149,7 +125,7 @@ func TestCheckSP(t *testing.T) { // Mock a STH with the right root hash. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "8rAPQQeydFrBYHkreAlISGoGeHXFLlTqWM8Xb0wJNiY="), + RootHash: tests.MustDecodeBase64(t, "p/zmpyI3xc064LO9NvXi99BqQoCQPO7GeMgzrBlAUKM="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -168,24 +144,12 @@ func TestCheckSP(t *testing.T) { require.NoError(t, err) // Mock an SP. 
- sp := &common.SP{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "fpki.com", - }, - Policies: common.Policy{ - TrustedCA: []string{"US CA"}, - }, - TimeStamp: util.TimeFromSecs(444), - CAName: "pca", - SerialNumber: 4, - CASignature: random.RandomBytesForTest(t, 32), - RootCertSignature: random.RandomBytesForTest(t, 32), - SPTs: []common.SPT{ - { - AddedTS: util.TimeFromSecs(444), - STH: serializedSTH, - PoI: serializedPoI, - }, + sp := random.RandomSP(t) + sp.SPTs = []common.SPT{ + { + AddedTS: util.TimeFromSecs(444), + STH: serializedSTH, + PoI: serializedPoI, }, } diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 77480695..637e2600 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -90,21 +90,23 @@ func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHa // Proofs might contain multiple proofs for different leaves, while the content of each leaf // is identical. Trillian will return all the proofs for one content. // So one successful verification is enough. - var specialErr error for _, proof := range proofs { err := logProof.VerifyInclusion(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, leafHash, proof.Hashes, trustedRoot.RootHash) if err == nil { - return nil } if _, ok := err.(logProof.RootMismatchError); !ok { - specialErr = err + return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", err) } - } - if specialErr != nil { - return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", specialErr) + + // deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, + // leafHash, proof.Hashes) + // if err != nil { + // panic(err) + // } + // fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) } // This is a logProof.RootMismatchError, aka different hash values. 
return fmt.Errorf("verification failed: different hashes") diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 043766cb..4cfdc756 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -304,11 +304,11 @@ func (f *LogFetcher) getRawEntries( return end - start + 1, nil } -// GetPCAndRPC: get PC and RPC from url +// GetPCAndRPCs: get PC and RPC from url // TODO(yongzhe): currently just generate random PC and RPC using top 1k domain names -func GetPCAndRPC(ctURL string, startIndex int64, endIndex int64, numOfWorker int) ([]*common.SP, []*common.RPC, error) { - resultPC := []*common.SP{} - resultRPC := []*common.RPC{} +func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker int) ([]*common.SP, []*common.RPC, error) { + resultPCs := make([]*common.SP, 0) + resultRPCs := make([]*common.RPC, 0) f, err := os.Open(ctURL) if err != nil { @@ -325,7 +325,7 @@ func GetPCAndRPC(ctURL string, startIndex int64, endIndex int64, numOfWorker int //fmt.Println("invalid domain name: ", domainName) continue } - resultPC = append(resultPC, &common.SP{ + resultPCs = append(resultPCs, &common.SP{ PolicyObjectBase: common.PolicyObjectBase{ RawSubject: domainName, }, @@ -333,18 +333,16 @@ func GetPCAndRPC(ctURL string, startIndex int64, endIndex int64, numOfWorker int CASignature: generateRandomBytes(), }) - resultRPC = append(resultRPC, &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: domainName, - }, - NotBefore: time.Now(), - }) + rpc := &common.RPC{} + rpc.RawSubject = domainName + rpc.NotBefore = time.Now() + resultRPCs = append(resultRPCs, rpc) } if err := scanner.Err(); err != nil { return nil, nil, fmt.Errorf("GetPCAndRPC | scanner.Err | %w", err) } - return resultPC, resultRPC, nil + return resultPCs, resultRPCs, nil } func generateRandomBytes() []byte { diff --git a/pkg/mapserver/logfetcher/logfetcher_test.go 
b/pkg/mapserver/logfetcher/logfetcher_test.go index 4b765ba8..76d68173 100644 --- a/pkg/mapserver/logfetcher/logfetcher_test.go +++ b/pkg/mapserver/logfetcher/logfetcher_test.go @@ -270,6 +270,8 @@ func TestTimeoutLogFetcher(t *testing.T) { } func TestSpeed(t *testing.T) { + t.Skip("Enable to measure speed of the log fetcher") + ctx, cancelF := context.WithTimeout(context.Background(), 5*time.Minute) defer cancelF() f, err := NewLogFetcher(ctURL) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 6785ca90..ec6475f5 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -98,7 +98,7 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ // UpdateRPCAndPC: update RPC and PC from url. Currently just mock PC and RPC func (mapUpdater *MapUpdater) UpdateRPCAndPC(ctx context.Context, ctUrl string, startIdx, endIdx int64) error { // get PC and RPC first - pcList, rpcList, err := logfetcher.GetPCAndRPC(ctUrl, startIdx, endIdx, 20) + pcList, rpcList, err := logfetcher.GetPCAndRPCs(ctUrl, startIdx, endIdx, 20) if err != nil { return fmt.Errorf("CollectCerts | GetPCAndRPC | %w", err) } diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index a0b460f1..69181102 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -73,7 +73,7 @@ func (pca *PCA) SignAndLogSP(psr *common.PSR) error { } func (pca *PCA) findRPCAndVerifyPSR(psr *common.PSR) error { - rpc, ok := pca.validRPCsByDomains[psr.DomainName] + rpc, ok := pca.validRPCsByDomains[psr.SubjectRaw] if !ok { return fmt.Errorf("findRPCAndVerifyPSR | validRPCsByDomains | no valid rpc at this moment") } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index ab72a062..c193e564 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -35,16 +35,10 @@ func RandomX509Cert(t tests.T, domain string) *ctx509.Certificate { func BuildTestRandomPolicyHierarchy(t tests.T, domainName 
string) []common.PolicyObject { // Create one RPC and one SP for that name. - rpc := &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: domainName, - }, - SerialNumber: 1, - Version: 1, - PublicKey: RandomBytesForTest(t, 32), - CAName: "c0.com", - CASignature: RandomBytesForTest(t, 100), - } + rpc := RandomRPC(t) + rpc.RawSubject = domainName + rpc.CAName = "c0.com" + data, err := common.ToJSON(rpc) require.NoError(t, err) rpc.RawJSON = data @@ -99,3 +93,97 @@ func BuildTestRandomCertHierarchy(t tests.T, domainName string) ( return } + +func RandomTimeWithoutMonotonic() time.Time { + return time.Date( + 1900+rand.Intn(200), // 1900-2100 + time.Month(1+rand.Intn(12)), // 1-12 + 1+rand.Intn(31), // 1-31 + rand.Intn(24), // 0-23 + rand.Intn(60), // 0-59 + rand.Intn(60), // 0-59 + 0, + time.UTC, + ) +} + +func RandomSPT(t tests.T) *common.SPT { + return common.NewSPT( + "spt subject", + rand.Intn(10), + "Issuer", + rand.Intn(100000), // 0-99,999 + 0x21, + RandomTimeWithoutMonotonic(), + RandomBytesForTest(t, 32), + RandomBytesForTest(t, 32), + rand.Intn(1000), + RandomBytesForTest(t, 32), + ) +} + +func RandomRPC(t tests.T) *common.RPC { + return common.NewRPC( + "RPC subject", + rand.Intn(10), + rand.Intn(10), + common.RSA, + RandomBytesForTest(t, 32), + RandomTimeWithoutMonotonic(), + RandomTimeWithoutMonotonic(), + "Issuer", + common.SHA256, + RandomTimeWithoutMonotonic(), + RandomBytesForTest(t, 32), + RandomBytesForTest(t, 32), + []common.SPT{*RandomSPT(t), *RandomSPT(t)}, + ) +} + +func RandomSPRT(t tests.T) *common.SPRT { + return common.NewSPRT(RandomSPT(t), rand.Intn(1000)) +} + +func RandomSP(t tests.T) *common.SP { + return common.NewSP( + "domainname.com", + common.Policy{ + TrustedCA: []string{"ca1", "ca2"}, + }, + RandomTimeWithoutMonotonic(), + "ca1", + rand.Int(), + RandomBytesForTest(t, 32), + RandomBytesForTest(t, 32), + []common.SPT{ + *RandomSPT(t), + *RandomSPT(t), + *RandomSPT(t), + }, + ) +} + +func RandomPSR(t tests.T) 
*common.PSR { + return common.NewPSR( + "domain_name.com", + common.Policy{ + TrustedCA: []string{"one CA", "another CA"}, + AllowedSubdomains: []string{"sub1.com", "sub2.com"}, + }, + RandomTimeWithoutMonotonic(), + RandomBytesForTest(t, 32), + ) +} + +func RandomRCSR(t tests.T) *common.RCSR { + return common.NewRCSR( + "subject", + 6789, + RandomTimeWithoutMonotonic(), + common.RSA, + RandomBytesForTest(t, 32), + common.SHA256, + RandomBytesForTest(t, 32), + RandomBytesForTest(t, 32), + ) +} diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index 3d7d5323..bd8571f2 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -19,18 +19,8 @@ func TestToTypedSlice(t *testing.T) { // slice of *common.RPC { orig := []*common.RPC{ - { - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "a.com", - }, - Version: 1, - }, - { - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "b.com", - }, - Version: 1, - }, + {Version: 1}, + {Version: 2}, } s := make([]any, len(orig)) for i, e := range orig { @@ -45,12 +35,9 @@ func TestToTypedSlice(t *testing.T) { func TestToType(t *testing.T) { // *common.RPC { - orig := &common.RPC{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: "a.com", - }, - Version: 1, - } + orig := &common.RPC{} + orig.RawSubject = "a.com" + orig.Version = 1 e := any(orig) r, err := ToType[*common.RPC](e) require.NoError(t, err) From acb3e8e7037bda816fc78965050799c6080f590b Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 15 Jun 2023 17:39:37 +0200 Subject: [PATCH 155/187] Split policies into two interfaces. MarshallableObject is something that can be (un)marshaled to JSON. PolicyDocument is any policy that contains a subject. 
--- cmd/ingest/certProcessor.go | 2 +- pkg/common/structure.go | 11 ++++++++--- pkg/db/mysql/mysql_test.go | 2 +- pkg/mapserver/updater/updater.go | 4 ++-- pkg/mapserver/updater/updater_test.go | 4 ++-- pkg/tests/random/random.go | 4 ++-- pkg/util/io.go | 4 ++-- 7 files changed, 18 insertions(+), 13 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 74aacdce..531b89db 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -79,7 +79,7 @@ const ( type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*common.SHA256Output, []*common.SHA256Output, []*ctx509.Certificate, []*time.Time, - []common.PolicyObject) error + []common.PolicyDocument) error func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { diff --git a/pkg/common/structure.go b/pkg/common/structure.go index a27e3a57..5172fe69 100644 --- a/pkg/common/structure.go +++ b/pkg/common/structure.go @@ -5,11 +5,16 @@ import ( "time" ) -// PolicyObject is an interface that is implemented by all objects that are part of the set +// MarshallableObject is an object that can be marshalled and unmarshalled to and from JSON. +type MarshallableObject interface { + Raw() []byte // Returns the Raw JSON this object was unmarshaled from (nil if none). +} + +// PolicyDocument is an interface that is implemented by all objects that are part of the set // of "policy objects". A policy object is that one that represents functionality of policies // for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. 
-type PolicyObject interface { - Raw() []byte +type PolicyDocument interface { + MarshallableObject Subject() string } diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 9c652e3e..fdb97e95 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -218,7 +218,7 @@ func testCertHierarchyForLeafs(t tests.T, leaves []string) (certs []*ctx509.Cert // testPolicyHierarchyForLeafs returns simply a policy hierarchy per leaf name, created using // the function BuildTestRandomPolicyHierarchy. -func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyObject, +func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyDocument, polIDs []*common.SHA256Output) { for _, name := range leaves { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index ec6475f5..a432900f 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -133,7 +133,7 @@ func (mapUpdater *MapUpdater) updateRPCAndPC( func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]string, certIDs, parentCertIDs []*common.SHA256Output, certs []*ctx509.Certificate, certExpirations []*time.Time, - policies []common.PolicyObject, + policies []common.PolicyDocument, ) error { // Insert all specified certificates. @@ -165,7 +165,7 @@ func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]stri func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]string, certIDs, parentCertIDs []*common.SHA256Output, certs []*ctx509.Certificate, certExpirations []*time.Time, - policies []common.PolicyObject, + policies []common.PolicyDocument, ) error { // First check which certificates are already present in the DB. 
diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index a923c644..79450648 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -95,7 +95,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { } // Check policy coalescing. - policiesPerName := make(map[string][]common.PolicyObject, len(pols)) + policiesPerName := make(map[string][]common.PolicyDocument, len(pols)) for _, pol := range pols { policiesPerName[pol.Subject()] = append(policiesPerName[pol.Subject()], pol) } @@ -161,7 +161,7 @@ func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.S return gluedIDs, &id } -func computeIDsOfPolicies(policies []common.PolicyObject) []*common.SHA256Output { +func computeIDsOfPolicies(policies []common.PolicyDocument) []*common.SHA256Output { set := make(map[common.SHA256Output]struct{}, len(policies)) for _, pol := range policies { id := common.SHA256Hash32Bytes(pol.Raw()) diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index c193e564..ae41ef48 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -33,7 +33,7 @@ func RandomX509Cert(t tests.T, domain string) *ctx509.Certificate { } } -func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyObject { +func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyDocument { // Create one RPC and one SP for that name. 
rpc := RandomRPC(t) rpc.RawSubject = domainName @@ -55,7 +55,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic require.NoError(t, err) sp.RawJSON = data - return []common.PolicyObject{rpc, sp} + return []common.PolicyDocument{rpc, sp} } // BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate diff --git a/pkg/util/io.go b/pkg/util/io.go index 8f659e85..f5303b53 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -159,13 +159,13 @@ func LoadCertsAndChainsFromCSV( // LoadPoliciesFromRaw can load RPCs, SPs, RCSRs, PCRevocations, SPRTs, and PSRs from their // serialized form. -func LoadPoliciesFromRaw(b []byte) ([]common.PolicyObject, error) { +func LoadPoliciesFromRaw(b []byte) ([]common.PolicyDocument, error) { obj, err := common.FromJSON(b) if err != nil { return nil, err } // The returned object should be of type list. - pols, err := ToTypedSlice[common.PolicyObject](obj) + pols, err := ToTypedSlice[common.PolicyDocument](obj) if err != nil { return nil, err } From 3e3bce41a4085bd7a66f0ee16a82831b75684a6a Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 15 Jun 2023 17:50:39 +0200 Subject: [PATCH 156/187] Renamed two files. --- pkg/common/{structure.go => policies.go} | 0 pkg/common/{structure_test.go => policies_test.go} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename pkg/common/{structure.go => policies.go} (100%) rename pkg/common/{structure_test.go => policies_test.go} (100%) diff --git a/pkg/common/structure.go b/pkg/common/policies.go similarity index 100% rename from pkg/common/structure.go rename to pkg/common/policies.go diff --git a/pkg/common/structure_test.go b/pkg/common/policies_test.go similarity index 100% rename from pkg/common/structure_test.go rename to pkg/common/policies_test.go From e3bb170f3f20b3717382c0b224d5f1f2e7ccf964 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 15 Jun 2023 18:01:02 +0200 Subject: [PATCH 157/187] Split policy documents into three files. Embedded documents that sit inside other policies, documents used to issue another policy documents, or regular policy documents inside the map server and DB. --- pkg/common/embedded_policies.go | 81 +++++++++++++++ pkg/common/policies.go | 172 ++------------------------------ pkg/common/policy_issuance.go | 77 ++++++++++++++ 3 files changed, 169 insertions(+), 161 deletions(-) create mode 100644 pkg/common/embedded_policies.go create mode 100644 pkg/common/policy_issuance.go diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go new file mode 100644 index 00000000..b4411299 --- /dev/null +++ b/pkg/common/embedded_policies.go @@ -0,0 +1,81 @@ +package common + +import ( + "bytes" + "time" +) + +// SPT is a signed policy timestamp. +type SPT struct { + PolicyObjectBase + Version int `json:",omitempty"` + CAName string `json:",omitempty"` + LogID int `json:",omitempty"` + CertType uint8 `json:",omitempty"` + AddedTS time.Time `json:",omitempty"` + STH []byte `json:",omitempty"` + PoI []byte `json:",omitempty"` + STHSerialNumber int `json:",omitempty"` + Signature []byte `json:",omitempty"` +} + +func NewSPT( + Subject string, + Version int, + CAName string, + LogID int, + CertType uint8, + AddedTS time.Time, + STH []byte, + PoI []byte, + STHSerialNumber int, + Signature []byte, +) *SPT { + + return &SPT{ + PolicyObjectBase: PolicyObjectBase{ + RawSubject: Subject, + }, + Version: Version, + CAName: CAName, + LogID: LogID, + CertType: CertType, + AddedTS: AddedTS, + STH: STH, + PoI: PoI, + STHSerialNumber: STHSerialNumber, + Signature: Signature, + } +} + +// SPRT is a signed policy revocation timestamp. 
+type SPRT struct { + SPT + Reason int `json:",omitempty"` +} + +func NewSPRT(SPT *SPT, Reason int) *SPRT { + return &SPRT{ + SPT: *SPT, + Reason: Reason, + } +} + +func (s SPT) Equal(o SPT) bool { + return true && + s.Version == o.Version && + s.RawSubject == o.RawSubject && + s.CAName == o.CAName && + s.LogID == o.LogID && + s.CertType == o.CertType && + s.AddedTS.Equal(o.AddedTS) && + bytes.Equal(s.STH, o.STH) && + bytes.Equal(s.PoI, o.PoI) && + s.STHSerialNumber == o.STHSerialNumber && + bytes.Equal(s.Signature, o.Signature) +} + +func (sprt *SPRT) Equal(sprt_ *SPRT) bool { + return sprt.SPT.Equal(sprt_.SPT) && + sprt.Reason == sprt_.Reason +} diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 5172fe69..06d35059 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -27,41 +27,6 @@ func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } func (o PolicyObjectBase) Subject() string { return o.RawSubject } // root certificate signing request -type RCSR struct { - PolicyObjectBase - Version int `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` - PublicKey []byte `json:",omitempty"` - SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` - PRCSignature []byte `json:",omitempty"` - Signature []byte `json:",omitempty"` -} - -func NewRCSR( - Subject string, - Version int, - TimeStamp time.Time, - PublicKeyAlgo PublicKeyAlgorithm, - PublicKey []byte, - SignatureAlgo SignatureAlgorithm, - PRCSignature []byte, - Signature []byte, -) *RCSR { - - return &RCSR{ - PolicyObjectBase: PolicyObjectBase{ - RawSubject: Subject, - }, - Version: Version, - TimeStamp: TimeStamp, - PublicKeyAlgorithm: PublicKeyAlgo, - PublicKey: PublicKey, - SignatureAlgorithm: SignatureAlgo, - PRCSignature: PRCSignature, - Signature: Signature, - } -} // root policy certificate type RPC struct { @@ -129,62 +94,6 @@ func NewPCRevocation(subject string) *PCRevocation { } } -// signed policy 
timestamp -type SPT struct { - PolicyObjectBase - Version int `json:",omitempty"` - CAName string `json:",omitempty"` - LogID int `json:",omitempty"` - CertType uint8 `json:",omitempty"` - AddedTS time.Time `json:",omitempty"` - STH []byte `json:",omitempty"` - PoI []byte `json:",omitempty"` - STHSerialNumber int `json:",omitempty"` - Signature []byte `json:",omitempty"` -} - -func NewSPT( - Subject string, - Version int, - CAName string, - LogID int, - CertType uint8, - AddedTS time.Time, - STH []byte, - PoI []byte, - STHSerialNumber int, - Signature []byte, -) *SPT { - - return &SPT{ - PolicyObjectBase: PolicyObjectBase{ - RawSubject: Subject, - }, - Version: Version, - CAName: CAName, - LogID: LogID, - CertType: CertType, - AddedTS: AddedTS, - STH: STH, - PoI: PoI, - STHSerialNumber: STHSerialNumber, - Signature: Signature, - } -} - -// signed policy revocation timestamp -type SPRT struct { - SPT - Reason int `json:",omitempty"` -} - -func NewSPRT(SPT *SPT, Reason int) *SPRT { - return &SPRT{ - SPT: *SPT, - Reason: Reason, - } -} - // Signed Policy type SP struct { PolicyObjectBase @@ -222,78 +131,12 @@ func NewSP( } } -// Policy Signing Request -type PSR struct { - SubjectRaw string `json:",omitempty"` - Policy Policy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` -} - -func NewPSR( - Subject string, - Policy Policy, - TimeStamp time.Time, - RootCertSignature []byte, -) *PSR { - - return &PSR{ - SubjectRaw: Subject, - Policy: Policy, - TimeStamp: TimeStamp, - RootCertSignature: RootCertSignature, - } -} - -// Domain policy +// Policy is a domain policy. 
type Policy struct { TrustedCA []string `json:",omitempty"` AllowedSubdomains []string `json:",omitempty"` } -//---------------------------------------------------------------- -// Equal function -//---------------------------------------------------------------- - -// listed funcs are Equal() func for each structure -func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { - return true && - rcsr.RawSubject == rcsr_.RawSubject && - rcsr.Version == rcsr_.Version && - rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && - rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && - bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && - rcsr.SignatureAlgorithm == rcsr_.SignatureAlgorithm && - bytes.Equal(rcsr.PRCSignature, rcsr_.PRCSignature) && - bytes.Equal(rcsr.Signature, rcsr_.Signature) -} - -func (s SPT) Equal(o SPT) bool { - return true && - s.Version == o.Version && - s.RawSubject == o.RawSubject && - s.CAName == o.CAName && - s.LogID == o.LogID && - s.CertType == o.CertType && - s.AddedTS.Equal(o.AddedTS) && - bytes.Equal(s.STH, o.STH) && - bytes.Equal(s.PoI, o.PoI) && - s.STHSerialNumber == o.STHSerialNumber && - bytes.Equal(s.Signature, o.Signature) -} - -func (s Policy) Equal(o Policy) bool { - if len(s.TrustedCA) != len(o.TrustedCA) { - return false - } - for i, v := range s.TrustedCA { - if v != o.TrustedCA[i] { - return false - } - } - return true -} - func (s SP) Equal(o SP) bool { return true && s.TimeStamp.Equal(o.TimeStamp) && @@ -323,9 +166,16 @@ func (rpc *RPC) Equal(rpc_ *RPC) bool { equalSPTs(rpc.SPTs, rpc_.SPTs) } -func (sprt *SPRT) Equal(sprt_ *SPRT) bool { - return sprt.SPT.Equal(sprt_.SPT) && - sprt.Reason == sprt_.Reason +func (s Policy) Equal(o Policy) bool { + if len(s.TrustedCA) != len(o.TrustedCA) { + return false + } + for i, v := range s.TrustedCA { + if v != o.TrustedCA[i] { + return false + } + } + return true } func equalSPTs(a, b []SPT) bool { diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go new file mode 100644 index 00000000..65ed610f 
--- /dev/null +++ b/pkg/common/policy_issuance.go @@ -0,0 +1,77 @@ +package common + +import ( + "bytes" + "time" +) + +type RCSR struct { + PolicyObjectBase + Version int `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` + PublicKey []byte `json:",omitempty"` + SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` + PRCSignature []byte `json:",omitempty"` + Signature []byte `json:",omitempty"` +} + +func NewRCSR( + Subject string, + Version int, + TimeStamp time.Time, + PublicKeyAlgo PublicKeyAlgorithm, + PublicKey []byte, + SignatureAlgo SignatureAlgorithm, + PRCSignature []byte, + Signature []byte, +) *RCSR { + + return &RCSR{ + PolicyObjectBase: PolicyObjectBase{ + RawSubject: Subject, + }, + Version: Version, + TimeStamp: TimeStamp, + PublicKeyAlgorithm: PublicKeyAlgo, + PublicKey: PublicKey, + SignatureAlgorithm: SignatureAlgo, + PRCSignature: PRCSignature, + Signature: Signature, + } +} + +// PSR is a Policy Signing Request. +type PSR struct { + SubjectRaw string `json:",omitempty"` + Policy Policy `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` +} + +func NewPSR( + Subject string, + Policy Policy, + TimeStamp time.Time, + RootCertSignature []byte, +) *PSR { + + return &PSR{ + SubjectRaw: Subject, + Policy: Policy, + TimeStamp: TimeStamp, + RootCertSignature: RootCertSignature, + } +} + +func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { + return true && + rcsr.RawSubject == rcsr_.RawSubject && + rcsr.Version == rcsr_.Version && + rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && + rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && + bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && + rcsr.SignatureAlgorithm == rcsr_.SignatureAlgorithm && + bytes.Equal(rcsr.PRCSignature, rcsr_.PRCSignature) && + bytes.Equal(rcsr.Signature, rcsr_.Signature) +} From 854060cb219ec21c59dc07f0ed2ca12c82b30b9d Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Fri, 16 Jun 2023 15:43:06 +0200 Subject: [PATCH 158/187] Rename Policy to DomainPolicy. --- pkg/common/crypto/crypto_test.go | 2 +- pkg/common/policies.go | 22 +++++++++---------- pkg/common/policy_issuance.go | 10 ++++----- pkg/domainowner/domainowner.go | 2 +- pkg/tests/random/random.go | 4 ++-- .../main.go | 4 ++-- 6 files changed, 22 insertions(+), 22 deletions(-) diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 00811559..a4516786 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -129,7 +129,7 @@ func TestIssuanceOfSP(t *testing.T) { // ------------------------------------- psr := common.NewPSR( "test_SP", - common.Policy{}, + common.DomainPolicy{}, time.Now(), nil, ) diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 06d35059..b1f82264 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -97,18 +97,18 @@ func NewPCRevocation(subject string) *PCRevocation { // Signed Policy type SP struct { PolicyObjectBase - Policies Policy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - CAName string `json:",omitempty"` - SerialNumber int `json:",omitempty"` - CASignature []byte `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` - SPTs []SPT `json:",omitempty"` + Policies DomainPolicy `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + CAName string `json:",omitempty"` + SerialNumber int `json:",omitempty"` + CASignature []byte `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` + SPTs []SPT `json:",omitempty"` } func NewSP( Subject string, - Policies Policy, + Policies DomainPolicy, TimeStamp time.Time, CAName string, SerialNumber int, @@ -131,8 +131,8 @@ func NewSP( } } -// Policy is a domain policy. -type Policy struct { +// DomainPolicy is a domain policy. 
+type DomainPolicy struct { TrustedCA []string `json:",omitempty"` AllowedSubdomains []string `json:",omitempty"` } @@ -166,7 +166,7 @@ func (rpc *RPC) Equal(rpc_ *RPC) bool { equalSPTs(rpc.SPTs, rpc_.SPTs) } -func (s Policy) Equal(o Policy) bool { +func (s DomainPolicy) Equal(o DomainPolicy) bool { if len(s.TrustedCA) != len(o.TrustedCA) { return false } diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 65ed610f..7be3f1bb 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -43,15 +43,15 @@ func NewRCSR( // PSR is a Policy Signing Request. type PSR struct { - SubjectRaw string `json:",omitempty"` - Policy Policy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` + SubjectRaw string `json:",omitempty"` + Policy DomainPolicy `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` } func NewPSR( Subject string, - Policy Policy, + Policy DomainPolicy, TimeStamp time.Time, RootCertSignature []byte, ) *PSR { diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index d2470740..71c853b3 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -72,7 +72,7 @@ func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCS } // GeneratePSR: generate one psr for one specific domain. 
-func (do *DomainOwner) GeneratePSR(domainName string, policy common.Policy) (*common.PSR, error) { +func (do *DomainOwner) GeneratePSR(domainName string, policy common.DomainPolicy) (*common.PSR, error) { rpcKeyPair, ok := do.privKeyByDomainName[domainName] if !ok { return nil, fmt.Errorf("GeneratePSR | No valid RPC for domain %s", domainName) diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index ae41ef48..e79e6c05 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -147,7 +147,7 @@ func RandomSPRT(t tests.T) *common.SPRT { func RandomSP(t tests.T) *common.SP { return common.NewSP( "domainname.com", - common.Policy{ + common.DomainPolicy{ TrustedCA: []string{"ca1", "ca2"}, }, RandomTimeWithoutMonotonic(), @@ -166,7 +166,7 @@ func RandomSP(t tests.T) *common.SP { func RandomPSR(t tests.T) *common.PSR { return common.NewPSR( "domain_name.com", - common.Policy{ + common.DomainPolicy{ TrustedCA: []string{"one CA", "another CA"}, AllowedSubdomains: []string{"sub1.com", "sub2.com"}, }, diff --git a/tests/integration/domainowner_pca_policlog_interaction/main.go b/tests/integration/domainowner_pca_policlog_interaction/main.go index fc06119d..a5a39ed3 100644 --- a/tests/integration/domainowner_pca_policlog_interaction/main.go +++ b/tests/integration/domainowner_pca_policlog_interaction/main.go @@ -127,11 +127,11 @@ func main() { logErrAndQuit(fmt.Errorf("rpcs num error")) } - policy1 := common.Policy{ + policy1 := common.DomainPolicy{ TrustedCA: []string{"swiss CA"}, } - policy2 := common.Policy{ + policy2 := common.DomainPolicy{ TrustedCA: []string{"US CA"}, } From 0ed72329c76132d94569c700a5a23a53b594a064 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 16 Jun 2023 15:55:37 +0200 Subject: [PATCH 159/187] Rename MarshallableObject to MarshallableDocument. Organize types and methods for policy documents so that types appear at the beginning of the source files. 
--- pkg/common/embedded_policies.go | 26 +++---- pkg/common/policies.go | 119 +++++++++++++------------------- pkg/common/policy_common.go | 23 ++++++ pkg/common/policy_issuance.go | 37 +++++----- 4 files changed, 103 insertions(+), 102 deletions(-) create mode 100644 pkg/common/policy_common.go diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index b4411299..46c66f98 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -19,6 +19,12 @@ type SPT struct { Signature []byte `json:",omitempty"` } +// SPRT is a signed policy revocation timestamp. +type SPRT struct { + SPT + Reason int `json:",omitempty"` +} + func NewSPT( Subject string, Version int, @@ -48,19 +54,6 @@ func NewSPT( } } -// SPRT is a signed policy revocation timestamp. -type SPRT struct { - SPT - Reason int `json:",omitempty"` -} - -func NewSPRT(SPT *SPT, Reason int) *SPRT { - return &SPRT{ - SPT: *SPT, - Reason: Reason, - } -} - func (s SPT) Equal(o SPT) bool { return true && s.Version == o.Version && @@ -75,6 +68,13 @@ func (s SPT) Equal(o SPT) bool { bytes.Equal(s.Signature, o.Signature) } +func NewSPRT(SPT *SPT, Reason int) *SPRT { + return &SPRT{ + SPT: *SPT, + Reason: Reason, + } +} + func (sprt *SPRT) Equal(sprt_ *SPRT) bool { return sprt.SPT.Equal(sprt_.SPT) && sprt.Reason == sprt_.Reason diff --git a/pkg/common/policies.go b/pkg/common/policies.go index b1f82264..d38532f4 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -5,30 +5,7 @@ import ( "time" ) -// MarshallableObject is an object that can be marshalled and unmarshalled to and from JSON. -type MarshallableObject interface { - Raw() []byte // Returns the Raw JSON this object was unmarshaled from (nil if none). -} - -// PolicyDocument is an interface that is implemented by all objects that are part of the set -// of "policy objects". 
A policy object is that one that represents functionality of policies -// for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. -type PolicyDocument interface { - MarshallableObject - Subject() string -} - -type PolicyObjectBase struct { - RawJSON []byte `json:"-"` // omit from JSON (un)marshaling - RawSubject string `json:"Subject,omitempty"` -} - -func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } -func (o PolicyObjectBase) Subject() string { return o.RawSubject } - -// root certificate signing request - -// root policy certificate +// RPC is a Root Policy Certificate. type RPC struct { PolicyObjectBase SerialNumber int `json:",omitempty"` @@ -45,6 +22,30 @@ type RPC struct { SPTs []SPT `json:",omitempty"` } +// SP is a Signed Policy. +type SP struct { + PolicyObjectBase + Policies DomainPolicy `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + CAName string `json:",omitempty"` + SerialNumber int `json:",omitempty"` + CASignature []byte `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` + SPTs []SPT `json:",omitempty"` +} + +// DomainPolicy is a domain policy that specifies what is or not acceptable for a domain. +type DomainPolicy struct { + TrustedCA []string `json:",omitempty"` + AllowedSubdomains []string `json:",omitempty"` +} + +// PCRevocation is for now empty. +type PCRevocation struct { + PolicyObjectBase + // TODO(juagargi) define the revocation. +} + func NewRPC( Subject string, SerialNumber int, @@ -80,30 +81,21 @@ func NewRPC( } } -// PCRevocation is for now empty. -type PCRevocation struct { - PolicyObjectBase - // TODO(juagargi) define the revocation. 
-} - -func NewPCRevocation(subject string) *PCRevocation { - return &PCRevocation{ - PolicyObjectBase{ - RawSubject: subject, - }, - } -} - -// Signed Policy -type SP struct { - PolicyObjectBase - Policies DomainPolicy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - CAName string `json:",omitempty"` - SerialNumber int `json:",omitempty"` - CASignature []byte `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` - SPTs []SPT `json:",omitempty"` +func (rpc *RPC) Equal(rpc_ *RPC) bool { + return true && + rpc.SerialNumber == rpc_.SerialNumber && + rpc.RawSubject == rpc_.RawSubject && + rpc.Version == rpc_.Version && + rpc.PublicKeyAlgorithm == rpc_.PublicKeyAlgorithm && + bytes.Equal(rpc.PublicKey, rpc_.PublicKey) && + rpc.NotBefore.Equal(rpc_.NotBefore) && + rpc.NotAfter.Equal(rpc_.NotAfter) && + rpc.CAName == rpc_.CAName && + rpc.SignatureAlgorithm == rpc_.SignatureAlgorithm && + rpc.TimeStamp.Equal(rpc_.TimeStamp) && + bytes.Equal(rpc.PRCSignature, rpc_.PRCSignature) && + bytes.Equal(rpc.CASignature, rpc_.CASignature) && + equalSPTs(rpc.SPTs, rpc_.SPTs) } func NewSP( @@ -131,12 +123,6 @@ func NewSP( } } -// DomainPolicy is a domain policy. 
-type DomainPolicy struct { - TrustedCA []string `json:",omitempty"` - AllowedSubdomains []string `json:",omitempty"` -} - func (s SP) Equal(o SP) bool { return true && s.TimeStamp.Equal(o.TimeStamp) && @@ -149,23 +135,6 @@ func (s SP) Equal(o SP) bool { equalSPTs(s.SPTs, o.SPTs) } -func (rpc *RPC) Equal(rpc_ *RPC) bool { - return true && - rpc.SerialNumber == rpc_.SerialNumber && - rpc.RawSubject == rpc_.RawSubject && - rpc.Version == rpc_.Version && - rpc.PublicKeyAlgorithm == rpc_.PublicKeyAlgorithm && - bytes.Equal(rpc.PublicKey, rpc_.PublicKey) && - rpc.NotBefore.Equal(rpc_.NotBefore) && - rpc.NotAfter.Equal(rpc_.NotAfter) && - rpc.CAName == rpc_.CAName && - rpc.SignatureAlgorithm == rpc_.SignatureAlgorithm && - rpc.TimeStamp.Equal(rpc_.TimeStamp) && - bytes.Equal(rpc.PRCSignature, rpc_.PRCSignature) && - bytes.Equal(rpc.CASignature, rpc_.CASignature) && - equalSPTs(rpc.SPTs, rpc_.SPTs) -} - func (s DomainPolicy) Equal(o DomainPolicy) bool { if len(s.TrustedCA) != len(o.TrustedCA) { return false @@ -178,6 +147,14 @@ func (s DomainPolicy) Equal(o DomainPolicy) bool { return true } +func NewPCRevocation(subject string) *PCRevocation { + return &PCRevocation{ + PolicyObjectBase{ + RawSubject: subject, + }, + } +} + func equalSPTs(a, b []SPT) bool { if len(a) != len(b) { return false diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go new file mode 100644 index 00000000..a23238ed --- /dev/null +++ b/pkg/common/policy_common.go @@ -0,0 +1,23 @@ +package common + +// MarshallableDocument is an object that can be marshalled and unmarshalled to and from JSON. +type MarshallableDocument interface { + Raw() []byte // Returns the Raw JSON this object was unmarshaled from (nil if none). +} + +// PolicyDocument is an interface that is implemented by all objects that are part of the set +// of "policy objects". A policy object is that one that represents functionality of policies +// for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. 
+type PolicyDocument interface { + MarshallableDocument + Subject() string +} + +// PolicyObjectBase is the common type to all policy documents. +type PolicyObjectBase struct { + RawJSON []byte `json:"-"` // omit from JSON (un)marshaling + RawSubject string `json:"Subject,omitempty"` +} + +func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } +func (o PolicyObjectBase) Subject() string { return o.RawSubject } diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 7be3f1bb..cbd070ce 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -5,6 +5,7 @@ import ( "time" ) +// RCSR is a root certificate signing request. type RCSR struct { PolicyObjectBase Version int `json:",omitempty"` @@ -16,6 +17,14 @@ type RCSR struct { Signature []byte `json:",omitempty"` } +// PSR is a Policy Signing Request. +type PSR struct { + SubjectRaw string `json:",omitempty"` + Policy DomainPolicy `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` +} + func NewRCSR( Subject string, Version int, @@ -41,12 +50,16 @@ func NewRCSR( } } -// PSR is a Policy Signing Request. 
-type PSR struct { - SubjectRaw string `json:",omitempty"` - Policy DomainPolicy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` +func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { + return true && + rcsr.RawSubject == rcsr_.RawSubject && + rcsr.Version == rcsr_.Version && + rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && + rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && + bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && + rcsr.SignatureAlgorithm == rcsr_.SignatureAlgorithm && + bytes.Equal(rcsr.PRCSignature, rcsr_.PRCSignature) && + bytes.Equal(rcsr.Signature, rcsr_.Signature) } func NewPSR( @@ -63,15 +76,3 @@ func NewPSR( RootCertSignature: RootCertSignature, } } - -func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { - return true && - rcsr.RawSubject == rcsr_.RawSubject && - rcsr.Version == rcsr_.Version && - rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && - rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && - bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && - rcsr.SignatureAlgorithm == rcsr_.SignatureAlgorithm && - bytes.Equal(rcsr.PRCSignature, rcsr_.PRCSignature) && - bytes.Equal(rcsr.Signature, rcsr_.Signature) -} From 07aea225c217a3b8cbc3438e6f1b9ee84f02b6c6 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 19 Jun 2023 14:26:13 +0200 Subject: [PATCH 160/187] Split policy things into three interfaces/base types. 
--- cmd/ingest/certProcessor.go | 2 +- pkg/common/crypto/crypto.go | 4 +- pkg/common/embedded_policies.go | 71 ++++++++++++++------ pkg/common/json.go | 14 ++-- pkg/common/policies.go | 91 +++++++++++++------------- pkg/common/policies_test.go | 6 +- pkg/common/policy_common.go | 21 +++--- pkg/common/policy_issuance.go | 35 +++++++--- pkg/db/mysql/mysql_test.go | 2 +- pkg/domainowner/domainowner.go | 8 ++- pkg/logverifier/logverifier_test.go | 4 +- pkg/mapserver/logfetcher/logfetcher.go | 18 +++-- pkg/mapserver/updater/updater.go | 4 +- pkg/mapserver/updater/updater_test.go | 4 +- pkg/pca/sign_and_log.go | 2 +- pkg/tests/random/random.go | 23 ++++--- pkg/util/io.go | 4 +- pkg/util/types.go | 49 +++++++++----- pkg/util/types_test.go | 8 ++- 19 files changed, 223 insertions(+), 147 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 531b89db..946d2bb4 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -79,7 +79,7 @@ const ( type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*common.SHA256Output, []*common.SHA256Output, []*ctx509.Certificate, []*time.Time, - []common.PolicyDocument) error + []common.PolicyCertificate) error func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 76c6fec7..3aeafb6d 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -114,7 +114,7 @@ func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, rpc := common.NewRPC( rcsr.RawSubject, serialNumber, - rcsr.Version, + rcsr.Version(), rcsr.PublicKeyAlgorithm, rcsr.PublicKey, notBefore, @@ -205,7 +205,7 @@ func CASignSP(psr *common.PSR, caPrivKey *rsa.PrivateKey, caName string, serialN *common.SP, error) { sp := common.NewSP( - psr.SubjectRaw, + psr.RawSubject, psr.Policy, time.Now(), caName, diff --git 
a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index 46c66f98..d72e3823 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -5,10 +5,24 @@ import ( "time" ) +type EmbeddedPolicyBase struct { + PolicyPartBase +} + +func (p EmbeddedPolicyBase) Equal(o EmbeddedPolicyBase) bool { + return p.PolicyPartBase.Equal(o.PolicyPartBase) +} + +// DomainPolicy is a domain policy that specifies what is or not acceptable for a domain. +type DomainPolicy struct { + EmbeddedPolicyBase + TrustedCA []string `json:",omitempty"` + AllowedSubdomains []string `json:",omitempty"` +} + // SPT is a signed policy timestamp. type SPT struct { - PolicyObjectBase - Version int `json:",omitempty"` + EmbeddedPolicyBase CAName string `json:",omitempty"` LogID int `json:",omitempty"` CertType uint8 `json:",omitempty"` @@ -25,6 +39,12 @@ type SPRT struct { Reason int `json:",omitempty"` } +func (s DomainPolicy) Equal(o DomainPolicy) bool { + return s.EmbeddedPolicyBase.Equal(o.EmbeddedPolicyBase) && + equalStringSlices(s.TrustedCA, o.TrustedCA) && + equalStringSlices(s.AllowedSubdomains, o.AllowedSubdomains) +} + func NewSPT( Subject string, Version int, @@ -39,10 +59,11 @@ func NewSPT( ) *SPT { return &SPT{ - PolicyObjectBase: PolicyObjectBase{ - RawSubject: Subject, + EmbeddedPolicyBase: EmbeddedPolicyBase{ + PolicyPartBase: PolicyPartBase{ + RawVersion: Version, + }, }, - Version: Version, CAName: CAName, LogID: LogID, CertType: CertType, @@ -54,18 +75,16 @@ func NewSPT( } } -func (s SPT) Equal(o SPT) bool { - return true && - s.Version == o.Version && - s.RawSubject == o.RawSubject && - s.CAName == o.CAName && - s.LogID == o.LogID && - s.CertType == o.CertType && - s.AddedTS.Equal(o.AddedTS) && - bytes.Equal(s.STH, o.STH) && - bytes.Equal(s.PoI, o.PoI) && - s.STHSerialNumber == o.STHSerialNumber && - bytes.Equal(s.Signature, o.Signature) +func (s SPT) Equal(x SPT) bool { + return s.EmbeddedPolicyBase.Equal(x.EmbeddedPolicyBase) && + s.CAName 
== x.CAName && + s.LogID == x.LogID && + s.CertType == x.CertType && + s.AddedTS.Equal(x.AddedTS) && + bytes.Equal(s.STH, x.STH) && + bytes.Equal(s.PoI, x.PoI) && + s.STHSerialNumber == x.STHSerialNumber && + bytes.Equal(s.Signature, x.Signature) } func NewSPRT(SPT *SPT, Reason int) *SPRT { @@ -75,7 +94,19 @@ func NewSPRT(SPT *SPT, Reason int) *SPRT { } } -func (sprt *SPRT) Equal(sprt_ *SPRT) bool { - return sprt.SPT.Equal(sprt_.SPT) && - sprt.Reason == sprt_.Reason +func (sprt SPRT) Equal(x SPRT) bool { + return sprt.SPT.Equal(x.SPT) && + sprt.Reason == x.Reason +} + +func equalStringSlices(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true } diff --git a/pkg/common/json.go b/pkg/common/json.go index 7de9faac..d79ac16b 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -12,7 +12,7 @@ import ( type serializableObjectBase struct { O any // actual object to Marshal/Unmarshal - skipRaw bool // flag controlling JSON copying into PolicyObjectBase.Raw + skipRaw bool // flag controlling JSON copying into PolicyPartBase.Raw } func ToJSON(obj any) ([]byte, error) { @@ -36,7 +36,7 @@ func FromJSON(data []byte, opts ...FromJSONModifier) (any, error) { type FromJSONModifier func(*serializableObjectBase) // WithSkipCopyJSONIntoPolicyObjects avoids copying the raw JSON into each one of the -// objects that aggregate a PolicyObjectBase (RPC, SP, etc). +// objects that aggregate a PolicyPartBase (RPC, SP, etc). func WithSkipCopyJSONIntoPolicyObjects(o *serializableObjectBase) { o.skipRaw = true } @@ -138,18 +138,18 @@ func (o *serializableObjectBase) UnmarshalJSON(data []byte) error { // If we should copy JSON to Raw: if shouldCopyJSON { - // Find out if the object is a pointer to a PolicyObjectBase like structure. - base := reflect.Indirect(reflect.ValueOf(obj)).FieldByName("PolicyObjectBase") + // Find out if the object is a pointer to a PolicyPartBase like structure. 
+ base := reflect.Indirect(reflect.ValueOf(obj)).FieldByName("PolicyPartBase") if base != (reflect.Value{}) { - // It is a PolicyObjectBase like object. Check the Raw field (should always be true). + // It is a PolicyPartBase like object. Check the Raw field (should always be true). if raw := base.FieldByName("RawJSON"); raw != (reflect.Value{}) { // Set its value to the JSON data. raw.Set(reflect.ValueOf(data)) } else { // This should never happen, and the next line should ensure it: - _ = PolicyObjectBase{}.RawJSON + _ = PolicyPartBase{}.RawJSON // But terminate the control flow anyways with a panic. - panic("logic error: structure PolicyObjectBase has lost its Raw member") + panic("logic error: structure PolicyPartBase has lost its Raw member") } } } diff --git a/pkg/common/policies.go b/pkg/common/policies.go index d38532f4..257b2e05 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -5,11 +5,28 @@ import ( "time" ) +// PolicyCertificate is any policy document that can be exchanged among mapservers, CT log servers, +// and others. +type PolicyCertificate interface { + PolicyPart + Subject() string +} + +type PolicyCertificateBase struct { + PolicyPartBase + RawSubject string `json:"Subject,omitempty"` +} + +func (o PolicyCertificateBase) Subject() string { return o.RawSubject } +func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { + return p.PolicyPartBase.Equal(x.PolicyPartBase) && + p.RawSubject == x.RawSubject +} + // RPC is a Root Policy Certificate. type RPC struct { - PolicyObjectBase + PolicyCertificateBase SerialNumber int `json:",omitempty"` - Version int `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` PublicKey []byte `json:",omitempty"` NotBefore time.Time `json:",omitempty"` @@ -24,7 +41,7 @@ type RPC struct { // SP is a Signed Policy. 
type SP struct { - PolicyObjectBase + PolicyCertificateBase Policies DomainPolicy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` CAName string `json:",omitempty"` @@ -34,15 +51,9 @@ type SP struct { SPTs []SPT `json:",omitempty"` } -// DomainPolicy is a domain policy that specifies what is or not acceptable for a domain. -type DomainPolicy struct { - TrustedCA []string `json:",omitempty"` - AllowedSubdomains []string `json:",omitempty"` -} - // PCRevocation is for now empty. type PCRevocation struct { - PolicyObjectBase + PolicyCertificateBase // TODO(juagargi) define the revocation. } @@ -63,11 +74,13 @@ func NewRPC( ) *RPC { return &RPC{ - PolicyObjectBase: PolicyObjectBase{ + PolicyCertificateBase: PolicyCertificateBase{ + PolicyPartBase: PolicyPartBase{ + RawVersion: Version, + }, RawSubject: Subject, }, SerialNumber: SerialNumber, - Version: Version, PublicKeyAlgorithm: PublicKeyAlgorithm, PublicKey: PublicKey, NotBefore: NotBefore, @@ -81,26 +94,24 @@ func NewRPC( } } -func (rpc *RPC) Equal(rpc_ *RPC) bool { - return true && - rpc.SerialNumber == rpc_.SerialNumber && - rpc.RawSubject == rpc_.RawSubject && - rpc.Version == rpc_.Version && - rpc.PublicKeyAlgorithm == rpc_.PublicKeyAlgorithm && - bytes.Equal(rpc.PublicKey, rpc_.PublicKey) && - rpc.NotBefore.Equal(rpc_.NotBefore) && - rpc.NotAfter.Equal(rpc_.NotAfter) && - rpc.CAName == rpc_.CAName && - rpc.SignatureAlgorithm == rpc_.SignatureAlgorithm && - rpc.TimeStamp.Equal(rpc_.TimeStamp) && - bytes.Equal(rpc.PRCSignature, rpc_.PRCSignature) && - bytes.Equal(rpc.CASignature, rpc_.CASignature) && - equalSPTs(rpc.SPTs, rpc_.SPTs) +func (rpc RPC) Equal(x RPC) bool { + return rpc.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && + rpc.SerialNumber == x.SerialNumber && + rpc.PublicKeyAlgorithm == x.PublicKeyAlgorithm && + bytes.Equal(rpc.PublicKey, x.PublicKey) && + rpc.NotBefore.Equal(x.NotBefore) && + rpc.NotAfter.Equal(x.NotAfter) && + rpc.CAName == x.CAName && + rpc.SignatureAlgorithm == 
x.SignatureAlgorithm && + rpc.TimeStamp.Equal(x.TimeStamp) && + bytes.Equal(rpc.PRCSignature, x.PRCSignature) && + bytes.Equal(rpc.CASignature, x.CASignature) && + equalSPTs(rpc.SPTs, x.SPTs) } func NewSP( Subject string, - Policies DomainPolicy, + Policy DomainPolicy, TimeStamp time.Time, CAName string, SerialNumber int, @@ -110,10 +121,11 @@ func NewSP( ) *SP { return &SP{ - PolicyObjectBase: PolicyObjectBase{ - RawSubject: Subject, + PolicyCertificateBase: PolicyCertificateBase{ + PolicyPartBase: PolicyPartBase{}, + RawSubject: Subject, }, - Policies: Policies, + Policies: Policy, TimeStamp: TimeStamp, CAName: CAName, SerialNumber: SerialNumber, @@ -124,9 +136,8 @@ func NewSP( } func (s SP) Equal(o SP) bool { - return true && + return s.PolicyCertificateBase.Equal(o.PolicyCertificateBase) && s.TimeStamp.Equal(o.TimeStamp) && - s.RawSubject == o.RawSubject && s.CAName == o.CAName && s.SerialNumber == o.SerialNumber && bytes.Equal(s.CASignature, o.CASignature) && @@ -135,21 +146,9 @@ func (s SP) Equal(o SP) bool { equalSPTs(s.SPTs, o.SPTs) } -func (s DomainPolicy) Equal(o DomainPolicy) bool { - if len(s.TrustedCA) != len(o.TrustedCA) { - return false - } - for i, v := range s.TrustedCA { - if v != o.TrustedCA[i] { - return false - } - } - return true -} - func NewPCRevocation(subject string) *PCRevocation { return &PCRevocation{ - PolicyObjectBase{ + PolicyCertificateBase: PolicyCertificateBase{ RawSubject: subject, }, } diff --git a/pkg/common/policies_test.go b/pkg/common/policies_test.go index dd23b26f..407d6903 100644 --- a/pkg/common/policies_test.go +++ b/pkg/common/policies_test.go @@ -38,10 +38,10 @@ func TestEqual(t *testing.T) { require.False(t, spt2.Equal(spt1)) sprt := random.RandomSPRT(t) - require.True(t, sprt.Equal(sprt)) + require.True(t, sprt.Equal(*sprt)) rpc := random.RandomRPC(t) - require.True(t, rpc.Equal(rpc)) + require.True(t, rpc.Equal(*rpc)) } // TestJsonReadWrite: RPC -> file -> RPC, then RPC.Equal(RPC) @@ -60,7 +60,7 @@ func 
TestJsonReadWrite(t *testing.T) { rpc1, err := common.JsonFileToRPC(tempFile) require.NoError(t, err, "Json File To RPC error") - require.True(t, rpc.Equal(rpc1), "Json error") + require.True(t, rpc.Equal(*rpc1), "Json error") } func randomTrillianProof(t tests.T) *trillian.Proof { diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index a23238ed..0ab4ee31 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -5,19 +5,24 @@ type MarshallableDocument interface { Raw() []byte // Returns the Raw JSON this object was unmarshaled from (nil if none). } -// PolicyDocument is an interface that is implemented by all objects that are part of the set +// PolicyPart is an interface that is implemented by all objects that are part of the set // of "policy objects". A policy object is that one that represents functionality of policies // for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. -type PolicyDocument interface { +type PolicyPart interface { MarshallableDocument - Subject() string + Version() int } -// PolicyObjectBase is the common type to all policy documents. -type PolicyObjectBase struct { +// PolicyPartBase is the common type to all policy documents. +type PolicyPartBase struct { RawJSON []byte `json:"-"` // omit from JSON (un)marshaling - RawSubject string `json:"Subject,omitempty"` + RawVersion int `json:"Version,omitempty"` } -func (o PolicyObjectBase) Raw() []byte { return o.RawJSON } -func (o PolicyObjectBase) Subject() string { return o.RawSubject } +func (o PolicyPartBase) Raw() []byte { return o.RawJSON } +func (o PolicyPartBase) Version() int { return o.RawVersion } + +func (o PolicyPartBase) Equal(x PolicyPartBase) bool { + // Ignore the RawJSON component, use just the regular fields. 
+ return o.RawVersion == x.RawVersion +} diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index cbd070ce..8b05aec0 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -5,10 +5,25 @@ import ( "time" ) +type PolicyIssuer interface { + PolicyPart + Subject() string +} + +type PolicyIssuerBase struct { + PolicyPartBase + RawSubject string `json:"Subject,omitempty"` +} + +func (c PolicyIssuerBase) Subject() string { return c.RawSubject } +func (c PolicyIssuerBase) Equal(x PolicyIssuerBase) bool { + return c.PolicyPartBase.Equal(x.PolicyPartBase) && + c.RawSubject == x.RawSubject +} + // RCSR is a root certificate signing request. type RCSR struct { - PolicyObjectBase - Version int `json:",omitempty"` + PolicyIssuerBase TimeStamp time.Time `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` PublicKey []byte `json:",omitempty"` @@ -19,7 +34,7 @@ type RCSR struct { // PSR is a Policy Signing Request. type PSR struct { - SubjectRaw string `json:",omitempty"` + PolicyIssuerBase Policy DomainPolicy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` RootCertSignature []byte `json:",omitempty"` @@ -37,10 +52,12 @@ func NewRCSR( ) *RCSR { return &RCSR{ - PolicyObjectBase: PolicyObjectBase{ + PolicyIssuerBase: PolicyIssuerBase{ + PolicyPartBase: PolicyPartBase{ + RawVersion: Version, + }, RawSubject: Subject, }, - Version: Version, TimeStamp: TimeStamp, PublicKeyAlgorithm: PublicKeyAlgo, PublicKey: PublicKey, @@ -51,9 +68,7 @@ func NewRCSR( } func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { - return true && - rcsr.RawSubject == rcsr_.RawSubject && - rcsr.Version == rcsr_.Version && + return rcsr.PolicyIssuerBase.Equal(rcsr.PolicyIssuerBase) && rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && @@ -70,7 +85,9 @@ func NewPSR( ) *PSR { return &PSR{ - SubjectRaw: Subject, + PolicyIssuerBase: 
PolicyIssuerBase{ + RawSubject: Subject, + }, Policy: Policy, TimeStamp: TimeStamp, RootCertSignature: RootCertSignature, diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index fdb97e95..124077a1 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -218,7 +218,7 @@ func testCertHierarchyForLeafs(t tests.T, leaves []string) (certs []*ctx509.Cert // testPolicyHierarchyForLeafs returns simply a policy hierarchy per leaf name, created using // the function BuildTestRandomPolicyHierarchy. -func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyDocument, +func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyCertificate, polIDs []*common.SHA256Output) { for _, name := range leaves { diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 71c853b3..5467ad1e 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -79,9 +79,11 @@ func (do *DomainOwner) GeneratePSR(domainName string, policy common.DomainPolicy } psr := &common.PSR{ - SubjectRaw: domainName, - Policy: policy, - TimeStamp: time.Now(), + PolicyIssuerBase: common.PolicyIssuerBase{ + RawSubject: domainName, + }, + Policy: policy, + TimeStamp: time.Now(), } err := crypto.DomainOwnerSignPSR(rpcKeyPair, psr) diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 1c3a1124..5c74188f 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -28,7 +28,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "/Pk2HUaMxp2JDmKrEw8H/vqhjs3xsUcU2JUDaDD+bDE="), + RootHash: tests.MustDecodeBase64(t, "Rv16YWSSJWdqqQTccjyPPBSjyJCxiTN8XU0APhbtqFE="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -84,7 +84,7 @@ func TestCheckRPC(t *testing.T) { // Mock a STH with the right root hash. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "QxOQbyfff8Hi5UWqpLC0abhJzpQC3a+6kMgD5nepfCA="), + RootHash: tests.MustDecodeBase64(t, "OhHDUl3nXT2aBWQcY/ZDULXFXlZHNhLtt0qewB1pMao="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 4cfdc756..a5385a29 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -325,13 +325,17 @@ func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker in //fmt.Println("invalid domain name: ", domainName) continue } - resultPCs = append(resultPCs, &common.SP{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: domainName, - }, - TimeStamp: time.Now(), - CASignature: generateRandomBytes(), - }) + + resultPCs = append(resultPCs, common.NewSP( + domainName, + common.DomainPolicy{}, + time.Now(), + "", // CA name + 0, // serial number + generateRandomBytes(), + nil, // root cert signature + nil, // SPTs + )) rpc := &common.RPC{} rpc.RawSubject = domainName diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index a432900f..fd0a2d25 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -133,7 +133,7 @@ func (mapUpdater *MapUpdater) updateRPCAndPC( func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]string, certIDs, parentCertIDs []*common.SHA256Output, certs []*ctx509.Certificate, certExpirations []*time.Time, - policies []common.PolicyDocument, + policies 
[]common.PolicyCertificate, ) error { // Insert all specified certificates. @@ -165,7 +165,7 @@ func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]stri func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]string, certIDs, parentCertIDs []*common.SHA256Output, certs []*ctx509.Certificate, certExpirations []*time.Time, - policies []common.PolicyDocument, + policies []common.PolicyCertificate, ) error { // First check which certificates are already present in the DB. diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 79450648..0208f9a2 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -95,7 +95,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { } // Check policy coalescing. - policiesPerName := make(map[string][]common.PolicyDocument, len(pols)) + policiesPerName := make(map[string][]common.PolicyCertificate, len(pols)) for _, pol := range pols { policiesPerName[pol.Subject()] = append(policiesPerName[pol.Subject()], pol) } @@ -161,7 +161,7 @@ func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.S return gluedIDs, &id } -func computeIDsOfPolicies(policies []common.PolicyDocument) []*common.SHA256Output { +func computeIDsOfPolicies(policies []common.PolicyCertificate) []*common.SHA256Output { set := make(map[common.SHA256Output]struct{}, len(policies)) for _, pol := range policies { id := common.SHA256Hash32Bytes(pol.Raw()) diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index 69181102..c2b147d1 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -73,7 +73,7 @@ func (pca *PCA) SignAndLogSP(psr *common.PSR) error { } func (pca *PCA) findRPCAndVerifyPSR(psr *common.PSR) error { - rpc, ok := pca.validRPCsByDomains[psr.SubjectRaw] + rpc, ok := pca.validRPCsByDomains[psr.Subject()] if !ok { return fmt.Errorf("findRPCAndVerifyPSR | validRPCsByDomains | no valid rpc at this 
moment") } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index e79e6c05..07f29241 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -33,7 +33,7 @@ func RandomX509Cert(t tests.T, domain string) *ctx509.Certificate { } } -func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyDocument { +func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyCertificate { // Create one RPC and one SP for that name. rpc := RandomRPC(t) rpc.RawSubject = domainName @@ -43,19 +43,22 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic require.NoError(t, err) rpc.RawJSON = data - sp := &common.SP{ - PolicyObjectBase: common.PolicyObjectBase{ - RawSubject: domainName, - }, - CAName: "c0.com", - CASignature: RandomBytesForTest(t, 100), - RootCertSignature: RandomBytesForTest(t, 100), - } + sp := common.NewSP( + domainName, + common.DomainPolicy{}, + RandomTimeWithoutMonotonic(), + "c0.com", + 0, // serial number + RandomBytesForTest(t, 100), // CA signature + RandomBytesForTest(t, 100), // root cert signature + nil, // SPTs + ) + data, err = common.ToJSON(sp) require.NoError(t, err) sp.RawJSON = data - return []common.PolicyDocument{rpc, sp} + return []common.PolicyCertificate{rpc, sp} } // BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate diff --git a/pkg/util/io.go b/pkg/util/io.go index f5303b53..a699b9d7 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -159,13 +159,13 @@ func LoadCertsAndChainsFromCSV( // LoadPoliciesFromRaw can load RPCs, SPs, RCSRs, PCRevocations, SPRTs, and PSRs from their // serialized form. -func LoadPoliciesFromRaw(b []byte) ([]common.PolicyDocument, error) { +func LoadPoliciesFromRaw(b []byte) ([]common.PolicyCertificate, error) { obj, err := common.FromJSON(b) if err != nil { return nil, err } // The returned object should be of type list. 
-	pols, err := ToTypedSlice[common.PolicyDocument](obj)
+	pols, err := ToTypedSlice[common.PolicyCertificate](obj)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/util/types.go b/pkg/util/types.go
index 13c3cc51..e84f2e0a 100644
--- a/pkg/util/types.go
+++ b/pkg/util/types.go
@@ -2,29 +2,42 @@ package util
 
 import "fmt"
 
-// ToTypedSlice expects a slice as input and returns a slice whose elements are converted to the
-// required type one by one, or error.
+// ToType returns the passed object as the specified type, or error.
+func ToType[T any](obj any) (T, error) {
+	if o, ok := obj.(T); ok {
+		return o, nil
+	}
+	return *new(T), fmt.Errorf("cannot convert from %T into %T", obj, *new(T))
+}
+
+// ToTypedSlice expects a slice (or error is returned). It returns a slice containing all elements
+// of the slice converted to the requested type. If not all elements were convertible, an error is
+// returned.
 func ToTypedSlice[T any](obj any) ([]T, error) {
-	s, ok := obj.([]any)
-	if !ok {
-		return nil, fmt.Errorf("the content is of type %T instead of []any", obj)
+	s, err := ToType[[]any](obj)
+	if err != nil {
+		return nil, err
 	}
-	t := make([]T, len(s))
-	for i, e := range s {
-		if te, ok := e.(T); ok {
-			t[i] = te
-		} else {
-			return nil, fmt.Errorf("element at %d of type %T cannot be converted to %T",
-				i, e, *new(T))
-		}
+	t, a := SliceToTypedSlice[T](s)
+	if len(a) > 0 {
+		return nil, fmt.Errorf("not all elements were convertible to %T. At least one of %T is found",
+			*new(T), a[0])
 	}
 	return t, nil
 }
 
-// ToType returns the passed object as the specified type, or error.
-func ToType[T any](obj any) (T, error) {
-	if o, ok := obj.(T); ok {
-		return o, nil
+// SliceToTypedSlice expects a slice as input and returns a slice whose elements are converted to the
+// required type one by one, and another slice with the remaining elements that couldn't be
+// converted.
+func SliceToTypedSlice[T any](s []any) ([]T, []any) { + filtered := make([]T, 0, len(s)) + remaining := make([]any, 0, len(s)) + for _, e := range s { + if te, ok := e.(T); ok { + filtered = append(filtered, te) + } else { + remaining = append(remaining, e) + } } - return *new(T), fmt.Errorf("cannot convert from %T into %T", obj, *new(T)) + return filtered, remaining } diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index bd8571f2..0499d8e7 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -19,9 +19,11 @@ func TestToTypedSlice(t *testing.T) { // slice of *common.RPC { orig := []*common.RPC{ - {Version: 1}, - {Version: 2}, + {}, + {}, } + orig[0].RawVersion = 1 + orig[1].RawVersion = 2 s := make([]any, len(orig)) for i, e := range orig { s[i] = e @@ -37,7 +39,7 @@ func TestToType(t *testing.T) { { orig := &common.RPC{} orig.RawSubject = "a.com" - orig.Version = 1 + orig.RawVersion = 1 e := any(orig) r, err := ToType[*common.RPC](e) require.NoError(t, err) From fb065b50feedd419a8cf6dcbf03d058b34cade00 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 19 Jun 2023 19:16:10 +0200 Subject: [PATCH 161/187] Make Issuer common to all. --- pkg/common/embedded_policies.go | 35 +++--------------------- pkg/common/policies.go | 41 ++++++++++++++++++++++------- pkg/common/policy_common.go | 10 ++++--- pkg/logverifier/logverifier_test.go | 6 ++--- pkg/pca/pca.go | 4 +-- pkg/tests/random/random.go | 2 +- 6 files changed, 48 insertions(+), 50 deletions(-) diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index d72e3823..ed63c0cc 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -9,21 +9,13 @@ type EmbeddedPolicyBase struct { PolicyPartBase } -func (p EmbeddedPolicyBase) Equal(o EmbeddedPolicyBase) bool { - return p.PolicyPartBase.Equal(o.PolicyPartBase) -} - -// DomainPolicy is a domain policy that specifies what is or not acceptable for a domain. 
-type DomainPolicy struct { - EmbeddedPolicyBase - TrustedCA []string `json:",omitempty"` - AllowedSubdomains []string `json:",omitempty"` +func (p EmbeddedPolicyBase) Equal(x EmbeddedPolicyBase) bool { + return p.PolicyPartBase.Equal(x.PolicyPartBase) } // SPT is a signed policy timestamp. type SPT struct { EmbeddedPolicyBase - CAName string `json:",omitempty"` LogID int `json:",omitempty"` CertType uint8 `json:",omitempty"` AddedTS time.Time `json:",omitempty"` @@ -39,16 +31,10 @@ type SPRT struct { Reason int `json:",omitempty"` } -func (s DomainPolicy) Equal(o DomainPolicy) bool { - return s.EmbeddedPolicyBase.Equal(o.EmbeddedPolicyBase) && - equalStringSlices(s.TrustedCA, o.TrustedCA) && - equalStringSlices(s.AllowedSubdomains, o.AllowedSubdomains) -} - func NewSPT( Subject string, Version int, - CAName string, + issuer string, LogID int, CertType uint8, AddedTS time.Time, @@ -62,9 +48,9 @@ func NewSPT( EmbeddedPolicyBase: EmbeddedPolicyBase{ PolicyPartBase: PolicyPartBase{ RawVersion: Version, + RawIssuer: issuer, }, }, - CAName: CAName, LogID: LogID, CertType: CertType, AddedTS: AddedTS, @@ -77,7 +63,6 @@ func NewSPT( func (s SPT) Equal(x SPT) bool { return s.EmbeddedPolicyBase.Equal(x.EmbeddedPolicyBase) && - s.CAName == x.CAName && s.LogID == x.LogID && s.CertType == x.CertType && s.AddedTS.Equal(x.AddedTS) && @@ -98,15 +83,3 @@ func (sprt SPRT) Equal(x SPRT) bool { return sprt.SPT.Equal(x.SPT) && sprt.Reason == x.Reason } - -func equalStringSlices(a, b []string) bool { - if len(a) != len(b) { - return false - } - for i := range a { - if a[i] != b[i] { - return false - } - } - return true -} diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 257b2e05..4dc64722 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -31,7 +31,6 @@ type RPC struct { PublicKey []byte `json:",omitempty"` NotBefore time.Time `json:",omitempty"` NotAfter time.Time `json:",omitempty"` - CAName string `json:",omitempty"` SignatureAlgorithm 
SignatureAlgorithm `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` PRCSignature []byte `json:",omitempty"` @@ -44,13 +43,18 @@ type SP struct { PolicyCertificateBase Policies DomainPolicy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` - CAName string `json:",omitempty"` SerialNumber int `json:",omitempty"` CASignature []byte `json:",omitempty"` RootCertSignature []byte `json:",omitempty"` SPTs []SPT `json:",omitempty"` } +// DomainPolicy is a domain policy that specifies what is or not acceptable for a domain. +type DomainPolicy struct { + TrustedCA []string `json:",omitempty"` + AllowedSubdomains []string `json:",omitempty"` +} + // PCRevocation is for now empty. type PCRevocation struct { PolicyCertificateBase @@ -65,7 +69,7 @@ func NewRPC( PublicKey []byte, NotBefore time.Time, NotAfter time.Time, - CAName string, + issuer string, SignatureAlgorithm SignatureAlgorithm, TimeStamp time.Time, PRCSignature []byte, @@ -77,6 +81,7 @@ func NewRPC( PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: PolicyPartBase{ RawVersion: Version, + RawIssuer: issuer, }, RawSubject: Subject, }, @@ -85,7 +90,6 @@ func NewRPC( PublicKey: PublicKey, NotBefore: NotBefore, NotAfter: NotAfter, - CAName: CAName, SignatureAlgorithm: SignatureAlgorithm, TimeStamp: TimeStamp, PRCSignature: PRCSignature, @@ -101,7 +105,6 @@ func (rpc RPC) Equal(x RPC) bool { bytes.Equal(rpc.PublicKey, x.PublicKey) && rpc.NotBefore.Equal(x.NotBefore) && rpc.NotAfter.Equal(x.NotAfter) && - rpc.CAName == x.CAName && rpc.SignatureAlgorithm == x.SignatureAlgorithm && rpc.TimeStamp.Equal(x.TimeStamp) && bytes.Equal(rpc.PRCSignature, x.PRCSignature) && @@ -113,7 +116,7 @@ func NewSP( Subject string, Policy DomainPolicy, TimeStamp time.Time, - CAName string, + issuer string, SerialNumber int, CASignature []byte, RootCertSignature []byte, @@ -122,12 +125,13 @@ func NewSP( return &SP{ PolicyCertificateBase: PolicyCertificateBase{ - PolicyPartBase: PolicyPartBase{}, - RawSubject: 
Subject, + PolicyPartBase: PolicyPartBase{ + RawIssuer: issuer, + }, + RawSubject: Subject, }, Policies: Policy, TimeStamp: TimeStamp, - CAName: CAName, SerialNumber: SerialNumber, CASignature: CASignature, RootCertSignature: RootCertSignature, @@ -138,7 +142,6 @@ func NewSP( func (s SP) Equal(o SP) bool { return s.PolicyCertificateBase.Equal(o.PolicyCertificateBase) && s.TimeStamp.Equal(o.TimeStamp) && - s.CAName == o.CAName && s.SerialNumber == o.SerialNumber && bytes.Equal(s.CASignature, o.CASignature) && bytes.Equal(s.RootCertSignature, o.RootCertSignature) && @@ -146,6 +149,12 @@ func (s SP) Equal(o SP) bool { equalSPTs(s.SPTs, o.SPTs) } +func (s DomainPolicy) Equal(o DomainPolicy) bool { + return true && + equalStringSlices(s.TrustedCA, o.TrustedCA) && + equalStringSlices(s.AllowedSubdomains, o.AllowedSubdomains) +} + func NewPCRevocation(subject string) *PCRevocation { return &PCRevocation{ PolicyCertificateBase: PolicyCertificateBase{ @@ -165,3 +174,15 @@ func equalSPTs(a, b []SPT) bool { } return true } + +func equalStringSlices(a, b []string) bool { + if len(a) != len(b) { + return false + } + for i := range a { + if a[i] != b[i] { + return false + } + } + return true +} diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index 0ab4ee31..60fd173f 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -11,18 +11,22 @@ type MarshallableDocument interface { type PolicyPart interface { MarshallableDocument Version() int + Issuer() string } // PolicyPartBase is the common type to all policy documents. 
type PolicyPartBase struct { RawJSON []byte `json:"-"` // omit from JSON (un)marshaling RawVersion int `json:"Version,omitempty"` + RawIssuer string `json:"Issuer,omitempty"` } -func (o PolicyPartBase) Raw() []byte { return o.RawJSON } -func (o PolicyPartBase) Version() int { return o.RawVersion } +func (o PolicyPartBase) Raw() []byte { return o.RawJSON } +func (o PolicyPartBase) Version() int { return o.RawVersion } +func (o PolicyPartBase) Issuer() string { return o.RawIssuer } func (o PolicyPartBase) Equal(x PolicyPartBase) bool { // Ignore the RawJSON component, use just the regular fields. - return o.RawVersion == x.RawVersion + return o.RawVersion == x.RawVersion && + o.RawIssuer == x.RawIssuer } diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 5c74188f..14113b3b 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -28,7 +28,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "Rv16YWSSJWdqqQTccjyPPBSjyJCxiTN8XU0APhbtqFE="), + RootHash: tests.MustDecodeBase64(t, "m5Lwb1nDco/+mrAdAQnue4WIne67qRACok/ESYmCsZ8="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -84,7 +84,7 @@ func TestCheckRPC(t *testing.T) { // Mock a STH with the right root hash. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "OhHDUl3nXT2aBWQcY/ZDULXFXlZHNhLtt0qewB1pMao="), + RootHash: tests.MustDecodeBase64(t, "0ePUVBOu4WOgAo1pW+JMUCGUVUWaK/6C7JqLJt9XWk4="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -125,7 +125,7 @@ func TestCheckSP(t *testing.T) { // Mock a STH with the right root hash. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "p/zmpyI3xc064LO9NvXi99BqQoCQPO7GeMgzrBlAUKM="), + RootHash: tests.MustDecodeBase64(t, "iQ+VNmSKh0lxvWHqQ/lqUB/17DkP/67rW9yAwhtMiwg="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 1d472641..2d8ddc27 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -129,14 +129,14 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { func (pca *PCA) OutputRPCAndSP() error { for domain, rpc := range pca.validRPCsByDomains { - err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.CAName+"_"+"rpc") + err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer()+"_"+"rpc") if err != nil { return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) } } for domain, rpc := range pca.validSPsByDomains { - err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.CAName+"_"+"sp") + err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer()+"_"+"sp") if err != nil { return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 07f29241..7967f968 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -37,7 +37,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic // Create one RPC and one SP for that name. rpc := RandomRPC(t) rpc.RawSubject = domainName - rpc.CAName = "c0.com" + rpc.RawIssuer = "c0.com" data, err := common.ToJSON(rpc) require.NoError(t, err) From 205902c6aa01d755bc516812a3fe4b7ff0911f71 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 19 Jun 2023 19:19:42 +0200 Subject: [PATCH 162/187] Simplify policy interfaces, base objects. 
--- pkg/common/crypto/crypto.go | 2 +- pkg/common/embedded_policies.go | 4 ++-- pkg/common/policies.go | 6 +++--- pkg/common/policy_common.go | 16 ++++++---------- pkg/common/policy_issuance.go | 2 +- pkg/pca/pca.go | 4 ++-- pkg/tests/random/random.go | 2 +- pkg/util/types_test.go | 6 +++--- 8 files changed, 19 insertions(+), 23 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 3aeafb6d..89d2b338 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -114,7 +114,7 @@ func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, rpc := common.NewRPC( rcsr.RawSubject, serialNumber, - rcsr.Version(), + rcsr.Version, rcsr.PublicKeyAlgorithm, rcsr.PublicKey, notBefore, diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index ed63c0cc..24b67791 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -47,8 +47,8 @@ func NewSPT( return &SPT{ EmbeddedPolicyBase: EmbeddedPolicyBase{ PolicyPartBase: PolicyPartBase{ - RawVersion: Version, - RawIssuer: issuer, + Version: Version, + Issuer: issuer, }, }, LogID: LogID, diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 4dc64722..f8434129 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -80,8 +80,8 @@ func NewRPC( return &RPC{ PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: PolicyPartBase{ - RawVersion: Version, - RawIssuer: issuer, + Version: Version, + Issuer: issuer, }, RawSubject: Subject, }, @@ -126,7 +126,7 @@ func NewSP( return &SP{ PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: PolicyPartBase{ - RawIssuer: issuer, + Issuer: issuer, }, RawSubject: Subject, }, diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index 60fd173f..753f2d28 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -10,23 +10,19 @@ type MarshallableDocument interface { // for a domain, such as RPC, RCSR, SPT, 
SPRT, SP, PSR or Policy. type PolicyPart interface { MarshallableDocument - Version() int - Issuer() string } // PolicyPartBase is the common type to all policy documents. type PolicyPartBase struct { - RawJSON []byte `json:"-"` // omit from JSON (un)marshaling - RawVersion int `json:"Version,omitempty"` - RawIssuer string `json:"Issuer,omitempty"` + RawJSON []byte `json:"-"` // omit from JSON (un)marshaling + Version int `json:",omitempty"` + Issuer string `json:",omitempty"` } -func (o PolicyPartBase) Raw() []byte { return o.RawJSON } -func (o PolicyPartBase) Version() int { return o.RawVersion } -func (o PolicyPartBase) Issuer() string { return o.RawIssuer } +func (o PolicyPartBase) Raw() []byte { return o.RawJSON } func (o PolicyPartBase) Equal(x PolicyPartBase) bool { // Ignore the RawJSON component, use just the regular fields. - return o.RawVersion == x.RawVersion && - o.RawIssuer == x.RawIssuer + return o.Version == x.Version && + o.Issuer == x.Issuer } diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 8b05aec0..eba9546c 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -54,7 +54,7 @@ func NewRCSR( return &RCSR{ PolicyIssuerBase: PolicyIssuerBase{ PolicyPartBase: PolicyPartBase{ - RawVersion: Version, + Version: Version, }, RawSubject: Subject, }, diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 2d8ddc27..548488dc 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -129,14 +129,14 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { func (pca *PCA) OutputRPCAndSP() error { for domain, rpc := range pca.validRPCsByDomains { - err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer()+"_"+"rpc") + err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer+"_"+"rpc") if err != nil { return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) } } for domain, rpc := range pca.validSPsByDomains { - err := common.ToJSONFile(rpc, 
pca.outputPath+"/"+domain+"_"+rpc.Issuer()+"_"+"sp") + err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer+"_"+"sp") if err != nil { return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 7967f968..270b7370 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -37,7 +37,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic // Create one RPC and one SP for that name. rpc := RandomRPC(t) rpc.RawSubject = domainName - rpc.RawIssuer = "c0.com" + rpc.Issuer = "c0.com" data, err := common.ToJSON(rpc) require.NoError(t, err) diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index 0499d8e7..51e54a4b 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -22,8 +22,8 @@ func TestToTypedSlice(t *testing.T) { {}, {}, } - orig[0].RawVersion = 1 - orig[1].RawVersion = 2 + orig[0].Version = 1 + orig[1].Version = 2 s := make([]any, len(orig)) for i, e := range orig { s[i] = e @@ -39,7 +39,7 @@ func TestToType(t *testing.T) { { orig := &common.RPC{} orig.RawSubject = "a.com" - orig.RawVersion = 1 + orig.Version = 1 e := any(orig) r, err := ToType[*common.RPC](e) require.NoError(t, err) From 03d6719b00f7ee9f9039b148b168ba5caf5105f5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 20 Jun 2023 09:18:51 +0200 Subject: [PATCH 163/187] Added a boolean field in RPC to denote CA or not. --- pkg/common/policies.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/common/policies.go b/pkg/common/policies.go index f8434129..d1a171f5 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -26,6 +26,7 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { // RPC is a Root Policy Certificate. 
type RPC struct { PolicyCertificateBase + IsCA bool `json:",omitempty"` SerialNumber int `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` PublicKey []byte `json:",omitempty"` From 0aefccaec865140a01bde80e119449279a46a4af Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 20 Jun 2023 09:26:42 +0200 Subject: [PATCH 164/187] Moved SerialNumber to more general. --- pkg/common/policies.go | 26 +++++++++++++------------- pkg/common/policy_issuance.go | 13 ++++++++----- pkg/logverifier/logverifier_test.go | 2 +- pkg/pca/sign_and_log.go | 2 +- 4 files changed, 23 insertions(+), 20 deletions(-) diff --git a/pkg/common/policies.go b/pkg/common/policies.go index d1a171f5..79753bd2 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -10,24 +10,27 @@ import ( type PolicyCertificate interface { PolicyPart Subject() string + SerialNumber() int } type PolicyCertificateBase struct { PolicyPartBase - RawSubject string `json:"Subject,omitempty"` + RawSubject string `json:"Subject,omitempty"` + RawSerialNumber int `json:"SerialNumber,omitempty"` } -func (o PolicyCertificateBase) Subject() string { return o.RawSubject } +func (o PolicyCertificateBase) Subject() string { return o.RawSubject } +func (o PolicyCertificateBase) SerialNumber() int { return o.RawSerialNumber } func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { return p.PolicyPartBase.Equal(x.PolicyPartBase) && - p.RawSubject == x.RawSubject + p.RawSubject == x.RawSubject && + p.RawSerialNumber == x.RawSerialNumber } // RPC is a Root Policy Certificate. 
type RPC struct { PolicyCertificateBase IsCA bool `json:",omitempty"` - SerialNumber int `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` PublicKey []byte `json:",omitempty"` NotBefore time.Time `json:",omitempty"` @@ -44,7 +47,6 @@ type SP struct { PolicyCertificateBase Policies DomainPolicy `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` - SerialNumber int `json:",omitempty"` CASignature []byte `json:",omitempty"` RootCertSignature []byte `json:",omitempty"` SPTs []SPT `json:",omitempty"` @@ -64,7 +66,7 @@ type PCRevocation struct { func NewRPC( Subject string, - SerialNumber int, + serialNumber int, Version int, PublicKeyAlgorithm PublicKeyAlgorithm, PublicKey []byte, @@ -84,9 +86,9 @@ func NewRPC( Version: Version, Issuer: issuer, }, - RawSubject: Subject, + RawSubject: Subject, + RawSerialNumber: serialNumber, }, - SerialNumber: SerialNumber, PublicKeyAlgorithm: PublicKeyAlgorithm, PublicKey: PublicKey, NotBefore: NotBefore, @@ -101,7 +103,6 @@ func NewRPC( func (rpc RPC) Equal(x RPC) bool { return rpc.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && - rpc.SerialNumber == x.SerialNumber && rpc.PublicKeyAlgorithm == x.PublicKeyAlgorithm && bytes.Equal(rpc.PublicKey, x.PublicKey) && rpc.NotBefore.Equal(x.NotBefore) && @@ -118,7 +119,7 @@ func NewSP( Policy DomainPolicy, TimeStamp time.Time, issuer string, - SerialNumber int, + serialNumber int, CASignature []byte, RootCertSignature []byte, SPTs []SPT, @@ -129,11 +130,11 @@ func NewSP( PolicyPartBase: PolicyPartBase{ Issuer: issuer, }, - RawSubject: Subject, + RawSubject: Subject, + RawSerialNumber: serialNumber, }, Policies: Policy, TimeStamp: TimeStamp, - SerialNumber: SerialNumber, CASignature: CASignature, RootCertSignature: RootCertSignature, SPTs: SPTs, @@ -143,7 +144,6 @@ func NewSP( func (s SP) Equal(o SP) bool { return s.PolicyCertificateBase.Equal(o.PolicyCertificateBase) && s.TimeStamp.Equal(o.TimeStamp) && - s.SerialNumber == o.SerialNumber && 
bytes.Equal(s.CASignature, o.CASignature) && bytes.Equal(s.RootCertSignature, o.RootCertSignature) && s.Policies.Equal(o.Policies) && diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index eba9546c..670d2a5a 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -8,17 +8,20 @@ import ( type PolicyIssuer interface { PolicyPart Subject() string + SerialNumber() int } type PolicyIssuerBase struct { PolicyPartBase - RawSubject string `json:"Subject,omitempty"` + RawSubject string `json:"Subject,omitempty"` + RawSerialNumber int `json:"SerialNumber,omitempty"` } -func (c PolicyIssuerBase) Subject() string { return c.RawSubject } -func (c PolicyIssuerBase) Equal(x PolicyIssuerBase) bool { - return c.PolicyPartBase.Equal(x.PolicyPartBase) && - c.RawSubject == x.RawSubject +func (p PolicyIssuerBase) Subject() string { return p.RawSubject } +func (p PolicyIssuerBase) Equal(x PolicyIssuerBase) bool { + return p.PolicyPartBase.Equal(x.PolicyPartBase) && + p.RawSubject == x.RawSubject && + p.RawSerialNumber == x.RawSerialNumber } // RCSR is a root certificate signing request. diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 14113b3b..bf2cd024 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -125,7 +125,7 @@ func TestCheckSP(t *testing.T) { // Mock a STH with the right root hash. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "iQ+VNmSKh0lxvWHqQ/lqUB/17DkP/67rW9yAwhtMiwg="), + RootHash: tests.MustDecodeBase64(t, "SqfdrDwpR1nlUZ/MGvC0qKH48CYcAHRlBspg6l/G060="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index c2b147d1..1493c24f 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -64,7 +64,7 @@ func (pca *PCA) SignAndLogSP(psr *common.PSR) error { pca.preSPByDomains[spHash] = sp - err = pca.sendSPToPolicyLog(sp, strconv.Itoa(sp.SerialNumber)) + err = pca.sendSPToPolicyLog(sp, strconv.Itoa(sp.SerialNumber())) if err != nil { return fmt.Errorf("SignAndLogPSR | sendSPToPolicyLog | %w", err) } From 45eb1dcfae7d391da8bc7bfe509ef9cbcc4cb5d5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 21 Jun 2023 09:24:11 +0200 Subject: [PATCH 165/187] Rename IsCA to IsIssuer. --- pkg/common/policies.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 79753bd2..aa3852f7 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -30,7 +30,7 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { // RPC is a Root Policy Certificate. type RPC struct { PolicyCertificateBase - IsCA bool `json:",omitempty"` + IsIssuer bool `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` PublicKey []byte `json:",omitempty"` NotBefore time.Time `json:",omitempty"` From 8109ca667359ed451b38327a952e5afc591dad95 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Wed, 21 Jun 2023 16:41:36 +0200 Subject: [PATCH 166/187] Rename RPC to PolicyCertificate. 
--- cmd/ingest/certProcessor.go | 2 +- pkg/common/crypto/crypto.go | 11 +-- pkg/common/crypto/crypto_test.go | 2 +- pkg/common/json.go | 8 +- pkg/common/json_test.go | 4 +- pkg/common/policies.go | 77 +++++++++++-------- pkg/common/policy_issuance.go | 8 +- pkg/db/mysql/mysql_test.go | 2 +- pkg/domainowner/domainowner.go | 2 +- pkg/logverifier/verifier.go | 2 +- pkg/mapserver/common/tools.go | 2 +- pkg/mapserver/logfetcher/logfetcher.go | 8 +- pkg/mapserver/updater/updater.go | 8 +- pkg/mapserver/updater/updater_test.go | 4 +- pkg/pca/pca.go | 12 +-- pkg/pca/sign_and_log.go | 2 +- pkg/tests/random/random.go | 15 ++-- pkg/util/io.go | 4 +- pkg/util/types_test.go | 8 +- .../main.go | 4 +- 20 files changed, 98 insertions(+), 87 deletions(-) diff --git a/cmd/ingest/certProcessor.go b/cmd/ingest/certProcessor.go index 946d2bb4..531b89db 100644 --- a/cmd/ingest/certProcessor.go +++ b/cmd/ingest/certProcessor.go @@ -79,7 +79,7 @@ const ( type UpdateCertificateFunction func(context.Context, db.Conn, [][]string, []*common.SHA256Output, []*common.SHA256Output, []*ctx509.Certificate, []*time.Time, - []common.PolicyCertificate) error + []common.PolicyDocument) error func NewCertProcessor(conn db.Conn, incoming chan *CertificateNode, strategy CertificateUpdateStrategy) *CertificateProcessor { diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 89d2b338..fe4db464 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -84,7 +84,7 @@ func RCSRVerifySignature(rcsr *common.RCSR) error { } // RCSRVerifyRPCSignature: verify the RCSR using RPC; verify the RPC signature -func RCSRVerifyRPCSignature(rcsr *common.RCSR, rpc *common.RPC) error { +func RCSRVerifyRPCSignature(rcsr *common.RCSR, rpc *common.PolicyCertificate) error { // Serialize without signature: sig := rcsr.Signature rcsr.Signature = nil @@ -109,10 +109,11 @@ func RCSRVerifyRPCSignature(rcsr *common.RCSR, rpc *common.RPC) error { // RCSRGenerateRPC: called by PCA. 
Sign the RCSR and generate RPC; SPT field is (should be) empty func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, - caPrivKey *rsa.PrivateKey, caName string) (*common.RPC, error) { + caPrivKey *rsa.PrivateKey, caName string) (*common.PolicyCertificate, error) { - rpc := common.NewRPC( + rpc := common.NewPolicyCertificate( rcsr.RawSubject, + nil, // policy attributes serialNumber, rcsr.Version, rcsr.PublicKeyAlgorithm, @@ -141,7 +142,7 @@ func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, // ---------------------------------------------------------------------------------- // RPCVerifyCASignature: used by domain owner, check whether CA signature is correct -func RPCVerifyCASignature(caCert *ctx509.Certificate, rpc *common.RPC) error { +func RPCVerifyCASignature(caCert *ctx509.Certificate, rpc *common.PolicyCertificate) error { pubKey := caCert.PublicKey.(*rsa.PublicKey) // Serialize without CA signature or SPTs: @@ -176,7 +177,7 @@ func DomainOwnerSignPSR(domainOwnerPrivKey *rsa.PrivateKey, psr *common.PSR) err return nil } -func VerifyPSRUsingRPC(psr *common.PSR, rpc *common.RPC) error { +func VerifyPSRUsingRPC(psr *common.PSR, rpc *common.PolicyCertificate) error { // Serialize without signature: sig := psr.RootCertSignature psr.RootCertSignature = nil diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index a4516786..b045acbc 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -129,7 +129,7 @@ func TestIssuanceOfSP(t *testing.T) { // ------------------------------------- psr := common.NewPSR( "test_SP", - common.DomainPolicy{}, + common.PolicyAttributes{}, time.Now(), nil, ) diff --git a/pkg/common/json.go b/pkg/common/json.go index d79ac16b..09256989 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -63,7 +63,7 @@ func (*serializableObjectBase) marshalJSON(obj any) (string, []byte, error) { switch obj.(type) { case RCSR: T = "rcsr" 
- case RPC: + case PolicyCertificate: T = "rpc" case PCRevocation: T = "rev" @@ -196,7 +196,7 @@ func (o *serializableObjectBase) unmarshalTypeObject(T string, data []byte) (boo case "rcsr": obj, err = inflateObj[RCSR](data) case "rpc": - obj, err = inflateObj[RPC](data) + obj, err = inflateObj[PolicyCertificate](data) case "rev": obj, err = inflateObj[PCRevocation](data) case "sp": @@ -249,13 +249,13 @@ func FromJSONFile(filePath string) (any, error) { } // JsonFileToRPC: read json files and unmarshal it to Root Policy Certificate -func JsonFileToRPC(filePath string) (*RPC, error) { +func JsonFileToRPC(filePath string) (*PolicyCertificate, error) { po, err := FromJSONFile(filePath) if err != nil { return nil, fmt.Errorf("JsonFileToRPC | Unmarshal | %w", err) } - o, ok := po.(*RPC) + o, ok := po.(*PolicyCertificate) if !ok { return nil, fmt.Errorf("JsonFileToRPC | object is %T", po) } diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 98a119d0..436d4992 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -101,7 +101,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { obj: random.RandomRPC(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { - rpc := obj.(*common.RPC) + rpc := obj.(*common.PolicyCertificate) return [][]byte{rpc.RawJSON} }, }, @@ -131,7 +131,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { l := obj.([]any) return [][]byte{ l[0].(*common.SP).RawJSON, - l[1].(*common.RPC).RawJSON, + l[1].(*common.PolicyCertificate).RawJSON, } }, }, diff --git a/pkg/common/policies.go b/pkg/common/policies.go index aa3852f7..1db59e85 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -5,9 +5,9 @@ import ( "time" ) -// PolicyCertificate is any policy document that can be exchanged among mapservers, CT log servers, +// PolicyDocument is any policy document that can be exchanged among mapservers, CT log servers, // and others. 
-type PolicyCertificate interface { +type PolicyDocument interface { PolicyPart Subject() string SerialNumber() int @@ -27,8 +27,8 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { p.RawSerialNumber == x.RawSerialNumber } -// RPC is a Root Policy Certificate. -type RPC struct { +// PolicyCertificate is a Root Policy Certificate. +type PolicyCertificate struct { PolicyCertificateBase IsIssuer bool `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` @@ -39,21 +39,22 @@ type RPC struct { TimeStamp time.Time `json:",omitempty"` PRCSignature []byte `json:",omitempty"` CASignature []byte `json:",omitempty"` + PolicyAttributes []PolicyAttributes `json:",omitempty"` SPTs []SPT `json:",omitempty"` } // SP is a Signed Policy. type SP struct { PolicyCertificateBase - Policies DomainPolicy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - CASignature []byte `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` - SPTs []SPT `json:",omitempty"` + Policies PolicyAttributes `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + CASignature []byte `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` + SPTs []SPT `json:",omitempty"` } -// DomainPolicy is a domain policy that specifies what is or not acceptable for a domain. -type DomainPolicy struct { +// PolicyAttributes is a domain policy that specifies what is or not acceptable for a domain. +type PolicyAttributes struct { TrustedCA []string `json:",omitempty"` AllowedSubdomains []string `json:",omitempty"` } @@ -64,8 +65,9 @@ type PCRevocation struct { // TODO(juagargi) define the revocation. 
} -func NewRPC( +func NewPolicyCertificate( Subject string, + policyAttributes []PolicyAttributes, serialNumber int, Version int, PublicKeyAlgorithm PublicKeyAlgorithm, @@ -78,9 +80,9 @@ func NewRPC( PRCSignature []byte, CASignature []byte, SPTs []SPT, -) *RPC { +) *PolicyCertificate { - return &RPC{ + return &PolicyCertificate{ PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: PolicyPartBase{ Version: Version, @@ -101,22 +103,9 @@ func NewRPC( } } -func (rpc RPC) Equal(x RPC) bool { - return rpc.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && - rpc.PublicKeyAlgorithm == x.PublicKeyAlgorithm && - bytes.Equal(rpc.PublicKey, x.PublicKey) && - rpc.NotBefore.Equal(x.NotBefore) && - rpc.NotAfter.Equal(x.NotAfter) && - rpc.SignatureAlgorithm == x.SignatureAlgorithm && - rpc.TimeStamp.Equal(x.TimeStamp) && - bytes.Equal(rpc.PRCSignature, x.PRCSignature) && - bytes.Equal(rpc.CASignature, x.CASignature) && - equalSPTs(rpc.SPTs, x.SPTs) -} - func NewSP( Subject string, - Policy DomainPolicy, + Policy PolicyAttributes, TimeStamp time.Time, issuer string, serialNumber int, @@ -141,16 +130,30 @@ func NewSP( } } +func (c PolicyCertificate) Equal(x PolicyCertificate) bool { + return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && + c.PublicKeyAlgorithm == x.PublicKeyAlgorithm && + bytes.Equal(c.PublicKey, x.PublicKey) && + c.NotBefore.Equal(x.NotBefore) && + c.NotAfter.Equal(x.NotAfter) && + c.SignatureAlgorithm == x.SignatureAlgorithm && + c.TimeStamp.Equal(x.TimeStamp) && + bytes.Equal(c.PRCSignature, x.PRCSignature) && + bytes.Equal(c.CASignature, x.CASignature) && + equalSlices(c.SPTs, x.SPTs) && + equalSlices(c.PolicyAttributes, x.PolicyAttributes) +} + func (s SP) Equal(o SP) bool { return s.PolicyCertificateBase.Equal(o.PolicyCertificateBase) && s.TimeStamp.Equal(o.TimeStamp) && bytes.Equal(s.CASignature, o.CASignature) && bytes.Equal(s.RootCertSignature, o.RootCertSignature) && s.Policies.Equal(o.Policies) && - equalSPTs(s.SPTs, o.SPTs) + 
equalSlices(s.SPTs, o.SPTs) } -func (s DomainPolicy) Equal(o DomainPolicy) bool { +func (s PolicyAttributes) Equal(o PolicyAttributes) bool { return true && equalStringSlices(s.TrustedCA, o.TrustedCA) && equalStringSlices(s.AllowedSubdomains, o.AllowedSubdomains) @@ -164,24 +167,30 @@ func NewPCRevocation(subject string) *PCRevocation { } } -func equalSPTs(a, b []SPT) bool { +func equalStringSlices(a, b []string) bool { if len(a) != len(b) { return false } for i := range a { - if !a[i].Equal(b[i]) { + if a[i] != b[i] { return false } } return true } -func equalStringSlices(a, b []string) bool { +type Equaler[T any] interface { + Equal(T) bool +} + +// equalSlices (a,b) returns true iff the a and b slices contain exactly the same elements and in +// the same order, using `Equal` on each element to compare them. +func equalSlices[T Equaler[T]](a, b []T) bool { if len(a) != len(b) { return false } for i := range a { - if a[i] != b[i] { + if !a[i].Equal(b[i]) { return false } } diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 670d2a5a..74d1c4a2 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -38,9 +38,9 @@ type RCSR struct { // PSR is a Policy Signing Request. 
type PSR struct { PolicyIssuerBase - Policy DomainPolicy `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` + Policy PolicyAttributes `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + RootCertSignature []byte `json:",omitempty"` } func NewRCSR( @@ -82,7 +82,7 @@ func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { func NewPSR( Subject string, - Policy DomainPolicy, + Policy PolicyAttributes, TimeStamp time.Time, RootCertSignature []byte, ) *PSR { diff --git a/pkg/db/mysql/mysql_test.go b/pkg/db/mysql/mysql_test.go index 124077a1..fdb97e95 100644 --- a/pkg/db/mysql/mysql_test.go +++ b/pkg/db/mysql/mysql_test.go @@ -218,7 +218,7 @@ func testCertHierarchyForLeafs(t tests.T, leaves []string) (certs []*ctx509.Cert // testPolicyHierarchyForLeafs returns simply a policy hierarchy per leaf name, created using // the function BuildTestRandomPolicyHierarchy. -func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyCertificate, +func testPolicyHierarchyForLeafs(t tests.T, leaves []string) (pols []common.PolicyDocument, polIDs []*common.SHA256Output) { for _, name := range leaves { diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 5467ad1e..105ea9f8 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -72,7 +72,7 @@ func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCS } // GeneratePSR: generate one psr for one specific domain. 
-func (do *DomainOwner) GeneratePSR(domainName string, policy common.DomainPolicy) (*common.PSR, error) { +func (do *DomainOwner) GeneratePSR(domainName string, policy common.PolicyAttributes) (*common.PSR, error) { rpcKeyPair, ok := do.privKeyByDomainName[domainName] if !ok { return nil, fmt.Errorf("GeneratePSR | No valid RPC for domain %s", domainName) diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 637e2600..ffdcb326 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -130,7 +130,7 @@ func (v *LogVerifier) VerifySP(sp *common.SP) error { return nil } -func (v *LogVerifier) VerifyRPC(rpc *common.RPC) error { +func (v *LogVerifier) VerifyRPC(rpc *common.PolicyCertificate) error { // Get the hash of the RPC without SPTs: SPTs := rpc.SPTs rpc.SPTs = []common.SPT{} diff --git a/pkg/mapserver/common/tools.go b/pkg/mapserver/common/tools.go index e8390267..8a097099 100644 --- a/pkg/mapserver/common/tools.go +++ b/pkg/mapserver/common/tools.go @@ -109,7 +109,7 @@ func (domainEntry *DomainEntry) AddPC(pc *common.SP) bool { } // AddRPC: add a Root Policy Certificate to a domain entry. Return whether the domain entry is updated. 
-func (domainEntry *DomainEntry) AddRPC(rpc *common.RPC) bool { +func (domainEntry *DomainEntry) AddRPC(rpc *common.PolicyCertificate) bool { // caName := rpc.CAName // isFound := false diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index a5385a29..59fa1206 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -306,9 +306,9 @@ func (f *LogFetcher) getRawEntries( // GetPCAndRPCs: get PC and RPC from url // TODO(yongzhe): currently just generate random PC and RPC using top 1k domain names -func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker int) ([]*common.SP, []*common.RPC, error) { +func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker int) ([]*common.SP, []*common.PolicyCertificate, error) { resultPCs := make([]*common.SP, 0) - resultRPCs := make([]*common.RPC, 0) + resultRPCs := make([]*common.PolicyCertificate, 0) f, err := os.Open(ctURL) if err != nil { @@ -328,7 +328,7 @@ func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker in resultPCs = append(resultPCs, common.NewSP( domainName, - common.DomainPolicy{}, + common.PolicyAttributes{}, time.Now(), "", // CA name 0, // serial number @@ -337,7 +337,7 @@ func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker in nil, // SPTs )) - rpc := &common.RPC{} + rpc := &common.PolicyCertificate{} rpc.RawSubject = domainName rpc.NotBefore = time.Now() resultRPCs = append(resultRPCs, rpc) diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index fd0a2d25..f40d8e51 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -106,7 +106,7 @@ func (mapUpdater *MapUpdater) UpdateRPCAndPC(ctx context.Context, ctUrl string, } // UpdateRPCAndPCLocally: update RPC and PC, given a rpc and sp. 
Currently just mock PC and RPC -func (mapUpdater *MapUpdater) UpdateRPCAndPCLocally(ctx context.Context, spList []*common.SP, rpcList []*common.RPC) error { +func (mapUpdater *MapUpdater) UpdateRPCAndPCLocally(ctx context.Context, spList []*common.SP, rpcList []*common.PolicyCertificate) error { return mapUpdater.updateRPCAndPC(ctx, spList, rpcList) } @@ -123,7 +123,7 @@ func (mapUpdater *MapUpdater) updateCerts( func (mapUpdater *MapUpdater) updateRPCAndPC( ctx context.Context, sps []*common.SP, - rpcs []*common.RPC, + rpcs []*common.PolicyCertificate, ) error { // TODO(juagargi) @@ -133,7 +133,7 @@ func (mapUpdater *MapUpdater) updateRPCAndPC( func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]string, certIDs, parentCertIDs []*common.SHA256Output, certs []*ctx509.Certificate, certExpirations []*time.Time, - policies []common.PolicyCertificate, + policies []common.PolicyDocument, ) error { // Insert all specified certificates. @@ -165,7 +165,7 @@ func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]stri func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]string, certIDs, parentCertIDs []*common.SHA256Output, certs []*ctx509.Certificate, certExpirations []*time.Time, - policies []common.PolicyCertificate, + policies []common.PolicyDocument, ) error { // First check which certificates are already present in the DB. diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 0208f9a2..79450648 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -95,7 +95,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { } // Check policy coalescing. 
- policiesPerName := make(map[string][]common.PolicyCertificate, len(pols)) + policiesPerName := make(map[string][]common.PolicyDocument, len(pols)) for _, pol := range pols { policiesPerName[pol.Subject()] = append(policiesPerName[pol.Subject()], pol) } @@ -161,7 +161,7 @@ func glueSortedIDsAndComputeItsID(IDs []*common.SHA256Output) ([]byte, *common.S return gluedIDs, &id } -func computeIDsOfPolicies(policies []common.PolicyCertificate) []*common.SHA256Output { +func computeIDsOfPolicies(policies []common.PolicyDocument) []*common.SHA256Output { set := make(map[common.SHA256Output]struct{}, len(policies)) for _, pol := range policies { id := common.SHA256Hash32Bytes(pol.Raw()) diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 548488dc..a7a86d8e 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -30,12 +30,12 @@ type PCA struct { rsaKeyPair *rsa.PrivateKey // store valid RPC (with SPT) in memory; Later replaced by data base - validRPCsByDomains map[string]*common.RPC + validRPCsByDomains map[string]*common.PolicyCertificate validSPsByDomains map[string]*common.SP // RPC without SPT; pre-certificate - preRPCByDomains map[string]*common.RPC + preRPCByDomains map[string]*common.PolicyCertificate // RPC without SPT; pre-certificate preSPByDomains map[string]*common.SP @@ -65,9 +65,9 @@ func NewPCA(configPath string) (*PCA, error) { return nil, fmt.Errorf("NewPCA | LoadRSAKeyPairFromFile | %w", err) } return &PCA{ - validRPCsByDomains: make(map[string]*common.RPC), + validRPCsByDomains: make(map[string]*common.PolicyCertificate), validSPsByDomains: make(map[string]*common.SP), - preRPCByDomains: make(map[string]*common.RPC), + preRPCByDomains: make(map[string]*common.PolicyCertificate), preSPByDomains: make(map[string]*common.SP), logVerifier: logverifier.NewLogVerifier(nil), caName: config.CAName, @@ -145,7 +145,7 @@ func (pca *PCA) OutputRPCAndSP() error { } // verify the SPT of the RPC. 
-func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.RPC) error { +func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.PolicyCertificate) error { proofs, logRoot, err := getProofsAndLogRoot(spt) if err != nil { return fmt.Errorf("verifySPTWithRPC | parsePoIAndSTH | %w", err) @@ -195,7 +195,7 @@ func (pca *PCA) increaseSerialNumber() { pca.serialNumber = pca.serialNumber + 1 } -func (pca *PCA) ReturnValidRPC() map[string]*common.RPC { +func (pca *PCA) ReturnValidRPC() map[string]*common.PolicyCertificate { return pca.validRPCsByDomains } diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index 1493c24f..79a9a67a 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -87,7 +87,7 @@ func (pca *PCA) findRPCAndVerifyPSR(psr *common.PSR) error { } // save file to output dir -func (pca *PCA) sendRPCToPolicyLog(rpc *common.RPC, fileName string) error { +func (pca *PCA) sendRPCToPolicyLog(rpc *common.PolicyCertificate, fileName string) error { return common.ToJSONFile(rpc, pca.policyLogExgPath+"/rpc/"+fileName) } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 270b7370..c2c31157 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -33,7 +33,7 @@ func RandomX509Cert(t tests.T, domain string) *ctx509.Certificate { } } -func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyCertificate { +func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyDocument { // Create one RPC and one SP for that name. 
rpc := RandomRPC(t) rpc.RawSubject = domainName @@ -45,7 +45,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic sp := common.NewSP( domainName, - common.DomainPolicy{}, + common.PolicyAttributes{}, RandomTimeWithoutMonotonic(), "c0.com", 0, // serial number @@ -58,7 +58,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic require.NoError(t, err) sp.RawJSON = data - return []common.PolicyCertificate{rpc, sp} + return []common.PolicyDocument{rpc, sp} } // BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate @@ -125,9 +125,10 @@ func RandomSPT(t tests.T) *common.SPT { ) } -func RandomRPC(t tests.T) *common.RPC { - return common.NewRPC( +func RandomRPC(t tests.T) *common.PolicyCertificate { + return common.NewPolicyCertificate( "RPC subject", + nil, // policy attributes (empty for now) rand.Intn(10), rand.Intn(10), common.RSA, @@ -150,7 +151,7 @@ func RandomSPRT(t tests.T) *common.SPRT { func RandomSP(t tests.T) *common.SP { return common.NewSP( "domainname.com", - common.DomainPolicy{ + common.PolicyAttributes{ TrustedCA: []string{"ca1", "ca2"}, }, RandomTimeWithoutMonotonic(), @@ -169,7 +170,7 @@ func RandomSP(t tests.T) *common.SP { func RandomPSR(t tests.T) *common.PSR { return common.NewPSR( "domain_name.com", - common.DomainPolicy{ + common.PolicyAttributes{ TrustedCA: []string{"one CA", "another CA"}, AllowedSubdomains: []string{"sub1.com", "sub2.com"}, }, diff --git a/pkg/util/io.go b/pkg/util/io.go index a699b9d7..f5303b53 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -159,13 +159,13 @@ func LoadCertsAndChainsFromCSV( // LoadPoliciesFromRaw can load RPCs, SPs, RCSRs, PCRevocations, SPRTs, and PSRs from their // serialized form. 
-func LoadPoliciesFromRaw(b []byte) ([]common.PolicyCertificate, error) { +func LoadPoliciesFromRaw(b []byte) ([]common.PolicyDocument, error) { obj, err := common.FromJSON(b) if err != nil { return nil, err } // The returned object should be of type list. - pols, err := ToTypedSlice[common.PolicyCertificate](obj) + pols, err := ToTypedSlice[common.PolicyDocument](obj) if err != nil { return nil, err } diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index 51e54a4b..d3872c3b 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -18,7 +18,7 @@ func TestToTypedSlice(t *testing.T) { // slice of *common.RPC { - orig := []*common.RPC{ + orig := []*common.PolicyCertificate{ {}, {}, } @@ -28,7 +28,7 @@ func TestToTypedSlice(t *testing.T) { for i, e := range orig { s[i] = e } - r, err := ToTypedSlice[*common.RPC](s) + r, err := ToTypedSlice[*common.PolicyCertificate](s) require.NoError(t, err) require.Equal(t, orig, r) } @@ -37,11 +37,11 @@ func TestToTypedSlice(t *testing.T) { func TestToType(t *testing.T) { // *common.RPC { - orig := &common.RPC{} + orig := &common.PolicyCertificate{} orig.RawSubject = "a.com" orig.Version = 1 e := any(orig) - r, err := ToType[*common.RPC](e) + r, err := ToType[*common.PolicyCertificate](e) require.NoError(t, err) require.Equal(t, orig, r) } diff --git a/tests/integration/domainowner_pca_policlog_interaction/main.go b/tests/integration/domainowner_pca_policlog_interaction/main.go index a5a39ed3..80f01910 100644 --- a/tests/integration/domainowner_pca_policlog_interaction/main.go +++ b/tests/integration/domainowner_pca_policlog_interaction/main.go @@ -127,11 +127,11 @@ func main() { logErrAndQuit(fmt.Errorf("rpcs num error")) } - policy1 := common.DomainPolicy{ + policy1 := common.PolicyAttributes{ TrustedCA: []string{"swiss CA"}, } - policy2 := common.DomainPolicy{ + policy2 := common.PolicyAttributes{ TrustedCA: []string{"US CA"}, } From b0f138dae803598b0dd290b59897c10dc7c684a0 Mon Sep 17 00:00:00 2001 From: 
"Juan A. Garcia Pardo" Date: Thu, 22 Jun 2023 14:38:18 +0200 Subject: [PATCH 167/187] Rename and refactor policy objects. Use just one PolicyCertificate instead of RPC and SP. Use PolicyCertificateSigningRequest instead of RCSR and PSR. Refactor New* functions, to create the internal types using internal New* functions. Same for Equal functions. --- pkg/common/crypto/crypto.go | 215 +++++------------ pkg/common/crypto/crypto_test.go | 100 ++------ pkg/common/embedded_policies.go | 130 +++++++--- pkg/common/json.go | 68 ++---- pkg/common/json_test.go | 55 ++--- pkg/common/policies.go | 227 +++++++++++------- pkg/common/policies_test.go | 32 +-- pkg/common/policy_issuance.go | 131 ++++------ pkg/domainowner/domainowner.go | 69 +++--- pkg/logverifier/logverifier_test.go | 64 +---- pkg/logverifier/verifier.go | 35 +-- pkg/mapserver/common/tools.go | 31 --- pkg/mapserver/logfetcher/logfetcher.go | 45 ++-- pkg/mapserver/updater/updater.go | 16 +- pkg/mapserver/updater/updater_test.go | 6 +- pkg/pca/pca.go | 107 ++------- pkg/pca/sign_and_log.go | 32 +-- pkg/policylog/client/logclient.go | 113 +-------- pkg/tests/random/random.go | 117 +++------ .../main.go | 18 +- tests/testdata/2-SPs.json | 1 - 21 files changed, 603 insertions(+), 1009 deletions(-) delete mode 100644 tests/testdata/2-SPs.json diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index fe4db464..7ac0a8eb 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -6,7 +6,6 @@ import ( "crypto/rsa" "crypto/sha256" "fmt" - "time" ctx509 "github.com/google/certificate-transparency-go/x509" @@ -23,231 +22,125 @@ func SignBytes(b []byte, key *rsa.PrivateKey) ([]byte, error) { return signature, nil } -// ---------------------------------------------------------------------------------- -// functions on RCSR -// ---------------------------------------------------------------------------------- - -// RCSRCreateSignature: Generate a signature, and fill the signature in 
the RCSR -func RCSRCreateSignature(domainOwnerPrivKey *rsa.PrivateKey, rcsr *common.RCSR) error { +// SignAsOwner generates a signature using the owner's key, and fills the owner signature in +// the policy certificate signing request. +func SignAsOwner(domainOwnerPrivKey *rsa.PrivateKey, req *common.PolicyCertificateSigningRequest) error { // clear signature; normally should be empty - rcsr.Signature = []byte{} + req.OwnerSignature = []byte{} - signature, err := signStructRSASHA256(rcsr, domainOwnerPrivKey) + signature, err := signStructRSASHA256(req, domainOwnerPrivKey) if err != nil { return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) } - rcsr.Signature = signature - return nil -} - -// RCSRGenerateRPCSignature: Generate RPC signature and fill it in the RCSR; -// -// (in paper, if new rcsr has the signature from previous rpc, the cool-off can be bypassed) -func RCSRGenerateRPCSignature(rcsr *common.RCSR, prevPrivKeyOfPRC *rsa.PrivateKey) error { - // clear the co-responding fields - rcsr.Signature = []byte{} - rcsr.PRCSignature = []byte{} - - rpcSignature, err := signStructRSASHA256(rcsr, prevPrivKeyOfPRC) - if err != nil { - return fmt.Errorf("RCSRGenerateRPCSignature | SignStructRSASHA256 | %w", err) - } - - rcsr.PRCSignature = rpcSignature + req.OwnerSignature = signature return nil } -// RCSRVerifySignature: verify the signature using the public key in hash -func RCSRVerifySignature(rcsr *common.RCSR) error { +// VerifyOwnerSignature verifies the owner's signature using the public key. 
+func VerifyOwnerSignature(req *common.PolicyCertificateSigningRequest) error { // Serialize without signature: - sig := rcsr.Signature - rcsr.Signature = nil - serializedStruct, err := common.ToJSON(rcsr) + sig := req.OwnerSignature + req.OwnerSignature = nil + serializedStruct, err := common.ToJSON(req) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } - rcsr.Signature = sig + req.OwnerSignature = sig // Get the pub key: - pubKey, err := util.PEMToRSAPublic(rcsr.PublicKey) + pubKey, err := util.PEMToRSAPublic(req.PublicKey) if err != nil { return fmt.Errorf("RCSRVerifySignature | PemBytesToRsaPublicKey | %w", err) } hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rcsr.Signature) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], req.OwnerSignature) if err != nil { return fmt.Errorf("RCSRVerifySignature | VerifyPKCS1v15 | %w", err) } return nil } -// RCSRVerifyRPCSignature: verify the RCSR using RPC; verify the RPC signature -func RCSRVerifyRPCSignature(rcsr *common.RCSR, rpc *common.PolicyCertificate) error { +func VerifyOwnerSignatureWithPolCert(req *common.PolicyCertificateSigningRequest, + polCert *common.PolicyCertificate) error { + // Serialize without signature: - sig := rcsr.Signature - rcsr.Signature = nil - serializedStruct, err := common.ToJSON(rcsr) + sig := req.OwnerSignature + req.OwnerSignature = nil + serializedStruct, err := common.ToJSON(req) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } - rcsr.Signature = sig + req.OwnerSignature = sig - pubKey, err := util.PEMToRSAPublic(rpc.PublicKey) + pubKey, err := util.PEMToRSAPublic(polCert.PublicKey) if err != nil { return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) } hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rcsr.PRCSignature) + err = rsa.VerifyPKCS1v15(pubKey, 
crypto.SHA256, hashOutput[:], req.OwnerSignature) if err != nil { return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) } + return nil } -// RCSRGenerateRPC: called by PCA. Sign the RCSR and generate RPC; SPT field is (should be) empty -func RCSRGenerateRPC(rcsr *common.RCSR, notBefore time.Time, serialNumber int, - caPrivKey *rsa.PrivateKey, caName string) (*common.PolicyCertificate, error) { - - rpc := common.NewPolicyCertificate( - rcsr.RawSubject, - nil, // policy attributes - serialNumber, - rcsr.Version, - rcsr.PublicKeyAlgorithm, - rcsr.PublicKey, - notBefore, - time.Now().AddDate(0, 0, 90), - caName, - common.SHA256, - time.Now(), - rcsr.PRCSignature, - []byte{}, - nil, +// SignAsIssuer is called by the Policy CA. It signs the request and generates a +// PolicyCertificate. The SPTs field is (should be) empty. +func SignAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *rsa.PrivateKey, +) (*common.PolicyCertificate, error) { + + cert := common.NewPolicyCertificate( + req.Version, + req.Issuer, + req.Subject(), + req.SerialNumber(), + req.NotBefore, + req.NotAfter, + req.IsIssuer, + req.PublicKey, + req.PublicKeyAlgorithm, + req.SignatureAlgorithm, + req.TimeStamp, + req.PolicyAttributes, + req.OwnerSignature, + nil, // issuer signature + nil, // SPTs ) - signature, err := signStructRSASHA256(rpc, caPrivKey) + signature, err := signStructRSASHA256(cert, privKey) if err != nil { return nil, fmt.Errorf("RCSRGenerateRPC | SignStructRSASHA256 | %w", err) } - rpc.CASignature = signature - return rpc, nil + cert.IssuerSignature = signature + return cert, nil } -// ---------------------------------------------------------------------------------- -// functions on RPC -// ---------------------------------------------------------------------------------- - -// RPCVerifyCASignature: used by domain owner, check whether CA signature is correct -func RPCVerifyCASignature(caCert *ctx509.Certificate, rpc *common.PolicyCertificate) error { +// 
VerifyIssuerSignature: used by domain owner, check whether CA signature is correct +func VerifyIssuerSignature(caCert *ctx509.Certificate, rpc *common.PolicyCertificate) error { pubKey := caCert.PublicKey.(*rsa.PublicKey) // Serialize without CA signature or SPTs: - caSig, SPTs := rpc.CASignature, rpc.SPTs - rpc.CASignature, rpc.SPTs = nil, nil + caSig, SPTs := rpc.IssuerSignature, rpc.SPTs + rpc.IssuerSignature, rpc.SPTs = nil, nil bytes, err := common.ToJSON(rpc) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } - rpc.CASignature, rpc.SPTs = caSig, SPTs + rpc.IssuerSignature, rpc.SPTs = caSig, SPTs hashOutput := sha256.Sum256(bytes) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rpc.CASignature) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rpc.IssuerSignature) if err != nil { return fmt.Errorf("RPCVerifyCASignature | VerifyPKCS1v15 | %w", err) } return nil } -// ---------------------------------------------------------------------------------- -// functions on SP -// ---------------------------------------------------------------------------------- - -// DomainOwnerSignSP: Used by domain owner to sign the PC -func DomainOwnerSignPSR(domainOwnerPrivKey *rsa.PrivateKey, psr *common.PSR) error { - signature, err := signStructRSASHA256(psr, domainOwnerPrivKey) - if err != nil { - return fmt.Errorf("DomainOwnerSignPC | SignStructRSASHA256 | %w", err) - } - - psr.RootCertSignature = signature - return nil -} - -func VerifyPSRUsingRPC(psr *common.PSR, rpc *common.PolicyCertificate) error { - // Serialize without signature: - sig := psr.RootCertSignature - psr.RootCertSignature = nil - serializedStruct, err := common.ToJSON(psr) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - psr.RootCertSignature = sig - - pubKey, err := util.PEMToRSAPublic(rpc.PublicKey) - if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) - } - - 
hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], psr.RootCertSignature) - if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) - } - - return nil -} - -// CAVerifySPAndSign: verify the signature and sign the signature -func CASignSP(psr *common.PSR, caPrivKey *rsa.PrivateKey, caName string, serialNum int) ( - *common.SP, error) { - - sp := common.NewSP( - psr.RawSubject, - psr.Policy, - time.Now(), - caName, - serialNum, - nil, - psr.RootCertSignature, - nil, - ) - - caSignature, err := signStructRSASHA256(sp, caPrivKey) - if err != nil { - return nil, fmt.Errorf("CASignSP | SignStructRSASHA256 | %w", err) - } - - sp.CASignature = caSignature - return sp, nil -} - -// VerifyCASigInSP: verify CA's signature -func VerifyCASigInSP(caCert *ctx509.Certificate, sp *common.SP) error { - if len(sp.CASignature) == 0 { - return fmt.Errorf("VerifyCASigInPC | no valid CA signature") - } - - // Serialize without CA signature or SPTs: - caSig, SPTs := sp.CASignature, sp.SPTs - sp.CASignature, sp.SPTs = nil, nil - serializedStruct, err := common.ToJSON(sp) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) - } - sp.CASignature, sp.SPTs = caSig, SPTs - - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(caCert.PublicKey.(*rsa.PublicKey), crypto.SHA256, hashOutput[:], sp.CASignature) - if err != nil { - return fmt.Errorf("VerifyCASigInPC | VerifyPKCS1v15 | %w", err) - } - return nil -} - // signStructRSASHA256: generate a signature using SHA256 and RSA func signStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { b, err := common.ToJSON(s) diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index b045acbc..76507a38 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -2,12 +2,10 @@ package crypto_test import ( "testing" - "time" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/common/crypto" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/util" @@ -20,57 +18,37 @@ func TestSignatureOfRCSR(t *testing.T) { pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "RSA key to bytes error") - test := common.NewRCSR("this is a test", - 44, - time.Now(), - common.RSA, - pubKeyBytes, - common.SHA256, - random.RandomBytesForTest(t, 32), - random.RandomBytesForTest(t, 32), - ) - - err = crypto.RCSRCreateSignature(privKey, test) + test := random.RandomPolCertSignRequest(t) + test.PublicKey = pubKeyBytes + + err = crypto.SignAsOwner(privKey, test) require.NoError(t, err, "RCSR sign signature error") - err = crypto.RCSRVerifySignature(test) + err = crypto.VerifyOwnerSignature(test) require.NoError(t, err, "RCSR verify signature error") } // TestIssuanceOfRPC: check if the CA signature is correct func TestIssuanceOfRPC(t *testing.T) { - // ------------------------------------- - // phase 1: domain owner generate rcsr - // ------------------------------------- + // Phase 1: domain owner generates a policy certificate signing request. 
privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") - pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") - rcsr := common.NewRCSR("this is a test", - 44, - time.Now(), - common.RSA, - pubKeyBytes, - common.SHA256, - random.RandomBytesForTest(t, 32), - random.RandomBytesForTest(t, 32), - ) - - // generate signature for rcsr - err = crypto.RCSRCreateSignature(privKey, rcsr) + req := random.RandomPolCertSignRequest(t) + req.PublicKey = pubKeyBytes + // generate signature for request + err = crypto.SignAsOwner(privKey, req) require.NoError(t, err, "RCSR Create Signature error") - // ------------------------------------- - // phase 2: pca issue rpc - // ------------------------------------- - // validate the signature in rcsr - err = crypto.RCSRVerifySignature(rcsr) + // Phase 2: pca issues policy certificate. + err = crypto.VerifyOwnerSignature(req) + // Validate the signature in rcsr require.NoError(t, err, "RCSR Verify Signature error") pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") require.NoError(t, err) - rpc, err := crypto.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") + rpc, err := crypto.SignAsIssuer(req, pcaPrivKey) require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") @@ -82,7 +60,7 @@ func TestIssuanceOfRPC(t *testing.T) { caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") require.NoError(t, err, "X509 Cert From File error") - err = crypto.RPCVerifyCASignature(caCert, rpc) + err = crypto.VerifyIssuerSignature(caCert, rpc) require.NoError(t, err, "RPC Verify CA Signature error") } @@ -96,62 +74,24 @@ func TestIssuanceOfSP(t *testing.T) { pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") - rcsr := common.NewRCSR("this is a test", - 
44, - time.Now(), - common.RSA, - pubKeyBytes, - common.SHA256, - random.RandomBytesForTest(t, 32), - random.RandomBytesForTest(t, 32), - ) + req := random.RandomPolCertSignRequest(t) + req.PublicKey = pubKeyBytes // generate signature for rcsr - err = crypto.RCSRCreateSignature(privKey, rcsr) + err = crypto.SignAsOwner(privKey, req) require.NoError(t, err, "RCSR Create Signature error") // ------------------------------------- // phase 2: pca issue rpc // ------------------------------------- // validate the signature in rcsr - err = crypto.RCSRVerifySignature(rcsr) + err = crypto.VerifyOwnerSignature(req) require.NoError(t, err, "RCSR Verify Signature error") pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") require.NoError(t, err) - rpc, err := crypto.RCSRGenerateRPC(rcsr, time.Now(), 1, pcaPrivKey, "fpki") + rpc, err := crypto.SignAsIssuer(req, pcaPrivKey) require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") - - // ------------------------------------- - // phase 3: domain owner generate SP - // ------------------------------------- - psr := common.NewPSR( - "test_SP", - common.PolicyAttributes{}, - time.Now(), - nil, - ) - - err = crypto.DomainOwnerSignPSR(privKey, psr) - require.NoError(t, err, "DomainOwnerSignPSR error") - - // ------------------------------------- - // phase 4: pca check psr - // ------------------------------------- - err = crypto.VerifyPSRUsingRPC(psr, rpc) - require.NoError(t, err, "VerifyPSRUsingRPC error") - - sp, err := crypto.CASignSP(psr, pcaPrivKey, "test ca", 22) - require.NoError(t, err, "CASignSP error") - - // ------------------------------------- - // phase 5: domain owner check sp - // ------------------------------------- - caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") - require.NoError(t, err, "X509CertFromFile error") - - err = crypto.VerifyCASigInSP(caCert, sp) - require.NoError(t, err, "VerifyCASigInSP error") 
} diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index 24b67791..d70e3e53 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -13,10 +13,10 @@ func (p EmbeddedPolicyBase) Equal(x EmbeddedPolicyBase) bool { return p.PolicyPartBase.Equal(x.PolicyPartBase) } -// SPT is a signed policy timestamp. -type SPT struct { +// . SignedThingTimestamp is common to all timestamps returned by a policy log server. +type SignedThingTimestamp struct { EmbeddedPolicyBase - LogID int `json:",omitempty"` + LogID []byte `json:",omitempty"` CertType uint8 `json:",omitempty"` AddedTS time.Time `json:",omitempty"` STH []byte `json:",omitempty"` @@ -25,45 +25,50 @@ type SPT struct { Signature []byte `json:",omitempty"` } -// SPRT is a signed policy revocation timestamp. -type SPRT struct { - SPT +// SignedPolicyCertificateTimestamp is a signed policy certificate timestamp. +type SignedPolicyCertificateTimestamp struct { + SignedThingTimestamp +} + +// SignedPolicyCertificateRevocationTimestamp is a signed policy certificate revocation timestamp. 
+type SignedPolicyCertificateRevocationTimestamp struct { + SignedThingTimestamp Reason int `json:",omitempty"` } -func NewSPT( - Subject string, - Version int, +func NewSignedThingTimestamp( + subject string, + version int, issuer string, - LogID int, - CertType uint8, - AddedTS time.Time, - STH []byte, - PoI []byte, - STHSerialNumber int, - Signature []byte, -) *SPT { + logID []byte, + certType uint8, + addedTS time.Time, + sTH []byte, + poI []byte, + sTHSerialNumber int, + signature []byte, +) *SignedThingTimestamp { - return &SPT{ + return &SignedThingTimestamp{ EmbeddedPolicyBase: EmbeddedPolicyBase{ PolicyPartBase: PolicyPartBase{ - Version: Version, + Version: version, Issuer: issuer, }, }, - LogID: LogID, - CertType: CertType, - AddedTS: AddedTS, - STH: STH, - PoI: PoI, - STHSerialNumber: STHSerialNumber, - Signature: Signature, + LogID: logID, + CertType: certType, + AddedTS: addedTS, + STH: sTH, + PoI: poI, + STHSerialNumber: sTHSerialNumber, + Signature: signature, } } -func (s SPT) Equal(x SPT) bool { +func (s SignedThingTimestamp) Equal(x SignedThingTimestamp) bool { return s.EmbeddedPolicyBase.Equal(x.EmbeddedPolicyBase) && - s.LogID == x.LogID && + bytes.Equal(s.LogID, x.LogID) && s.CertType == x.CertType && s.AddedTS.Equal(x.AddedTS) && bytes.Equal(s.STH, x.STH) && @@ -72,14 +77,69 @@ func (s SPT) Equal(x SPT) bool { bytes.Equal(s.Signature, x.Signature) } -func NewSPRT(SPT *SPT, Reason int) *SPRT { - return &SPRT{ - SPT: *SPT, - Reason: Reason, +func NewSignedPolicyCertificateTimestamp( + subject string, + version int, + issuer string, + logID []byte, + certType uint8, + addedTS time.Time, + sTH []byte, + poI []byte, + sTHSerialNumber int, + signature []byte, +) *SignedPolicyCertificateTimestamp { + return &SignedPolicyCertificateTimestamp{ + SignedThingTimestamp: *NewSignedThingTimestamp( + subject, + version, + issuer, + logID, + certType, + addedTS, + sTH, + poI, + sTHSerialNumber, + signature, + ), + } +} + +func (t 
SignedPolicyCertificateTimestamp) Equal(x SignedPolicyCertificateTimestamp) bool { + return t.SignedThingTimestamp.Equal(x.SignedThingTimestamp) +} + +func NewSignedPolicyCertificateRevocationTimestamp( + subject string, + version int, + issuer string, + logID []byte, + certType uint8, + addedTS time.Time, + sTH []byte, + poI []byte, + sTHSerialNumber int, + signature []byte, + reason int, +) *SignedPolicyCertificateRevocationTimestamp { + return &SignedPolicyCertificateRevocationTimestamp{ + SignedThingTimestamp: *NewSignedThingTimestamp( + subject, + version, + issuer, + logID, + certType, + addedTS, + sTH, + poI, + sTHSerialNumber, + signature, + ), + Reason: reason, } } -func (sprt SPRT) Equal(x SPRT) bool { - return sprt.SPT.Equal(x.SPT) && - sprt.Reason == x.Reason +func (t SignedPolicyCertificateRevocationTimestamp) Equal(x SignedPolicyCertificateRevocationTimestamp) bool { + return t.SignedThingTimestamp.Equal(x.SignedThingTimestamp) && + t.Reason == x.Reason } diff --git a/pkg/common/json.go b/pkg/common/json.go index 09256989..64ca7a44 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -61,20 +61,16 @@ func (o serializableObjectBase) MarshalJSON() ([]byte, error) { func (*serializableObjectBase) marshalJSON(obj any) (string, []byte, error) { var T string switch obj.(type) { - case RCSR: - T = "rcsr" + case PolicyCertificateSigningRequest: + T = "pcsr" case PolicyCertificate: - T = "rpc" - case PCRevocation: - T = "rev" - case SP: - T = "sp" - case SPT: - T = "spt" - case SPRT: - T = "sprt" - case PSR: - T = "psr" + T = "pc" + case SignedPolicyCertificateTimestamp: + T = "spct" + case PolicyCertificateRevocation: + T = "pcrev" + case SignedPolicyCertificateRevocationTimestamp: + T = "spcrevt" case trillian.Proof: T = "trillian.Proof" case trilliantypes.LogRootV1: @@ -193,20 +189,16 @@ func (o *serializableObjectBase) unmarshalTypeObject(T string, data []byte) (boo list[i] = tmp.O } } - case "rcsr": - obj, err = inflateObj[RCSR](data) - case "rpc": 
+ case "pcsr": + obj, err = inflateObj[PolicyCertificateSigningRequest](data) + case "pc": obj, err = inflateObj[PolicyCertificate](data) - case "rev": - obj, err = inflateObj[PCRevocation](data) - case "sp": - obj, err = inflateObj[SP](data) - case "spt": - obj, err = inflateObj[SPT](data) - case "sprt": - obj, err = inflateObj[SPRT](data) - case "psr": - obj, err = inflateObj[PSR](data) + case "pcrev": + obj, err = inflateObj[PolicyCertificateRevocation](data) + case "spct": + obj, err = inflateObj[SignedPolicyCertificateTimestamp](data) + case "spcrevt": + obj, err = inflateObj[SignedPolicyCertificateRevocationTimestamp](data) case "trillian.Proof": obj, err = inflateObj[trillian.Proof](data) case "logrootv1": @@ -248,8 +240,8 @@ func FromJSONFile(filePath string) (any, error) { return FromJSON(data) } -// JsonFileToRPC: read json files and unmarshal it to Root Policy Certificate -func JsonFileToRPC(filePath string) (*PolicyCertificate, error) { +// JsonFileToPolicyCert: read json files and unmarshal it to Root Policy Certificate +func JsonFileToPolicyCert(filePath string) (*PolicyCertificate, error) { po, err := FromJSONFile(filePath) if err != nil { return nil, fmt.Errorf("JsonFileToRPC | Unmarshal | %w", err) @@ -263,17 +255,17 @@ func JsonFileToRPC(filePath string) (*PolicyCertificate, error) { } // JsonFileToSPT: read json files and unmarshal it to Signed Policy Timestamp -func JsonFileToSPT(filePath string) (*SPT, error) { +func JsonFileToSPT(filePath string) (*SignedPolicyCertificateTimestamp, error) { po, err := FromJSONFile(filePath) if err != nil { return nil, fmt.Errorf("JsonFileToSPT | Unmarshal | %w", err) } - o, ok := po.(*SPT) + t, ok := po.(*SignedPolicyCertificateTimestamp) if !ok { return nil, fmt.Errorf("JsonFileToSPT | object is %T", po) } - return o, nil + return t, nil } // JsonFileToProof: read json files and unmarshal it to trillian proof @@ -303,17 +295,3 @@ func JsonFileToSTH(filePath string) (*trilliantypes.LogRootV1, error) { } return 
o, nil } - -// JsonFileToSTH reads a json file and unmarshals it to a Signed Policy. -func JsonFileToSP(filePath string) (*SP, error) { - po, err := FromJSONFile(filePath) - if err != nil { - return nil, fmt.Errorf("JsonFileToSP | Unmarshal | %w", err) - } - - o, ok := po.(*SP) - if !ok { - err = fmt.Errorf("JsonFileToSP | object is %T", po) - } - return o, err -} diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index 436d4992..f4d3e267 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -19,37 +19,32 @@ func TestPolicyObjects(t *testing.T) { data any }{ "rpcPtr": { - data: random.RandomRPC(t), + data: random.RandomPolicyCertificate(t), }, "rpcValue": { - data: *random.RandomRPC(t), + data: *random.RandomPolicyCertificate(t), }, "rcsr": { - data: random.RandomRCSR(t), - }, - "sp": { - data: random.RandomSP(t), + data: random.RandomPolCertSignRequest(t), }, "spt": { - data: *random.RandomSPT(t), + data: *random.RandomSignedPolicyCertificateTimestamp(t), }, "list": { data: []any{ - random.RandomRPC(t), - random.RandomRCSR(t), - random.RandomSP(t), - random.RandomSPRT(t), - random.RandomPSR(t), + random.RandomPolicyCertificate(t), + random.RandomPolCertSignRequest(t), + random.RandomSignedPolicyCertificateTimestamp(t), randomTrillianProof(t), randomLogRootV1(t), }, }, "list_embedded": { data: []any{ - random.RandomRPC(t), + random.RandomPolicyCertificate(t), []any{ - random.RandomSP(t), - random.RandomSPT(t), + random.RandomPolicyCertificate(t), + random.RandomSignedPolicyCertificateTimestamp(t), }, []any{ randomTrillianProof(t), @@ -59,14 +54,14 @@ func TestPolicyObjects(t *testing.T) { }, "multiListPtr": { data: &[]any{ - random.RandomRPC(t), - *random.RandomRPC(t), + random.RandomPolicyCertificate(t), + *random.RandomPolicyCertificate(t), []any{ - random.RandomSP(t), - *random.RandomSP(t), + random.RandomPolicyCertificate(t), + *random.RandomPolicyCertificate(t), &[]any{ - random.RandomSPT(t), - *random.RandomSPT(t), + 
random.RandomSignedPolicyCertificateTimestamp(t), + *random.RandomSignedPolicyCertificateTimestamp(t), }, }, }, @@ -98,7 +93,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { getRawElemsFcn func(obj any) [][]byte // Return the Raw components of this thing. }{ "rpc": { - obj: random.RandomRPC(t), + obj: random.RandomPolicyCertificate(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { rpc := obj.(*common.PolicyCertificate) @@ -106,32 +101,32 @@ func TestPolicyObjectBaseRaw(t *testing.T) { }, }, "spPtr": { - obj: random.RandomSP(t), + obj: random.RandomPolicyCertificate(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { - sp := obj.(*common.SP) + sp := obj.(*common.PolicyCertificate) return [][]byte{sp.RawJSON} }, }, "spValue": { - obj: *random.RandomSP(t), + obj: *random.RandomPolicyCertificate(t), rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { - sp := obj.(common.SP) + sp := obj.(common.PolicyCertificate) return [][]byte{sp.RawJSON} }, }, "list": { obj: []any{ - random.RandomSP(t), - random.RandomRPC(t), + random.RandomPolicyCertificate(t), + random.RandomPolCertSignRequest(t), }, rawElemsCount: 2, getRawElemsFcn: func(obj any) [][]byte { l := obj.([]any) return [][]byte{ - l[0].(*common.SP).RawJSON, - l[1].(*common.PolicyCertificate).RawJSON, + l[0].(*common.PolicyCertificate).RawJSON, + l[1].(*common.PolicyCertificateSigningRequest).RawJSON, } }, }, diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 1db59e85..2ae7bfaa 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -27,30 +27,24 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { p.RawSerialNumber == x.RawSerialNumber } -// PolicyCertificate is a Root Policy Certificate. 
-type PolicyCertificate struct { +type PolicyCertificateFields struct { PolicyCertificateBase - IsIssuer bool `json:",omitempty"` - PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` - PublicKey []byte `json:",omitempty"` NotBefore time.Time `json:",omitempty"` NotAfter time.Time `json:",omitempty"` + IsIssuer bool `json:",omitempty"` + PublicKey []byte `json:",omitempty"` // In PEM format + PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` - PRCSignature []byte `json:",omitempty"` - CASignature []byte `json:",omitempty"` PolicyAttributes []PolicyAttributes `json:",omitempty"` - SPTs []SPT `json:",omitempty"` + OwnerSignature []byte `json:",omitempty"` } -// SP is a Signed Policy. -type SP struct { - PolicyCertificateBase - Policies PolicyAttributes `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - CASignature []byte `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` - SPTs []SPT `json:",omitempty"` +// PolicyCertificate is a Root Policy Certificate. +type PolicyCertificate struct { + PolicyCertificateFields + IssuerSignature []byte `json:",omitempty"` + SPTs []SignedPolicyCertificateTimestamp `json:",omitempty"` } // PolicyAttributes is a domain policy that specifies what is or not acceptable for a domain. @@ -59,78 +53,55 @@ type PolicyAttributes struct { AllowedSubdomains []string `json:",omitempty"` } -// PCRevocation is for now empty. -type PCRevocation struct { +type PolicyCertificateRevocationFields struct { PolicyCertificateBase - // TODO(juagargi) define the revocation. 
+ TimeStamp time.Time `json:",omitempty"` + OwnerSignature []byte `json:",omitempty"` } -func NewPolicyCertificate( - Subject string, - policyAttributes []PolicyAttributes, - serialNumber int, - Version int, - PublicKeyAlgorithm PublicKeyAlgorithm, - PublicKey []byte, - NotBefore time.Time, - NotAfter time.Time, - issuer string, - SignatureAlgorithm SignatureAlgorithm, - TimeStamp time.Time, - PRCSignature []byte, - CASignature []byte, - SPTs []SPT, -) *PolicyCertificate { - - return &PolicyCertificate{ - PolicyCertificateBase: PolicyCertificateBase{ - PolicyPartBase: PolicyPartBase{ - Version: Version, - Issuer: issuer, - }, - RawSubject: Subject, - RawSerialNumber: serialNumber, - }, - PublicKeyAlgorithm: PublicKeyAlgorithm, - PublicKey: PublicKey, - NotBefore: NotBefore, - NotAfter: NotAfter, - SignatureAlgorithm: SignatureAlgorithm, - TimeStamp: TimeStamp, - PRCSignature: PRCSignature, - CASignature: CASignature, - SPTs: SPTs, - } +type PolicyCertificateRevocation struct { + PolicyCertificateRevocationFields + IssuerSignature []byte `json:",omitempty"` + SPCRTs []SignedPolicyCertificateRevocationTimestamp `json:",omitempty"` } -func NewSP( - Subject string, - Policy PolicyAttributes, - TimeStamp time.Time, +func NewPolicyCertificateFields( + version int, issuer string, + subject string, serialNumber int, - CASignature []byte, - RootCertSignature []byte, - SPTs []SPT, -) *SP { - - return &SP{ + notBefore time.Time, + notAfter time.Time, + isIssuer bool, + publicKey []byte, + publicKeyAlgorithm PublicKeyAlgorithm, + signatureAlgorithm SignatureAlgorithm, + timeStamp time.Time, + policyAttributes []PolicyAttributes, + ownerSignature []byte, +) *PolicyCertificateFields { + return &PolicyCertificateFields{ PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: PolicyPartBase{ - Issuer: issuer, + Version: version, + Issuer: issuer, }, - RawSubject: Subject, + RawSubject: subject, RawSerialNumber: serialNumber, }, - Policies: Policy, - TimeStamp: TimeStamp, - 
CASignature: CASignature, - RootCertSignature: RootCertSignature, - SPTs: SPTs, + NotBefore: notBefore, + NotAfter: notAfter, + IsIssuer: isIssuer, + PublicKey: publicKey, + PublicKeyAlgorithm: publicKeyAlgorithm, + SignatureAlgorithm: signatureAlgorithm, + TimeStamp: timeStamp, + PolicyAttributes: policyAttributes, + OwnerSignature: ownerSignature, } } -func (c PolicyCertificate) Equal(x PolicyCertificate) bool { +func (c PolicyCertificateFields) Equal(x PolicyCertificateFields) bool { return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && c.PublicKeyAlgorithm == x.PublicKeyAlgorithm && bytes.Equal(c.PublicKey, x.PublicKey) && @@ -138,19 +109,53 @@ func (c PolicyCertificate) Equal(x PolicyCertificate) bool { c.NotAfter.Equal(x.NotAfter) && c.SignatureAlgorithm == x.SignatureAlgorithm && c.TimeStamp.Equal(x.TimeStamp) && - bytes.Equal(c.PRCSignature, x.PRCSignature) && - bytes.Equal(c.CASignature, x.CASignature) && - equalSlices(c.SPTs, x.SPTs) && + bytes.Equal(c.OwnerSignature, x.OwnerSignature) && equalSlices(c.PolicyAttributes, x.PolicyAttributes) } -func (s SP) Equal(o SP) bool { - return s.PolicyCertificateBase.Equal(o.PolicyCertificateBase) && - s.TimeStamp.Equal(o.TimeStamp) && - bytes.Equal(s.CASignature, o.CASignature) && - bytes.Equal(s.RootCertSignature, o.RootCertSignature) && - s.Policies.Equal(o.Policies) && - equalSlices(s.SPTs, o.SPTs) +func NewPolicyCertificate( + version int, + issuer string, + subject string, + serialNumber int, + notBefore time.Time, + notAfter time.Time, + isIssuer bool, + publicKey []byte, + publicKeyAlgorithm PublicKeyAlgorithm, + signatureAlgorithm SignatureAlgorithm, + timeStamp time.Time, + policyAttributes []PolicyAttributes, + ownerSignature []byte, + issuerSignature []byte, + SPTs []SignedPolicyCertificateTimestamp, +) *PolicyCertificate { + + return &PolicyCertificate{ + PolicyCertificateFields: *NewPolicyCertificateFields( + version, + issuer, + subject, + serialNumber, + notBefore, + notAfter, + isIssuer, + 
publicKey, + publicKeyAlgorithm, + signatureAlgorithm, + timeStamp, + policyAttributes, + ownerSignature, + ), + IssuerSignature: issuerSignature, + SPTs: SPTs, + } +} + +func (c PolicyCertificate) Equal(x PolicyCertificate) bool { + return c.PolicyCertificateFields.Equal(x.PolicyCertificateFields) && + bytes.Equal(c.IssuerSignature, x.IssuerSignature) && + equalSlices(c.SPTs, x.SPTs) } func (s PolicyAttributes) Equal(o PolicyAttributes) bool { @@ -159,14 +164,64 @@ func (s PolicyAttributes) Equal(o PolicyAttributes) bool { equalStringSlices(s.AllowedSubdomains, o.AllowedSubdomains) } -func NewPCRevocation(subject string) *PCRevocation { - return &PCRevocation{ +func NewPolicyCertificateRevocationFields( + version int, + issuer string, + subject string, + serialNumber int, + timeStamp time.Time, + ownerSignature []byte, +) *PolicyCertificateRevocationFields { + return &PolicyCertificateRevocationFields{ PolicyCertificateBase: PolicyCertificateBase{ - RawSubject: subject, + PolicyPartBase: PolicyPartBase{ + Version: version, + Issuer: issuer, + }, + RawSubject: subject, + RawSerialNumber: serialNumber, }, + TimeStamp: timeStamp, + OwnerSignature: ownerSignature, } } +func (c PolicyCertificateRevocationFields) Equal(x PolicyCertificateRevocationFields) bool { + return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && + c.TimeStamp == x.TimeStamp && + bytes.Equal(c.OwnerSignature, x.OwnerSignature) +} + +func NewPolicyCertificateRevocation( + version int, + issuer string, + subject string, + serialNumber int, + timeStamp time.Time, + ownerSignature []byte, + issuerSignature []byte, + serverTimestamps []SignedPolicyCertificateRevocationTimestamp, +) *PolicyCertificateRevocation { + return &PolicyCertificateRevocation{ + PolicyCertificateRevocationFields: *NewPolicyCertificateRevocationFields( + version, + issuer, + subject, + serialNumber, + timeStamp, + ownerSignature, + ), + IssuerSignature: issuerSignature, + SPCRTs: serverTimestamps, + } +} + +func (r 
PolicyCertificateRevocation) Equal(x PolicyCertificateRevocation) bool { + return r.PolicyCertificateRevocationFields.Equal(x.PolicyCertificateRevocationFields) && + bytes.Equal(r.IssuerSignature, x.IssuerSignature) && + equalSlices(r.SPCRTs, x.SPCRTs) +} + func equalStringSlices(a, b []string) bool { if len(a) != len(b) { return false diff --git a/pkg/common/policies_test.go b/pkg/common/policies_test.go index 407d6903..c477ced7 100644 --- a/pkg/common/policies_test.go +++ b/pkg/common/policies_test.go @@ -14,42 +14,28 @@ import ( "github.com/netsec-ethz/fpki/pkg/tests/random" ) -var update = tests.UpdateGoldenFiles() - -func TestGenerateGoldenFiles(t *testing.T) { - // Update the JSON files in tests/testdata - if *update { - obj := []any{random.RandomSP(t), random.RandomSP(t)} - err := common.ToJSONFile(obj, "../../tests/testdata/2-SPs.json") - require.NoError(t, err) - } -} - // TestEqual: Equal funcs for every structure func TestEqual(t *testing.T) { - rcsr := random.RandomRCSR(t) + rcsr := random.RandomPolCertSignRequest(t) require.True(t, rcsr.Equal(rcsr)) - spt1 := *random.RandomSPT(t) - spt2 := *random.RandomSPT(t) + spt1 := *random.RandomSignedPolicyCertificateTimestamp(t) + spt2 := *random.RandomSignedPolicyCertificateTimestamp(t) require.True(t, spt1.Equal(spt1)) require.True(t, spt2.Equal(spt2)) require.False(t, spt1.Equal(spt2)) require.False(t, spt2.Equal(spt1)) - sprt := random.RandomSPRT(t) - require.True(t, sprt.Equal(*sprt)) - - rpc := random.RandomRPC(t) + rpc := random.RandomPolicyCertificate(t) require.True(t, rpc.Equal(*rpc)) } // TestJsonReadWrite: RPC -> file -> RPC, then RPC.Equal(RPC) func TestJsonReadWrite(t *testing.T) { - rpc := random.RandomRPC(t) - rpc.SPTs = []common.SPT{ - *random.RandomSPT(t), - *random.RandomSPT(t), + rpc := random.RandomPolicyCertificate(t) + rpc.SPTs = []common.SignedPolicyCertificateTimestamp{ + *random.RandomSignedPolicyCertificateTimestamp(t), + *random.RandomSignedPolicyCertificateTimestamp(t), } 
tempFile := path.Join(os.TempDir(), "rpctest.json") @@ -57,7 +43,7 @@ func TestJsonReadWrite(t *testing.T) { err := common.ToJSONFile(rpc, tempFile) require.NoError(t, err, "Json Struct To File error") - rpc1, err := common.JsonFileToRPC(tempFile) + rpc1, err := common.JsonFileToPolicyCert(tempFile) require.NoError(t, err, "Json File To RPC error") require.True(t, rpc.Equal(*rpc1), "Json error") diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 74d1c4a2..37462ef1 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -1,98 +1,53 @@ package common import ( - "bytes" "time" ) -type PolicyIssuer interface { - PolicyPart - Subject() string - SerialNumber() int -} - -type PolicyIssuerBase struct { - PolicyPartBase - RawSubject string `json:"Subject,omitempty"` - RawSerialNumber int `json:"SerialNumber,omitempty"` -} - -func (p PolicyIssuerBase) Subject() string { return p.RawSubject } -func (p PolicyIssuerBase) Equal(x PolicyIssuerBase) bool { - return p.PolicyPartBase.Equal(x.PolicyPartBase) && - p.RawSubject == x.RawSubject && - p.RawSerialNumber == x.RawSerialNumber -} - -// RCSR is a root certificate signing request. -type RCSR struct { - PolicyIssuerBase - TimeStamp time.Time `json:",omitempty"` - PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` - PublicKey []byte `json:",omitempty"` - SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` - PRCSignature []byte `json:",omitempty"` - Signature []byte `json:",omitempty"` -} - -// PSR is a Policy Signing Request. 
-type PSR struct { - PolicyIssuerBase - Policy PolicyAttributes `json:",omitempty"` - TimeStamp time.Time `json:",omitempty"` - RootCertSignature []byte `json:",omitempty"` -} - -func NewRCSR( - Subject string, - Version int, - TimeStamp time.Time, - PublicKeyAlgo PublicKeyAlgorithm, - PublicKey []byte, - SignatureAlgo SignatureAlgorithm, - PRCSignature []byte, - Signature []byte, -) *RCSR { - - return &RCSR{ - PolicyIssuerBase: PolicyIssuerBase{ - PolicyPartBase: PolicyPartBase{ - Version: Version, - }, - RawSubject: Subject, - }, - TimeStamp: TimeStamp, - PublicKeyAlgorithm: PublicKeyAlgo, - PublicKey: PublicKey, - SignatureAlgorithm: SignatureAlgo, - PRCSignature: PRCSignature, - Signature: Signature, +// PolicyCertificateSigningRequest is a policy certificate signing request. +type PolicyCertificateSigningRequest struct { + PolicyCertificateFields +} + +type PolicyCertificateRevocationSigningRequest struct { + Subject string `json:",omitemptyu"` +} + +func NewPolicyCertificateSigningRequest( + version int, + issuer string, + subject string, + serialNumber int, + notBefore time.Time, + notAfter time.Time, + isIssuer bool, + publicKey []byte, + publicKeyAlgorithm PublicKeyAlgorithm, + signatureAlgorithm SignatureAlgorithm, + timeStamp time.Time, + policyAttributes []PolicyAttributes, + ownerSignature []byte, +) *PolicyCertificateSigningRequest { + + return &PolicyCertificateSigningRequest{ + PolicyCertificateFields: *NewPolicyCertificateFields( + version, + issuer, + subject, + serialNumber, + notBefore, + notAfter, + isIssuer, + publicKey, + publicKeyAlgorithm, + signatureAlgorithm, + timeStamp, + policyAttributes, + ownerSignature, + ), } } -func (rcsr *RCSR) Equal(rcsr_ *RCSR) bool { - return rcsr.PolicyIssuerBase.Equal(rcsr.PolicyIssuerBase) && - rcsr.TimeStamp.Equal(rcsr_.TimeStamp) && - rcsr.PublicKeyAlgorithm == rcsr_.PublicKeyAlgorithm && - bytes.Equal(rcsr.PublicKey, rcsr_.PublicKey) && - rcsr.SignatureAlgorithm == rcsr_.SignatureAlgorithm && - 
bytes.Equal(rcsr.PRCSignature, rcsr_.PRCSignature) && - bytes.Equal(rcsr.Signature, rcsr_.Signature) -} - -func NewPSR( - Subject string, - Policy PolicyAttributes, - TimeStamp time.Time, - RootCertSignature []byte, -) *PSR { - - return &PSR{ - PolicyIssuerBase: PolicyIssuerBase{ - RawSubject: Subject, - }, - Policy: Policy, - TimeStamp: TimeStamp, - RootCertSignature: RootCertSignature, - } +func (req *PolicyCertificateSigningRequest) Equal(x *PolicyCertificateSigningRequest) bool { + return req.PolicyCertificateFields.Equal(x.PolicyCertificateFields) } diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 105ea9f8..02d32d46 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -26,72 +26,87 @@ func NewDomainOwner() *DomainOwner { } } -// GenerateRCSR: Generate a Root Certificate Signing Request for one domain +// GeneratePolCertSignRequest: Generate a Root Certificate Signing Request for one domain // subject is the name of the domain: eg. 
fpki.com -func (do *DomainOwner) GenerateRCSR(domainName string, version int) (*common.RCSR, error) { +func (do *DomainOwner) GeneratePolCertSignRequest(domainName string, version int) (*common.PolicyCertificateSigningRequest, error) { // generate a fresh RSA key pair; new RSA key for every RCSR, thus every RPC newPrivKeyPair, err := do.generateRSAPrivKeyPair() if err != nil { - return nil, fmt.Errorf("GenerateRCSR | generateRSAPrivKey | %w", err) + return nil, fmt.Errorf("GeneratePolCertSignRequest | generateRSAPrivKey | %w", err) } // marshall public key into bytes pubKeyBytes, err := util.RSAPublicToPEM(&newPrivKeyPair.PublicKey) if err != nil { - return nil, fmt.Errorf("GenerateRCSR | RsaPublicKeyToPemBytes | %w", err) + return nil, fmt.Errorf("GeneratePolCertSignRequest | RsaPublicKeyToPemBytes | %w", err) } - rcsr := common.NewRCSR( - domainName, + req := common.NewPolicyCertificateSigningRequest( version, + "", // issuer + domainName, + 0, // serial number time.Now(), - common.RSA, + time.Now().Add(time.Microsecond), // not after + false, // is issuer pubKeyBytes, + common.RSA, common.SHA256, - nil, - nil, + time.Now(), // timestamp + nil, // policy attributes + nil, // owner signature ) // if domain owner still have the private key of the previous RPC -> can avoid cool-off period if prevKey, ok := do.privKeyByDomainName[domainName]; ok { - err = crypto.RCSRGenerateRPCSignature(rcsr, prevKey) + err = crypto.SignAsOwner(prevKey, req) if err != nil { - return nil, fmt.Errorf("GenerateRCSR | RCSRGenerateRPCSignature | %w", err) + return nil, fmt.Errorf("GeneratePolCertSignRequest | RCSRGenerateRPCSignature | %w", err) } } // generate signature for RCSR, using the new pub key - err = crypto.RCSRCreateSignature(newPrivKeyPair, rcsr) + err = crypto.SignAsOwner(newPrivKeyPair, req) if err != nil { - return nil, fmt.Errorf("GenerateRCSR | RCSRCreateSignature | %w", err) + return nil, fmt.Errorf("GeneratePolCertSignRequest | RCSRCreateSignature | %w", err) } 
do.privKeyByDomainName[domainName] = newPrivKeyPair - return rcsr, nil + return req, nil } -// GeneratePSR: generate one psr for one specific domain. -func (do *DomainOwner) GeneratePSR(domainName string, policy common.PolicyAttributes) (*common.PSR, error) { +// RandomPolicyCertificate: generate one psr for one specific domain. +func (do *DomainOwner) RandomPolicyCertificate(domainName string, policy common.PolicyAttributes, +) (*common.PolicyCertificateSigningRequest, error) { + rpcKeyPair, ok := do.privKeyByDomainName[domainName] if !ok { - return nil, fmt.Errorf("GeneratePSR | No valid RPC for domain %s", domainName) + return nil, fmt.Errorf("RandomPolicyCertificate | No valid RPC for domain %s", domainName) } - psr := &common.PSR{ - PolicyIssuerBase: common.PolicyIssuerBase{ - RawSubject: domainName, - }, - Policy: policy, - TimeStamp: time.Now(), - } + polCertSignReq := common.NewPolicyCertificateSigningRequest( + 0, // version + "", // issuer + domainName, // subject + 0, // serial number + time.Now(), + time.Now().Add(time.Microsecond), // not after + false, // is issuer + nil, // public key + common.RSA, + common.SHA256, + time.Now(), // timestamp + []common.PolicyAttributes{policy}, // policy attributes + nil, // owner's signature + ) - err := crypto.DomainOwnerSignPSR(rpcKeyPair, psr) + err := crypto.SignAsOwner(rpcKeyPair, polCertSignReq) if err != nil { - return nil, fmt.Errorf("GeneratePSR | DomainOwnerSignPSR | %w", err) + return nil, fmt.Errorf("RandomPolicyCertificate | DomainOwnerSignPSR | %w", err) } - return psr, nil + return polCertSignReq, nil } // generate new rsa key pair diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index bf2cd024..01c18030 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -28,14 +28,14 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "m5Lwb1nDco/+mrAdAQnue4WIne67qRACok/ESYmCsZ8="), + RootHash: tests.MustDecodeBase64(t, "3mI5Az/2fISqNSrfUQuWZAkvFuP2ozS2ad4+hnZ1Eh4="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, } // Mock up a RPC. - rpc := random.RandomRPC(t) + rpc := random.RandomPolicyCertificate(t) // Serialize it without SPTs. serializedRPC, err := common.ToJSON(rpc) @@ -84,7 +84,7 @@ func TestCheckRPC(t *testing.T) { // Mock a STH with the right root hash. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "0ePUVBOu4WOgAo1pW+JMUCGUVUWaK/6C7JqLJt9XWk4="), + RootHash: tests.MustDecodeBase64(t, "sVt7R5j3fpNSgUfYMH6r9cfWx9N3Nq9UXaLEpa6/KBQ="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -103,13 +103,16 @@ func TestCheckRPC(t *testing.T) { require.NoError(t, err) // Mock a RPC. - rpc := random.RandomRPC(t) - rpc.SPTs = []common.SPT{ - { - AddedTS: util.TimeFromSecs(99), - STH: serializedSTH, - PoI: serializedPoI, - }, + rpc := random.RandomPolicyCertificate(t) + rpc.SPTs = []common.SignedPolicyCertificateTimestamp{ + *common.NewSignedPolicyCertificateTimestamp( + "", 0, "", nil, + 0, + util.TimeFromSecs(99), + serializedSTH, + serializedPoI, + 0, nil, + ), } // Check VerifyRPC. @@ -117,44 +120,3 @@ func TestCheckRPC(t *testing.T) { err = logverifier.VerifyRPC(rpc) require.NoError(t, err) } - -func TestCheckSP(t *testing.T) { - // Because we are using "random" bytes deterministically here, set a fixed seed. - rand.Seed(3) - - // Mock a STH with the right root hash. - sth := &types.LogRootV1{ - TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "SqfdrDwpR1nlUZ/MGvC0qKH48CYcAHRlBspg6l/G060="), - TimestampNanos: 1661986742112252000, - Revision: 0, - Metadata: []byte{}, - } - serializedSTH, err := common.ToJSON(sth) - require.NoError(t, err) - - // Mock a PoI. 
- poi := []*trillian.Proof{ - { - LeafIndex: 1, - Hashes: [][]byte{random.RandomBytesForTest(t, 32)}, - }, - } - serializedPoI, err := common.ToJSON(poi) - require.NoError(t, err) - - // Mock an SP. - sp := random.RandomSP(t) - sp.SPTs = []common.SPT{ - { - AddedTS: util.TimeFromSecs(444), - STH: serializedSTH, - PoI: serializedPoI, - }, - } - - // Check VerifySP works. - logverifier := NewLogVerifier(nil) - err = logverifier.VerifySP(sp) - require.NoError(t, err) -} diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index ffdcb326..49ac40ef 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -1,6 +1,7 @@ package logverifier import ( + "encoding/base64" "fmt" "github.com/google/trillian" @@ -101,39 +102,21 @@ func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHa return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", err) } - // deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, - // leafHash, proof.Hashes) - // if err != nil { - // panic(err) - // } - // fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) + deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, + leafHash, proof.Hashes) + if err != nil { + panic(err) + } + fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) } // This is a logProof.RootMismatchError, aka different hash values. 
return fmt.Errorf("verification failed: different hashes") } -func (v *LogVerifier) VerifySP(sp *common.SP) error { - // Get the hash of the SP without SPTs: - SPTs := sp.SPTs - sp.SPTs = []common.SPT{} - serializedSP, err := common.ToJSON(sp) - if err != nil { - return fmt.Errorf("VerifySP | ToJSON | %w", err) - } - bytesHash := v.HashLeaf([]byte(serializedSP)) - // Restore the SPTs to the SP: - sp.SPTs = SPTs - - if err := v.verifySPTs(sp.SPTs, bytesHash); err != nil { - return fmt.Errorf("VerifySP | %w", err) - } - return nil -} - func (v *LogVerifier) VerifyRPC(rpc *common.PolicyCertificate) error { // Get the hash of the RPC without SPTs: SPTs := rpc.SPTs - rpc.SPTs = []common.SPT{} + rpc.SPTs = []common.SignedPolicyCertificateTimestamp{} serializedStruct, err := common.ToJSON(rpc) if err != nil { return fmt.Errorf("VerifyRPC | ToJSON | %w", err) @@ -148,7 +131,7 @@ func (v *LogVerifier) VerifyRPC(rpc *common.PolicyCertificate) error { return nil } -func (v *LogVerifier) verifySPTs(SPTs []common.SPT, dataHash []byte) error { +func (v *LogVerifier) verifySPTs(SPTs []common.SignedPolicyCertificateTimestamp, dataHash []byte) error { for _, p := range SPTs { // Load the STH from JSON. sthRaw, err := common.FromJSON(p.STH) diff --git a/pkg/mapserver/common/tools.go b/pkg/mapserver/common/tools.go index 8a097099..30e2fed5 100644 --- a/pkg/mapserver/common/tools.go +++ b/pkg/mapserver/common/tools.go @@ -77,37 +77,6 @@ func (domainEntry *DomainEntry) AddCert(cert *x509.Certificate, certChain []*x50 return false } -// AddPC: add a Policy Certificate to a domain entry. Return whether the domain entry is updated. 
-func (domainEntry *DomainEntry) AddPC(pc *common.SP) bool { - // caName := pc.CAName - // isFound := false - - // // iterate CAEntry list, find if the target CA list exists - // for i := range domainEntry.Entries { - // if domainEntry.Entries[i].CAName == caName { - // isFound = true - // // check whether this certificate is already registered - // if !domainEntry.Entries[i].PCs.Equal(*pc) { - // domainEntry.Entries[i].PCs = *pc - // return true - // } - // return false - // } - // } - - // // if CA list is not found - // if !isFound { - // // add a new CA list - // domainEntry.Entries = append(domainEntry.Entries, Entry{ - // CAName: caName, - // CAHash: common.SHA256Hash([]byte(caName)), - // PCs: *pc, - // }) - // return true - // } - return false -} - // AddRPC: add a Root Policy Certificate to a domain entry. Return whether the domain entry is updated. func (domainEntry *DomainEntry) AddRPC(rpc *common.PolicyCertificate) bool { // caName := rpc.CAName diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 59fa1206..5b13ed71 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -306,13 +306,18 @@ func (f *LogFetcher) getRawEntries( // GetPCAndRPCs: get PC and RPC from url // TODO(yongzhe): currently just generate random PC and RPC using top 1k domain names -func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker int) ([]*common.SP, []*common.PolicyCertificate, error) { - resultPCs := make([]*common.SP, 0) - resultRPCs := make([]*common.PolicyCertificate, 0) +func GetPCAndRPCs( + ctURL string, + startIndex int64, + endIndex int64, + numOfWorker int, +) ([]*common.PolicyCertificate, error) { + + resultPolCerts := make([]*common.PolicyCertificate, 0) f, err := os.Open(ctURL) if err != nil { - return nil, nil, fmt.Errorf("GetPCAndRPC | os.Open | %w", err) + return nil, fmt.Errorf("GetPCAndRPC | os.Open | %w", err) } defer f.Close() @@ -326,27 +331,29 @@ 
func GetPCAndRPCs(ctURL string, startIndex int64, endIndex int64, numOfWorker in continue } - resultPCs = append(resultPCs, common.NewSP( - domainName, - common.PolicyAttributes{}, - time.Now(), + resultPolCerts = append(resultPolCerts, common.NewPolicyCertificate( + 0, "", // CA name - 0, // serial number - generateRandomBytes(), - nil, // root cert signature - nil, // SPTs + domainName, + 0, // serial number + time.Now(), // not before + time.Now().Add(time.Microsecond), // not after + false, // is issuer + generateRandomBytes(), // public key + common.RSA, + common.SHA256, + time.Now(), // timestamp + nil, // policy attributes + nil, // owner signature + nil, // issuer signature + nil, // server timestamps )) - - rpc := &common.PolicyCertificate{} - rpc.RawSubject = domainName - rpc.NotBefore = time.Now() - resultRPCs = append(resultRPCs, rpc) } if err := scanner.Err(); err != nil { - return nil, nil, fmt.Errorf("GetPCAndRPC | scanner.Err | %w", err) + return nil, fmt.Errorf("GetPCAndRPC | scanner.Err | %w", err) } - return resultPCs, resultRPCs, nil + return resultPolCerts, nil } func generateRandomBytes() []byte { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index f40d8e51..2189bc42 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -95,19 +95,14 @@ func (mapUpdater *MapUpdater) UpdateCertsLocally(ctx context.Context, certList [ return UpdateWithKeepExisting(ctx, mapUpdater.dbConn, names, IDs, parentIDs, certs, expirations, nil) } -// UpdateRPCAndPC: update RPC and PC from url. Currently just mock PC and RPC -func (mapUpdater *MapUpdater) UpdateRPCAndPC(ctx context.Context, ctUrl string, startIdx, endIdx int64) error { +// UpdatePolicyCerts: update RPC and PC from url. 
Currently just mock PC and RPC +func (mapUpdater *MapUpdater) UpdatePolicyCerts(ctx context.Context, ctUrl string, startIdx, endIdx int64) error { // get PC and RPC first - pcList, rpcList, err := logfetcher.GetPCAndRPCs(ctUrl, startIdx, endIdx, 20) + rpcList, err := logfetcher.GetPCAndRPCs(ctUrl, startIdx, endIdx, 20) if err != nil { return fmt.Errorf("CollectCerts | GetPCAndRPC | %w", err) } - return mapUpdater.updateRPCAndPC(ctx, pcList, rpcList) -} - -// UpdateRPCAndPCLocally: update RPC and PC, given a rpc and sp. Currently just mock PC and RPC -func (mapUpdater *MapUpdater) UpdateRPCAndPCLocally(ctx context.Context, spList []*common.SP, rpcList []*common.PolicyCertificate) error { - return mapUpdater.updateRPCAndPC(ctx, spList, rpcList) + return mapUpdater.updatePolicyCerts(ctx, rpcList) } func (mapUpdater *MapUpdater) updateCerts( @@ -120,9 +115,8 @@ func (mapUpdater *MapUpdater) updateCerts( return nil } -func (mapUpdater *MapUpdater) updateRPCAndPC( +func (mapUpdater *MapUpdater) updatePolicyCerts( ctx context.Context, - sps []*common.SP, rpcs []*common.PolicyCertificate, ) error { diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 79450648..25f5a80c 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -5,7 +5,6 @@ import ( "encoding/hex" "fmt" "math/rand" - "os" "testing" "time" @@ -60,10 +59,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { } // Ingest two mock policies. - data, err := os.ReadFile("../../../tests/testdata/2-SPs.json") - require.NoError(t, err) - pols, err := util.LoadPoliciesFromRaw(data) - require.NoError(t, err) + pols := random.BuildTestRandomPolicyHierarchy(t, "a-domain-name.thing") // Update with certificates and policies. 
t0 := time.Now() diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index a7a86d8e..b9f998eb 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -29,16 +29,11 @@ type PCA struct { // pca's signing rsa key pair; used to sign rcsr -> rpc rsaKeyPair *rsa.PrivateKey - // store valid RPC (with SPT) in memory; Later replaced by data base - validRPCsByDomains map[string]*common.PolicyCertificate + // store valid Pol Cert (with server timestamps) in memory; Later replaced by data base + validPolCertsPerDomain map[string]*common.PolicyCertificate - validSPsByDomains map[string]*common.SP - - // RPC without SPT; pre-certificate - preRPCByDomains map[string]*common.PolicyCertificate - - // RPC without SPT; pre-certificate - preSPByDomains map[string]*common.SP + // Pol Cert without timestamps; pre-certificate + prePolCertsPerDomain map[string]*common.PolicyCertificate policyLogExgPath string @@ -65,24 +60,22 @@ func NewPCA(configPath string) (*PCA, error) { return nil, fmt.Errorf("NewPCA | LoadRSAKeyPairFromFile | %w", err) } return &PCA{ - validRPCsByDomains: make(map[string]*common.PolicyCertificate), - validSPsByDomains: make(map[string]*common.SP), - preRPCByDomains: make(map[string]*common.PolicyCertificate), - preSPByDomains: make(map[string]*common.SP), - logVerifier: logverifier.NewLogVerifier(nil), - caName: config.CAName, - outputPath: config.OutputPath, - policyLogExgPath: config.PolicyLogExgPath, - rsaKeyPair: keyPair, + validPolCertsPerDomain: make(map[string]*common.PolicyCertificate), + prePolCertsPerDomain: make(map[string]*common.PolicyCertificate), + logVerifier: logverifier.NewLogVerifier(nil), + caName: config.CAName, + outputPath: config.OutputPath, + policyLogExgPath: config.PolicyLogExgPath, + rsaKeyPair: keyPair, }, nil } // ReceiveSPTFromPolicyLog: When policy log returns SPT, this func will be called // this func will read the SPTs from the file, and process them func (pca *PCA) ReceiveSPTFromPolicyLog() error { - for k, v := range pca.preRPCByDomains { + 
for domainName, v := range pca.prePolCertsPerDomain { // read the corresponding spt - spt, err := common.JsonFileToSPT(pca.policyLogExgPath + "/spt/" + k) + spt, err := common.JsonFileToSPT(pca.policyLogExgPath + "/spt/" + domainName) if err != nil { return fmt.Errorf("ReceiveSPTFromPolicyLog | JsonFileToSPT | %w", err) } @@ -90,62 +83,33 @@ func (pca *PCA) ReceiveSPTFromPolicyLog() error { // verify the PoI, STH err = pca.verifySPTWithRPC(spt, v) if err == nil { - log.Printf("Get a new SPT for domain RPC: %s\n", k) - v.SPTs = []common.SPT{*spt} + log.Printf("Get a new SPT for domain RPC: %s\n", domainName) + v.SPTs = []common.SignedPolicyCertificateTimestamp{*spt} // move the rpc from pre-rpc to valid-rpc - delete(pca.preRPCByDomains, k) - pca.validRPCsByDomains[v.RawSubject] = v + delete(pca.prePolCertsPerDomain, domainName) + pca.validPolCertsPerDomain[v.RawSubject] = v } else { return fmt.Errorf("Fail to verify one SPT RPC") } - os.Remove(pca.policyLogExgPath + "/spt/" + k) - } - - for k, v := range pca.preSPByDomains { - // read the corresponding spt - spt, err := common.JsonFileToSPT(pca.policyLogExgPath + "/spt/" + k) - if err != nil { - return fmt.Errorf("ReceiveSPTFromPolicyLog | JsonFileToSPT | %w", err) - } - - // verify the PoI, STH - err = pca.verifySPTWithSP(spt, v) - if err == nil { - log.Printf("Get a new SPT for domain SP: %s\n", k) - v.SPTs = []common.SPT{*spt} - - // move the rpc from pre-rpc to valid-rpc - delete(pca.preRPCByDomains, k) - pca.validSPsByDomains[v.RawSubject] = v - } else { - return fmt.Errorf("Fail to verify one SPT SP") - } - os.Remove(pca.policyLogExgPath + "/spt/" + k) + os.Remove(pca.policyLogExgPath + "/spt/" + domainName) } return nil } -func (pca *PCA) OutputRPCAndSP() error { - for domain, rpc := range pca.validRPCsByDomains { +func (pca *PCA) OutputPolicyCertificate() error { + for domain, rpc := range pca.validPolCertsPerDomain { err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer+"_"+"rpc") if err != 
nil { - return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) - } - } - - for domain, rpc := range pca.validSPsByDomains { - err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer+"_"+"sp") - if err != nil { - return fmt.Errorf("OutputRPCAndSP | JsonStructToFile | %w", err) + return fmt.Errorf("OutputPolicyCertificate | JsonStructToFile | %w", err) } } return nil } // verify the SPT of the RPC. -func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.PolicyCertificate) error { +func (pca *PCA) verifySPTWithRPC(spt *common.SignedPolicyCertificateTimestamp, rpc *common.PolicyCertificate) error { proofs, logRoot, err := getProofsAndLogRoot(spt) if err != nil { return fmt.Errorf("verifySPTWithRPC | parsePoIAndSTH | %w", err) @@ -167,40 +131,17 @@ func (pca *PCA) verifySPTWithRPC(spt *common.SPT, rpc *common.PolicyCertificate) return nil } -// verify the SPT of the RPC. -func (pca *PCA) verifySPTWithSP(spt *common.SPT, sp *common.SP) error { - proofs, logRoot, err := getProofsAndLogRoot(spt) - if err != nil { - return fmt.Errorf("verifySPTWithSP | parsePoIAndSTH | %w", err) - } - - // get leaf hash - spBytes, err := common.ToJSON(sp) - if err != nil { - return fmt.Errorf("verifySPT | Json_StructToBytes | %w", err) - } - leafHash := pca.logVerifier.HashLeaf(spBytes) - - // verify the PoI - err = pca.logVerifier.VerifyInclusionByHash(logRoot, leafHash, proofs) - if err != nil { - return fmt.Errorf("verifySPT | VerifyInclusionByHash | %w", err) - } - - return nil -} - // TODO(yongzhe): modify this to make sure unique SN func (pca *PCA) increaseSerialNumber() { pca.serialNumber = pca.serialNumber + 1 } func (pca *PCA) ReturnValidRPC() map[string]*common.PolicyCertificate { - return pca.validRPCsByDomains + return pca.validPolCertsPerDomain } // getProofsAndLogRoot return the proofs and root parsed from the PoI and STH in JSON. 
-func getProofsAndLogRoot(spt *common.SPT) ([]*trillian.Proof, *types.LogRootV1, error) { +func getProofsAndLogRoot(spt *common.SignedPolicyCertificateTimestamp) ([]*trillian.Proof, *types.LogRootV1, error) { // Parse the PoI into []*trillian.Proof. serializedProofs, err := common.FromJSON(spt.PoI) if err != nil { diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go index 79a9a67a..c7dfb31e 100644 --- a/pkg/pca/sign_and_log.go +++ b/pkg/pca/sign_and_log.go @@ -4,24 +4,26 @@ import ( "encoding/base64" "fmt" "strconv" - "time" "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/common/crypto" ) // SignAndLogRCSR: sign the rcsr and generate a rpc -> store the rpc to the "fileExchange" folder; policy log will fetch rpc from the folder -func (pca *PCA) SignAndLogRCSR(rcsr *common.RCSR) error { +func (pca *PCA) SignAndLogRCSR(req *common.PolicyCertificateSigningRequest) error { // verify the signature in the rcsr; check if the domain's pub key is correct - err := crypto.RCSRVerifySignature(rcsr) + err := crypto.VerifyOwnerSignature(req) if err != nil { return fmt.Errorf("SignAndLogRCSR | RCSRVerifySignature | %w", err) } + // Set the issuer values from this CA. 
pca.increaseSerialNumber() + req.Issuer = pca.caName + req.RawSerialNumber = pca.serialNumber // generate pre-RPC (without SPT) - rpc, err := crypto.RCSRGenerateRPC(rcsr, time.Now(), pca.serialNumber, pca.rsaKeyPair, pca.caName) + rpc, err := crypto.SignAsIssuer(req, pca.rsaKeyPair) if err != nil { return fmt.Errorf("SignAndLogRCSR | RCSRGenerateRPC | %w", err) } @@ -32,7 +34,7 @@ func (pca *PCA) SignAndLogRCSR(rcsr *common.RCSR) error { } // add the rpc to preRPC(without SPT) - pca.preRPCByDomains[rpcHash] = rpc + pca.prePolCertsPerDomain[rpcHash] = rpc // send RPC to policy log err = pca.sendRPCToPolicyLog(rpc, strconv.Itoa(pca.serialNumber)) @@ -44,27 +46,27 @@ func (pca *PCA) SignAndLogRCSR(rcsr *common.RCSR) error { } // SignAndLogPSR: sign and log policy signing request -func (pca *PCA) SignAndLogSP(psr *common.PSR) error { - err := pca.findRPCAndVerifyPSR(psr) +func (pca *PCA) SignAndLogPolicyCertificate(req *common.PolicyCertificateSigningRequest) error { + err := pca.findRPCAndVerifyPSR(req) if err != nil { return fmt.Errorf("SignAndLogPSR | findRPCAndVerifyPSR | %w", err) } pca.increaseSerialNumber() - sp, err := crypto.CASignSP(psr, pca.rsaKeyPair, pca.caName, pca.serialNumber) + polCert, err := crypto.SignAsIssuer(req, pca.rsaKeyPair) if err != nil { return fmt.Errorf("SignAndLogPSR | CASignSP | %w", err) } - spHash, err := pca.getHashName(sp) + spHash, err := pca.getHashName(polCert) if err != nil { return fmt.Errorf("SignAndLogRCSR | getHashName | %w", err) } - pca.preSPByDomains[spHash] = sp + pca.prePolCertsPerDomain[spHash] = polCert - err = pca.sendSPToPolicyLog(sp, strconv.Itoa(sp.SerialNumber())) + err = pca.sendSPToPolicyLog(polCert, strconv.Itoa(polCert.SerialNumber())) if err != nil { return fmt.Errorf("SignAndLogPSR | sendSPToPolicyLog | %w", err) } @@ -72,13 +74,13 @@ func (pca *PCA) SignAndLogSP(psr *common.PSR) error { return nil } -func (pca *PCA) findRPCAndVerifyPSR(psr *common.PSR) error { - rpc, ok := 
pca.validRPCsByDomains[psr.Subject()] +func (pca *PCA) findRPCAndVerifyPSR(req *common.PolicyCertificateSigningRequest) error { + rpc, ok := pca.validPolCertsPerDomain[req.Subject()] if !ok { return fmt.Errorf("findRPCAndVerifyPSR | validRPCsByDomains | no valid rpc at this moment") } - err := crypto.VerifyPSRUsingRPC(psr, rpc) + err := crypto.VerifyOwnerSignatureWithPolCert(req, rpc) if err != nil { return fmt.Errorf("findRPCAndVerifyPSR | VerifyPSRUsingRPC | %w", err) } @@ -92,7 +94,7 @@ func (pca *PCA) sendRPCToPolicyLog(rpc *common.PolicyCertificate, fileName strin } // save file to output dir -func (pca *PCA) sendSPToPolicyLog(sp *common.SP, fileName string) error { +func (pca *PCA) sendSPToPolicyLog(sp *common.PolicyCertificate, fileName string) error { return common.ToJSONFile(sp, pca.policyLogExgPath+"/sp/"+fileName) } diff --git a/pkg/policylog/client/logclient.go b/pkg/policylog/client/logclient.go index 9d8e19d8..2d67b260 100644 --- a/pkg/policylog/client/logclient.go +++ b/pkg/policylog/client/logclient.go @@ -221,73 +221,6 @@ func (c *LogClient) QueueRPCs(ctx context.Context) (*QueueRPCResult, error) { return queueRPCResult, nil } -func (c *LogClient) QueueSPs(ctx context.Context) (*QueueRPCResult, error) { - queueRPCResult := &QueueRPCResult{} - - // read RPC from files - data, err := c.readSPFromFileToBytes() - if err != nil { - return nil, fmt.Errorf("QueueSPs | readRPCFromFileToBytes: %w", err) - } - - leafNum := len(data) - - start := time.Now() - - // add leaves - addLeavesErrors := c.AddLeaves(ctx, data) - - // process the errors from AddLeaves() - queueRPCResult.NumOfSucceedAddedLeaves = leafNum - len(addLeavesErrors.Errs) - queueRPCResult.FailToAddLeaves = addLeavesErrors.FailedLeaves - - // calculate time - elapsed := time.Since(start) - fmt.Println("queue leaves succeed!") - fmt.Println(elapsed) - - // record previous tree size - prevTreeSize := c.currentTreeSize - - // wait for the leaves to be added to the log (BUG FOUND!!!!!!) 
- for { - err = c.UpdateTreeSize(ctx) - if err != nil { - return queueRPCResult, fmt.Errorf("QueueSPs | UpdateTreeSize: %w", err) - } - if c.currentTreeSize == prevTreeSize+int64(queueRPCResult.NumOfSucceedAddedLeaves) { - break - } - // wait 50 ms before next query - time.Sleep(50 * time.Millisecond) - } - - start = time.Now() - - // fetch the inclusion - fetchInclusionResult := c.FetchInclusions(ctx, data) - - // precess fetch inclusion errors - queueRPCResult.NumOfRetrievedLeaves = len(fetchInclusionResult.PoIs) - queueRPCResult.FailToRetrievedLeaves = fetchInclusionResult.FailedLeaves - queueRPCResult.FailToRetrieveLeavesName = fetchInclusionResult.FailedLeavesName - queueRPCResult.RetrieveLeavesErrs = fetchInclusionResult.Errs - - elapsed = time.Since(start) - fmt.Println("fetch proofs succeed!") - fmt.Println(elapsed) - - // queueRPCResult will always be returned, even if error occurs in the future - - // store proof to SPT file - err = c.storeProofMapToSPT(fetchInclusionResult.PoIs) - if err != nil { - return queueRPCResult, fmt.Errorf("QueueSPs | storeProofMapToSPT: %w", err) - } - - return queueRPCResult, nil -} - // file -> RPC -> bytes func (c *LogClient) readRPCFromFileToBytes() ([][]byte, error) { data := [][]byte{} @@ -302,7 +235,7 @@ func (c *LogClient) readRPCFromFileToBytes() ([][]byte, error) { filaPath := c.config.PolicyLogExchangePath + "/rpc/" + filaName.Name() // read RPC from file - rpc, err := common.JsonFileToRPC(filaPath) + rpc, err := common.JsonFileToPolicyCert(filaPath) if err != nil { return nil, fmt.Errorf("readRPCFromFileToBytes | JsonFileToRPC %w", err) } @@ -321,39 +254,6 @@ func (c *LogClient) readRPCFromFileToBytes() ([][]byte, error) { return data, nil } -// file -> RPC -> bytes -func (c *LogClient) readSPFromFileToBytes() ([][]byte, error) { - data := [][]byte{} - - fileNames, err := ioutil.ReadDir(c.config.PolicyLogExchangePath + "/sp") - if err != nil { - return nil, fmt.Errorf("readSPFromFileToBytes | ReadDir | %w", err) - } 
- - // read SPT from "fileTransfer" folder - for _, filaName := range fileNames { - filePath := c.config.PolicyLogExchangePath + "/sp/" + filaName.Name() - - // read RPC from file - sp, err := common.JsonFileToSP(filePath) - if err != nil { - return nil, fmt.Errorf("readSPFromFileToBytes | JsonFileToRPC %w", err) - } - - // serialize sp - bytes, err := common.ToJSON(sp) - if err != nil { - return nil, fmt.Errorf("readSPFromFileToBytes | ToJSON: %w", err) - } - - data = append(data, bytes) - - // delete rpc - os.Remove(filePath) - } - return data, nil -} - // read elements in the proof map, and turn it into a SPT, then store them func (c *LogClient) storeProofMapToSPT(proofMap map[string]*PoIAndSTH) error { // for every proof in the map @@ -372,13 +272,14 @@ func (c *LogClient) storeProofMapToSPT(proofMap map[string]*PoIAndSTH) error { // attach PoI and STH to SPT // TODO(yongzhe): fill in the other fields - spt := &common.SPT{ - PoI: proofBytes, - STH: sth, - } + serverTimestamp := common.NewSignedPolicyCertificateTimestamp( + "", 0, "", nil, 0, time.Time{}, + sth, proofBytes, + 0, nil, + ) // store SPT to file - err = common.ToJSONFile(spt, c.config.PolicyLogExchangePath+"/spt/"+k) + err = common.ToJSONFile(serverTimestamp, c.config.PolicyLogExchangePath+"/spt/"+k) if err != nil { return fmt.Errorf("storeProofMapToSPT | JsonStructToFile: %w", err) } diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index c2c31157..111c7f86 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -33,32 +33,21 @@ func RandomX509Cert(t tests.T, domain string) *ctx509.Certificate { } } +// BuildTestRandomPolicyHierarchy creates two policy certificates for the given name. func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.PolicyDocument { - // Create one RPC and one SP for that name. 
- rpc := RandomRPC(t) - rpc.RawSubject = domainName - rpc.Issuer = "c0.com" - - data, err := common.ToJSON(rpc) - require.NoError(t, err) - rpc.RawJSON = data - - sp := common.NewSP( - domainName, - common.PolicyAttributes{}, - RandomTimeWithoutMonotonic(), - "c0.com", - 0, // serial number - RandomBytesForTest(t, 100), // CA signature - RandomBytesForTest(t, 100), // root cert signature - nil, // SPTs - ) - - data, err = common.ToJSON(sp) - require.NoError(t, err) - sp.RawJSON = data - - return []common.PolicyDocument{rpc, sp} + // Create two policy certificates for that name. + docs := make([]common.PolicyDocument, 2) + for i := range docs { + pc := RandomPolicyCertificate(t) + pc.RawSubject = domainName + pc.Issuer = "c0.com" + + data, err := common.ToJSON(pc) + require.NoError(t, err) + pc.RawJSON = data + docs[i] = pc + } + return docs } // BuildTestRandomCertHierarchy returns the certificates, chains, and names for two mock certificate @@ -110,12 +99,12 @@ func RandomTimeWithoutMonotonic() time.Time { ) } -func RandomSPT(t tests.T) *common.SPT { - return common.NewSPT( +func RandomSignedPolicyCertificateTimestamp(t tests.T) *common.SignedPolicyCertificateTimestamp { + return common.NewSignedPolicyCertificateTimestamp( "spt subject", - rand.Intn(10), + rand.Intn(10), // version "Issuer", - rand.Intn(100000), // 0-99,999 + RandomBytesForTest(t, 10), // log id 0x21, RandomTimeWithoutMonotonic(), RandomBytesForTest(t, 32), @@ -125,69 +114,43 @@ func RandomSPT(t tests.T) *common.SPT { ) } -func RandomRPC(t tests.T) *common.PolicyCertificate { - return common.NewPolicyCertificate( - "RPC subject", - nil, // policy attributes (empty for now) - rand.Intn(10), +func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest { + return common.NewPolicyCertificateSigningRequest( rand.Intn(10), - common.RSA, - RandomBytesForTest(t, 32), + "Issuer", + "RPC subject", + rand.Intn(1000), // serial number RandomTimeWithoutMonotonic(), 
RandomTimeWithoutMonotonic(), - "Issuer", + true, + RandomBytesForTest(t, 32), + common.RSA, common.SHA256, RandomTimeWithoutMonotonic(), + nil, // policy attributes (empty for now) RandomBytesForTest(t, 32), - RandomBytesForTest(t, 32), - []common.SPT{*RandomSPT(t), *RandomSPT(t)}, ) } -func RandomSPRT(t tests.T) *common.SPRT { - return common.NewSPRT(RandomSPT(t), rand.Intn(1000)) -} - -func RandomSP(t tests.T) *common.SP { - return common.NewSP( - "domainname.com", - common.PolicyAttributes{ - TrustedCA: []string{"ca1", "ca2"}, - }, +func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { + return common.NewPolicyCertificate( + rand.Intn(10), + "Issuer", + "RPC subject", + rand.Intn(1000), // serial number RandomTimeWithoutMonotonic(), - "ca1", - rand.Int(), - RandomBytesForTest(t, 32), - RandomBytesForTest(t, 32), - []common.SPT{ - *RandomSPT(t), - *RandomSPT(t), - *RandomSPT(t), - }, - ) -} - -func RandomPSR(t tests.T) *common.PSR { - return common.NewPSR( - "domain_name.com", - common.PolicyAttributes{ - TrustedCA: []string{"one CA", "another CA"}, - AllowedSubdomains: []string{"sub1.com", "sub2.com"}, - }, RandomTimeWithoutMonotonic(), + true, RandomBytesForTest(t, 32), - ) -} - -func RandomRCSR(t tests.T) *common.RCSR { - return common.NewRCSR( - "subject", - 6789, - RandomTimeWithoutMonotonic(), common.RSA, - RandomBytesForTest(t, 32), common.SHA256, + RandomTimeWithoutMonotonic(), + nil, // policy attributes (empty for now) RandomBytesForTest(t, 32), RandomBytesForTest(t, 32), + []common.SignedPolicyCertificateTimestamp{ + *RandomSignedPolicyCertificateTimestamp(t), + *RandomSignedPolicyCertificateTimestamp(t), + }, ) } diff --git a/tests/integration/domainowner_pca_policlog_interaction/main.go b/tests/integration/domainowner_pca_policlog_interaction/main.go index 80f01910..6e1d3510 100644 --- a/tests/integration/domainowner_pca_policlog_interaction/main.go +++ b/tests/integration/domainowner_pca_policlog_interaction/main.go @@ -31,12 +31,12 
@@ func main() { } // first rcsr - rcsr, err := do.GenerateRCSR("abc.com", 1) + rcsr, err := do.GeneratePolCertSignRequest("abc.com", 1) if err != nil { logErrAndQuit(err) } - if len(rcsr.PRCSignature) != 0 { + if len(rcsr.OwnerSignature) != 0 { panic("first rcsr error: should not have RPCSignature") } @@ -47,7 +47,7 @@ func main() { } // second rcsr - rcsr, err = do.GenerateRCSR("fpki.com", 1) + rcsr, err = do.GeneratePolCertSignRequest("fpki.com", 1) if err != nil { logErrAndQuit(err) } @@ -135,27 +135,27 @@ func main() { TrustedCA: []string{"US CA"}, } - psr1, err := do.GeneratePSR("abc.com", policy1) + pcsr1, err := do.RandomPolicyCertificate("abc.com", policy1) if err != nil { logErrAndQuit(err) } - psr2, err := do.GeneratePSR("fpki.com", policy2) + pcsr2, err := do.RandomPolicyCertificate("fpki.com", policy2) if err != nil { logErrAndQuit(err) } - err = pca.SignAndLogSP(psr1) + err = pca.SignAndLogPolicyCertificate(pcsr1) if err != nil { logErrAndQuit(err) } - err = pca.SignAndLogSP(psr2) + err = pca.SignAndLogPolicyCertificate(pcsr2) if err != nil { logErrAndQuit(err) } - logClient.QueueSPs(ctx) + // logClient.QueueSPs(ctx) err = pca.ReceiveSPTFromPolicyLog() if err != nil { @@ -166,7 +166,7 @@ func main() { logErrAndQuit(fmt.Errorf("queue error SP")) } - err = pca.OutputRPCAndSP() + err = pca.OutputPolicyCertificate() if err != nil { logErrAndQuit(err) } diff --git a/tests/testdata/2-SPs.json b/tests/testdata/2-SPs.json deleted file mode 100644 index 28e92cb7..00000000 --- a/tests/testdata/2-SPs.json +++ /dev/null @@ -1 +0,0 @@ -{"T":"[]","O":[{"T":"*sp","O":{"Policies":{"TrustedCA":["ca1","ca2"]},"TimeStamp":"2023-05-02T09:40:17+02:00","Subject":"domainname.com","CAName":"ca1","SerialNumber":4037200794235010051,"CASignature":"nmEjctgU98hQyR43KW84EsGS30tJSD4gwOlvmh/8l943ZIFArLyhhw==","RootCertSignature":"kHen0bcIYaGxHe+wTZYrUGtudXgZMsefapRNC55RG+lVG4iIWgvnig==","SPTs":[{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet 
you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"cIuAGWgQp6PEwoRXPc+yMOMX7CrGd7zCx4pYLDdcKrc2yIGDU8168w==","PoI":"+NpYjba39SgiQah1fyw8A1+0eLZrc6WqxP/9E7dFJhhhnRoJ/QtBCw==","STHSerialNumber":114378,"Signature":"e2pHQiXrj4X9wng4zpFn7mP9S9TfxSPMJVTUUd62WFWIxbGk0bvv1g=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"vNVMm/9KJcm2vbKPMZ6iUKiocW5nqPmLflxe5YqFGjpD2rB4KBSPOQ==","PoI":"jiKzbHeuR7WYZ5Vyzm9tmbL2gFlWCHzGERQg1VNxvDh5U8ga8EhJZw==","STHSerialNumber":114378,"Signature":"HzzW+0iCmiSXiHxOXzNN8l/KNJiwwMyhI8ErSU4KSUEdZFjUUngcDQ=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"aknFL07ew8EzJSpuG4T7sL/giNMpNx7qgf9jhEtz0DO5dkM/ZPu2rg==","PoI":"QNWytTKo5svQZ68N2/vOfMUpjOGDTWtMi3FWh5bD0TyN+rKdMfdbdA==","STHSerialNumber":114378,"Signature":"TokIBlevDHPLEB/glTfYw8Z0WjFP0aufNSiCG1Fev3WqHXioEOLCFQ=="}]}},{"T":"*sp","O":{"Policies":{"TrustedCA":["ca1","ca2"]},"TimeStamp":"2023-05-02T09:40:17+02:00","Subject":"domainname.com","CAName":"ca1","SerialNumber":3916589616287113937,"CASignature":"h83Ti3iPNpj8UTxhXSaURxVu1Vb3vqRe14HIoXMxchN1ZxlpwmvwSg==","RootCertSignature":"pjIDTWUsp/0uuA/vvR+A5lG1Mxpf9xsWc+5fsqf0LTm6MGxw0fWuag==","SPTs":[{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"LsUotCYOYbfxJSlM9aUxLfK6+1hWog+HGO/pmBXVq9lxGdEFcNToZQ==","PoI":"no2sa1DssKi9Gn/rO17XUR96br6DJDlqXD3lK9xx8/AAkY6s4k3i+g==","STHSerialNumber":114378,"Signature":"NBsavvZ/7fD6wlhDat56bixzNbTtG0dnAxDN7VN96Y99QL5lLlV9Iw=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet 
you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"8z3dXu5RL+2lgnC1lVXg4foT+2Ygu2TuLE1Si9L+bSxLFHmD1FSReg==","PoI":"FORkqLNl1amu6VftWf+UsscNVP8SbsxE9uVlohqY22v2Nx2dc3Nkng==","STHSerialNumber":114378,"Signature":"BAwmH8ycc8B+ux98RfsbCrAeBDFxzi5sN/pZlWxDHzJh1CF3GzXobg=="},{"Version":12368713,"Subject":"hohohoho","CAName":"I'm malicious CA, nice to meet you","LogID":1324123,"CertType":33,"AddedTS":"2023-05-02T09:40:17+02:00","STH":"ZWmjxlaCBgJv4gbsTZh1Jkt22dQogRZMUelL1oMhdyXToWkxKZpaSQ==","PoI":"Hxv/e9YzDmVtgerOpdq5j0xoHpMnYYi6cwVd0u7V83Ku2RDrobj2Vg==","STHSerialNumber":114378,"Signature":"gelEAAIa7l/pyPoJApVCPULr91KhAYz04vb1PkfGK+mVrZXvamKe2g=="}]}}]} \ No newline at end of file From b0669e5b397e065eb7340724601574cf9bf28ae1 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 29 Jun 2023 17:16:16 +0200 Subject: [PATCH 168/187] New SPTs, new PCA workflow. PolicyCertificate contains now the ID of the key used to sign, both for owner and issuer signature. Heavily modified PCA to use SPTs with signature instead of proof of presence. Logverier cannot verify now Policy certificates. 
--- pkg/common/crypto/crypto.go | 66 +++- pkg/common/crypto/crypto_test.go | 90 +++-- pkg/common/embedded_policies.go | 73 +--- pkg/common/policies.go | 48 ++- pkg/common/policy_common.go | 10 +- pkg/common/policy_issuance.go | 2 + pkg/domainowner/domainowner.go | 18 +- pkg/logverifier/logverifier_test.go | 62 +--- pkg/logverifier/verifier.go | 55 --- pkg/mapserver/logfetcher/logfetcher.go | 2 + pkg/pca/config.go | 17 +- pkg/pca/pca.go | 339 +++++++++++------- pkg/pca/pca_test.go | 241 ++++++++++++- pkg/pca/sign_and_log.go | 113 ------ pkg/pca/testdata/pca_config.json | 19 +- pkg/pca/testdata/rpc.json | 1 + pkg/policylog/client/logclient.go | 49 --- pkg/tests/random/random.go | 28 +- pkg/util/pem.go | 16 + .../main.go | 8 +- .../crypto => tests}/testdata/clientcert.pem | 0 .../crypto => tests}/testdata/clientkey.pem | 0 .../crypto => tests}/testdata/servercert.pem | 0 .../crypto => tests}/testdata/serverkey.pem | 0 24 files changed, 702 insertions(+), 555 deletions(-) delete mode 100644 pkg/pca/sign_and_log.go create mode 100644 pkg/pca/testdata/rpc.json rename {pkg/common/crypto => tests}/testdata/clientcert.pem (100%) rename {pkg/common/crypto => tests}/testdata/clientkey.pem (100%) rename {pkg/common/crypto => tests}/testdata/servercert.pem (100%) rename {pkg/common/crypto => tests}/testdata/serverkey.pem (100%) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 7ac0a8eb..e98b2d5e 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -24,21 +24,36 @@ func SignBytes(b []byte, key *rsa.PrivateKey) ([]byte, error) { // SignAsOwner generates a signature using the owner's key, and fills the owner signature in // the policy certificate signing request. 
-func SignAsOwner(domainOwnerPrivKey *rsa.PrivateKey, req *common.PolicyCertificateSigningRequest) error { - // clear signature; normally should be empty - req.OwnerSignature = []byte{} +func SignAsOwner(ownerKey *rsa.PrivateKey, req *common.PolicyCertificateSigningRequest) error { + // Clear owner signature (it's normally empty). + req.OwnerSignature = nil - signature, err := signStructRSASHA256(req, domainOwnerPrivKey) + // Identify the public key of the signer with its hash. + // In CT, the hash of the public key is calculated over the DER-encoded + // SubjectPublicKeyInfo object + // From the MarshalPKIXPublicKey go docs: + // MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form. + // The encoded public key is a SubjectPublicKeyInfo structure + // (see RFC 5280, Section 4.1). + pubKeyBytes, err := ctx509.MarshalPKIXPublicKey(&ownerKey.PublicKey) + if err != nil { + return err + } + req.OwnerPubKeyHash = common.SHA256Hash(pubKeyBytes) + + // Sign using the owner's private key and including the hash of its public key. + req.OwnerSignature, err = signStructRSASHA256(req, ownerKey) if err != nil { return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) } - req.OwnerSignature = signature return nil } // VerifyOwnerSignature verifies the owner's signature using the public key. 
-func VerifyOwnerSignature(req *common.PolicyCertificateSigningRequest) error { +func VerifyOwnerSignature(req *common.PolicyCertificateSigningRequest, + pubKey *rsa.PublicKey) error { + // Serialize without signature: sig := req.OwnerSignature req.OwnerSignature = nil @@ -48,12 +63,6 @@ func VerifyOwnerSignature(req *common.PolicyCertificateSigningRequest) error { } req.OwnerSignature = sig - // Get the pub key: - pubKey, err := util.PEMToRSAPublic(req.PublicKey) - if err != nil { - return fmt.Errorf("RCSRVerifySignature | PemBytesToRsaPublicKey | %w", err) - } - hashOutput := sha256.Sum256(serializedStruct) err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], req.OwnerSignature) if err != nil { @@ -74,13 +83,13 @@ func VerifyOwnerSignatureWithPolCert(req *common.PolicyCertificateSigningRequest } req.OwnerSignature = sig - pubKey, err := util.PEMToRSAPublic(polCert.PublicKey) + pubKey, err := util.DERBytesToRSAPublic(polCert.PublicKey) if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | PemBytesToRsaPublicKey | %w", err) + return err } - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], req.OwnerSignature) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, + common.SHA256Hash(serializedStruct), req.OwnerSignature) if err != nil { return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) } @@ -88,11 +97,12 @@ func VerifyOwnerSignatureWithPolCert(req *common.PolicyCertificateSigningRequest return nil } -// SignAsIssuer is called by the Policy CA. It signs the request and generates a +// SignRequestAsIssuer is called by the Policy CA. It signs the request and generates a // PolicyCertificate. The SPTs field is (should be) empty. 
-func SignAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *rsa.PrivateKey, +func SignRequestAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *rsa.PrivateKey, ) (*common.PolicyCertificate, error) { + // Create a certificate policy inheriting all values from the request. cert := common.NewPolicyCertificate( req.Version, req.Issuer, @@ -107,19 +117,37 @@ func SignAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *rsa.Priv req.TimeStamp, req.PolicyAttributes, req.OwnerSignature, + req.OwnerPubKeyHash, nil, // issuer signature + nil, // issuer pub key hash nil, // SPTs ) + // Sign the policy certificate. signature, err := signStructRSASHA256(cert, privKey) if err != nil { return nil, fmt.Errorf("RCSRGenerateRPC | SignStructRSASHA256 | %w", err) } - cert.IssuerSignature = signature + return cert, nil } +// SignPolicyCertificateAsIssuer is called by PCAs after they have received the SPTs from the +// CT log servers. The SPTs are embedded in the policy certificate passed to this function, and +// the PCA uses its key to create a signature. The policy certificate is passes with an empty +// IssuerSignature (this function does not remove IssuerSignature if it's set). 
+func SignPolicyCertificateAsIssuer(pc *common.PolicyCertificate, privKey *rsa.PrivateKey, +) (*common.PolicyCertificate, error) { + + signature, err := signStructRSASHA256(pc, privKey) + if err != nil { + return nil, err + } + pc.IssuerSignature = signature + return pc, nil +} + // VerifyIssuerSignature: used by domain owner, check whether CA signature is correct func VerifyIssuerSignature(caCert *ctx509.Certificate, rpc *common.PolicyCertificate) error { pubKey := caCert.PublicKey.(*rsa.PublicKey) diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 76507a38..f486fbc6 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -1,66 +1,83 @@ package crypto_test import ( + libcrypto "crypto" + "crypto/rsa" "testing" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/common/crypto" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/util" ) -// TestSignatureOfRCSR: Generate RCSR -> generate signature for RCSR -> verify signature -func TestSignatureOfRCSR(t *testing.T) { - privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") +func TestSignatureOfPolicyCertSignRequest(t *testing.T) { + ownerPriv, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") require.NoError(t, err, "load RSA key error") - pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) - require.NoError(t, err, "RSA key to bytes error") - test := random.RandomPolCertSignRequest(t) - test.PublicKey = pubKeyBytes + request := random.RandomPolCertSignRequest(t) + request.IsIssuer = true - err = crypto.SignAsOwner(privKey, test) + // Sign as owner. 
+ err = crypto.SignAsOwner(ownerPriv, request) require.NoError(t, err, "RCSR sign signature error") - err = crypto.VerifyOwnerSignature(test) + // Serialize the request (w/out signature) to bytes to later check its hash value. + sig := request.OwnerSignature + request.OwnerSignature = nil + serializedRequest, err := common.ToJSON(request) + require.NoError(t, err) + request.OwnerSignature = sig + + // Check that the signature corresponds to the owner's key. + err = rsa.VerifyPKCS1v15(&ownerPriv.PublicKey, libcrypto.SHA256, + common.SHA256Hash(serializedRequest), request.OwnerSignature) + require.NoError(t, err) + + // Check that we have the hash of the public key of the owner's key. + // The bytes of the public key have to be obtained via a call to ctx509.MarshalPKIXPublicKey + pubKeyBytes, err := ctx509.MarshalPKIXPublicKey(&ownerPriv.PublicKey) + require.NoError(t, err) + require.Equal(t, common.SHA256Hash(pubKeyBytes), request.OwnerPubKeyHash) + + // Also check that our VerifyOwnerSignature works as expected. + err = crypto.VerifyOwnerSignature(request, &ownerPriv.PublicKey) require.NoError(t, err, "RCSR verify signature error") } // TestIssuanceOfRPC: check if the CA signature is correct -func TestIssuanceOfRPC(t *testing.T) { +func TestSignAsIssuer(t *testing.T) { + // Load crypto material for owner and issuer. + ownerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") + require.NoError(t, err) + issuerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/serverkey.pem") + require.NoError(t, err) + issuerCert, err := util.CertificateFromPEMFile("../../../tests/testdata/servercert.pem") + require.NoError(t, err, "X509 Cert From File error") + // Phase 1: domain owner generates a policy certificate signing request. 
- privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") - require.NoError(t, err, "Load RSA Key Pair From File error") - pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) - require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") req := random.RandomPolCertSignRequest(t) - req.PublicKey = pubKeyBytes // generate signature for request - err = crypto.SignAsOwner(privKey, req) - require.NoError(t, err, "RCSR Create Signature error") + err = crypto.SignAsOwner(ownerKey, req) + require.NoError(t, err) // Phase 2: pca issues policy certificate. - err = crypto.VerifyOwnerSignature(req) - // Validate the signature in rcsr + // we can validate the signature in the request, but in this test we know it's correct. + err = crypto.VerifyOwnerSignature(req, &ownerKey.PublicKey) require.NoError(t, err, "RCSR Verify Signature error") - - pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") - require.NoError(t, err) - rpc, err := crypto.SignAsIssuer(req, pcaPrivKey) + // Sign as issuer. 
+ polCert, err := crypto.SignRequestAsIssuer(req, issuerKey) require.NoError(t, err, "RCSR Generate RPC error") - - assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") + assert.Equal(t, len(polCert.SPTs), 0, "SPTs must be empty right after first issuer signature") // ------------------------------------- // phase 3: domain owner check rpc // ------------------------------------- - - caCert, err := util.CertificateFromPEMFile("./testdata/servercert.pem") - require.NoError(t, err, "X509 Cert From File error") - - err = crypto.VerifyIssuerSignature(caCert, rpc) + err = crypto.VerifyIssuerSignature(issuerCert, polCert) require.NoError(t, err, "RPC Verify CA Signature error") } @@ -69,11 +86,14 @@ func TestIssuanceOfSP(t *testing.T) { // ------------------------------------- // phase 1: domain owner generate rcsr // ------------------------------------- - privKey, err := util.RSAKeyFromPEMFile("./testdata/clientkey.pem") + privKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") require.NoError(t, err, "Load RSA Key Pair From File error") - pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) + // pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) + // require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") + pubKeyBytes, err := util.RSAPublicToDERBytes(&privKey.PublicKey) require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") + req := random.RandomPolCertSignRequest(t) req.PublicKey = pubKeyBytes @@ -85,12 +105,12 @@ func TestIssuanceOfSP(t *testing.T) { // phase 2: pca issue rpc // ------------------------------------- // validate the signature in rcsr - err = crypto.VerifyOwnerSignature(req) + err = crypto.VerifyOwnerSignature(req, &privKey.PublicKey) require.NoError(t, err, "RCSR Verify Signature error") - pcaPrivKey, err := util.RSAKeyFromPEMFile("./testdata/serverkey.pem") + pcaPrivKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/serverkey.pem") require.NoError(t, err) - rpc, err := 
crypto.SignAsIssuer(req, pcaPrivKey) + rpc, err := crypto.SignRequestAsIssuer(req, pcaPrivKey) require.NoError(t, err, "RCSR Generate RPC error") assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index d70e3e53..786f60da 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -13,126 +13,89 @@ func (p EmbeddedPolicyBase) Equal(x EmbeddedPolicyBase) bool { return p.PolicyPartBase.Equal(x.PolicyPartBase) } -// . SignedThingTimestamp is common to all timestamps returned by a policy log server. -type SignedThingTimestamp struct { +// SignedEntryTimestamp is common to all timestamps returned by a policy log server. +type SignedEntryTimestamp struct { EmbeddedPolicyBase - LogID []byte `json:",omitempty"` - CertType uint8 `json:",omitempty"` - AddedTS time.Time `json:",omitempty"` - STH []byte `json:",omitempty"` - PoI []byte `json:",omitempty"` - STHSerialNumber int `json:",omitempty"` - Signature []byte `json:",omitempty"` + LogID []byte `json:",omitempty"` // SHA256 of public key of CT log server. + AddedTS time.Time `json:",omitempty"` // When it was added to the CT log server. + Signature []byte `json:",omitempty"` // Using public key of CT log server. } // SignedPolicyCertificateTimestamp is a signed policy certificate timestamp. type SignedPolicyCertificateTimestamp struct { - SignedThingTimestamp + SignedEntryTimestamp } // SignedPolicyCertificateRevocationTimestamp is a signed policy certificate revocation timestamp. 
type SignedPolicyCertificateRevocationTimestamp struct { - SignedThingTimestamp + SignedEntryTimestamp Reason int `json:",omitempty"` } -func NewSignedThingTimestamp( - subject string, +func NewSignedEntryTimestamp( version int, issuer string, logID []byte, - certType uint8, addedTS time.Time, - sTH []byte, - poI []byte, - sTHSerialNumber int, signature []byte, -) *SignedThingTimestamp { +) *SignedEntryTimestamp { - return &SignedThingTimestamp{ + return &SignedEntryTimestamp{ EmbeddedPolicyBase: EmbeddedPolicyBase{ PolicyPartBase: PolicyPartBase{ Version: version, Issuer: issuer, }, }, - LogID: logID, - CertType: certType, - AddedTS: addedTS, - STH: sTH, - PoI: poI, - STHSerialNumber: sTHSerialNumber, - Signature: signature, + LogID: logID, + AddedTS: addedTS, + Signature: signature, } } -func (s SignedThingTimestamp) Equal(x SignedThingTimestamp) bool { +func (s SignedEntryTimestamp) Equal(x SignedEntryTimestamp) bool { return s.EmbeddedPolicyBase.Equal(x.EmbeddedPolicyBase) && bytes.Equal(s.LogID, x.LogID) && - s.CertType == x.CertType && s.AddedTS.Equal(x.AddedTS) && - bytes.Equal(s.STH, x.STH) && - bytes.Equal(s.PoI, x.PoI) && - s.STHSerialNumber == x.STHSerialNumber && bytes.Equal(s.Signature, x.Signature) } func NewSignedPolicyCertificateTimestamp( - subject string, version int, issuer string, logID []byte, - certType uint8, addedTS time.Time, - sTH []byte, - poI []byte, - sTHSerialNumber int, signature []byte, ) *SignedPolicyCertificateTimestamp { return &SignedPolicyCertificateTimestamp{ - SignedThingTimestamp: *NewSignedThingTimestamp( - subject, + SignedEntryTimestamp: *NewSignedEntryTimestamp( version, issuer, logID, - certType, addedTS, - sTH, - poI, - sTHSerialNumber, signature, ), } } func (t SignedPolicyCertificateTimestamp) Equal(x SignedPolicyCertificateTimestamp) bool { - return t.SignedThingTimestamp.Equal(x.SignedThingTimestamp) + return t.SignedEntryTimestamp.Equal(x.SignedEntryTimestamp) } func NewSignedPolicyCertificateRevocationTimestamp( - 
subject string, version int, issuer string, logID []byte, - certType uint8, addedTS time.Time, - sTH []byte, - poI []byte, - sTHSerialNumber int, signature []byte, reason int, ) *SignedPolicyCertificateRevocationTimestamp { return &SignedPolicyCertificateRevocationTimestamp{ - SignedThingTimestamp: *NewSignedThingTimestamp( - subject, + SignedEntryTimestamp: *NewSignedEntryTimestamp( version, issuer, logID, - certType, addedTS, - sTH, - poI, - sTHSerialNumber, signature, ), Reason: reason, @@ -140,6 +103,6 @@ func NewSignedPolicyCertificateRevocationTimestamp( } func (t SignedPolicyCertificateRevocationTimestamp) Equal(x SignedPolicyCertificateRevocationTimestamp) bool { - return t.SignedThingTimestamp.Equal(x.SignedThingTimestamp) && + return t.SignedEntryTimestamp.Equal(x.SignedEntryTimestamp) && t.Reason == x.Reason } diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 2ae7bfaa..eb0f5e52 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -32,19 +32,21 @@ type PolicyCertificateFields struct { NotBefore time.Time `json:",omitempty"` NotAfter time.Time `json:",omitempty"` IsIssuer bool `json:",omitempty"` - PublicKey []byte `json:",omitempty"` // In PEM format + PublicKey []byte `json:",omitempty"` // DER-encoded SubjectPublicKeyInfo PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` PolicyAttributes []PolicyAttributes `json:",omitempty"` OwnerSignature []byte `json:",omitempty"` + OwnerPubKeyHash []byte `json:",omitempty"` // SHA256 of owner's public key } // PolicyCertificate is a Root Policy Certificate. 
type PolicyCertificate struct { PolicyCertificateFields - IssuerSignature []byte `json:",omitempty"` - SPTs []SignedPolicyCertificateTimestamp `json:",omitempty"` + IssuerSignature []byte `json:",omitempty"` + IssuerPubKeyHash []byte `json:",omitempty"` + SPTs []SignedPolicyCertificateTimestamp `json:",omitempty"` } // PolicyAttributes is a domain policy that specifies what is or not acceptable for a domain. @@ -55,14 +57,16 @@ type PolicyAttributes struct { type PolicyCertificateRevocationFields struct { PolicyCertificateBase - TimeStamp time.Time `json:",omitempty"` - OwnerSignature []byte `json:",omitempty"` + TimeStamp time.Time `json:",omitempty"` + OwnerSignature []byte `json:",omitempty"` + OwnerPubKeyHash []byte `json:",omitempty"` // SHA256 of owner's public key } type PolicyCertificateRevocation struct { PolicyCertificateRevocationFields - IssuerSignature []byte `json:",omitempty"` - SPCRTs []SignedPolicyCertificateRevocationTimestamp `json:",omitempty"` + IssuerSignature []byte `json:",omitempty"` + IssuerPubKeyHash []byte `json:",omitempty"` + SPCRTs []SignedPolicyCertificateRevocationTimestamp `json:",omitempty"` } func NewPolicyCertificateFields( @@ -79,6 +83,7 @@ func NewPolicyCertificateFields( timeStamp time.Time, policyAttributes []PolicyAttributes, ownerSignature []byte, + ownerPubKeyHash []byte, ) *PolicyCertificateFields { return &PolicyCertificateFields{ PolicyCertificateBase: PolicyCertificateBase{ @@ -98,6 +103,7 @@ func NewPolicyCertificateFields( TimeStamp: timeStamp, PolicyAttributes: policyAttributes, OwnerSignature: ownerSignature, + OwnerPubKeyHash: ownerPubKeyHash, } } @@ -110,6 +116,7 @@ func (c PolicyCertificateFields) Equal(x PolicyCertificateFields) bool { c.SignatureAlgorithm == x.SignatureAlgorithm && c.TimeStamp.Equal(x.TimeStamp) && bytes.Equal(c.OwnerSignature, x.OwnerSignature) && + bytes.Equal(c.OwnerPubKeyHash, x.OwnerPubKeyHash) && equalSlices(c.PolicyAttributes, x.PolicyAttributes) } @@ -127,7 +134,9 @@ func 
NewPolicyCertificate( timeStamp time.Time, policyAttributes []PolicyAttributes, ownerSignature []byte, + ownerPubKeyHash []byte, issuerSignature []byte, + issuerPubKeyHash []byte, SPTs []SignedPolicyCertificateTimestamp, ) *PolicyCertificate { @@ -146,15 +155,18 @@ func NewPolicyCertificate( timeStamp, policyAttributes, ownerSignature, + ownerPubKeyHash, ), - IssuerSignature: issuerSignature, - SPTs: SPTs, + IssuerSignature: issuerSignature, + IssuerPubKeyHash: issuerPubKeyHash, + SPTs: SPTs, } } func (c PolicyCertificate) Equal(x PolicyCertificate) bool { return c.PolicyCertificateFields.Equal(x.PolicyCertificateFields) && bytes.Equal(c.IssuerSignature, x.IssuerSignature) && + bytes.Equal(c.IssuerPubKeyHash, x.IssuerPubKeyHash) && equalSlices(c.SPTs, x.SPTs) } @@ -171,6 +183,7 @@ func NewPolicyCertificateRevocationFields( serialNumber int, timeStamp time.Time, ownerSignature []byte, + ownerPubKeyHash []byte, ) *PolicyCertificateRevocationFields { return &PolicyCertificateRevocationFields{ PolicyCertificateBase: PolicyCertificateBase{ @@ -181,15 +194,17 @@ func NewPolicyCertificateRevocationFields( RawSubject: subject, RawSerialNumber: serialNumber, }, - TimeStamp: timeStamp, - OwnerSignature: ownerSignature, + TimeStamp: timeStamp, + OwnerSignature: ownerSignature, + OwnerPubKeyHash: ownerPubKeyHash, } } func (c PolicyCertificateRevocationFields) Equal(x PolicyCertificateRevocationFields) bool { return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && c.TimeStamp == x.TimeStamp && - bytes.Equal(c.OwnerSignature, x.OwnerSignature) + bytes.Equal(c.OwnerSignature, x.OwnerSignature) && + bytes.Equal(c.OwnerPubKeyHash, x.OwnerPubKeyHash) } func NewPolicyCertificateRevocation( @@ -199,7 +214,9 @@ func NewPolicyCertificateRevocation( serialNumber int, timeStamp time.Time, ownerSignature []byte, + ownerPubKeyHash []byte, issuerSignature []byte, + issuerPubKeyHash []byte, serverTimestamps []SignedPolicyCertificateRevocationTimestamp, ) *PolicyCertificateRevocation 
{ return &PolicyCertificateRevocation{ @@ -210,15 +227,18 @@ func NewPolicyCertificateRevocation( serialNumber, timeStamp, ownerSignature, + ownerPubKeyHash, ), - IssuerSignature: issuerSignature, - SPCRTs: serverTimestamps, + IssuerSignature: issuerSignature, + IssuerPubKeyHash: issuerPubKeyHash, + SPCRTs: serverTimestamps, } } func (r PolicyCertificateRevocation) Equal(x PolicyCertificateRevocation) bool { return r.PolicyCertificateRevocationFields.Equal(x.PolicyCertificateRevocationFields) && bytes.Equal(r.IssuerSignature, x.IssuerSignature) && + bytes.Equal(r.IssuerPubKeyHash, x.IssuerPubKeyHash) && equalSlices(r.SPCRTs, x.SPCRTs) } diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index 753f2d28..78d97c63 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -5,6 +5,12 @@ type MarshallableDocument interface { Raw() []byte // Returns the Raw JSON this object was unmarshaled from (nil if none). } +type MarshallableDocumentBase struct { + RawJSON []byte `json:"-"` // omit from JSON (un)marshaling +} + +func (o MarshallableDocumentBase) Raw() []byte { return o.RawJSON } + // PolicyPart is an interface that is implemented by all objects that are part of the set // of "policy objects". A policy object is that one that represents functionality of policies // for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. @@ -14,13 +20,11 @@ type PolicyPart interface { // PolicyPartBase is the common type to all policy documents. type PolicyPartBase struct { - RawJSON []byte `json:"-"` // omit from JSON (un)marshaling + MarshallableDocumentBase Version int `json:",omitempty"` Issuer string `json:",omitempty"` } -func (o PolicyPartBase) Raw() []byte { return o.RawJSON } - func (o PolicyPartBase) Equal(x PolicyPartBase) bool { // Ignore the RawJSON component, use just the regular fields. 
return o.Version == x.Version && diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 37462ef1..fe5e1b0c 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -27,6 +27,7 @@ func NewPolicyCertificateSigningRequest( timeStamp time.Time, policyAttributes []PolicyAttributes, ownerSignature []byte, + ownerPubKeyHash []byte, ) *PolicyCertificateSigningRequest { return &PolicyCertificateSigningRequest{ @@ -44,6 +45,7 @@ func NewPolicyCertificateSigningRequest( timeStamp, policyAttributes, ownerSignature, + ownerPubKeyHash, ), } } diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 02d32d46..258d1831 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -28,22 +28,21 @@ func NewDomainOwner() *DomainOwner { // GeneratePolCertSignRequest: Generate a Root Certificate Signing Request for one domain // subject is the name of the domain: eg. fpki.com -func (do *DomainOwner) GeneratePolCertSignRequest(domainName string, version int) (*common.PolicyCertificateSigningRequest, error) { - // generate a fresh RSA key pair; new RSA key for every RCSR, thus every RPC +func (do *DomainOwner) GeneratePolCertSignRequest(issuer, domainName string, version int) (*common.PolicyCertificateSigningRequest, error) { + // Generate a fresh RSA key pair; new RSA key for every RCSR, thus every RPC newPrivKeyPair, err := do.generateRSAPrivKeyPair() if err != nil { return nil, fmt.Errorf("GeneratePolCertSignRequest | generateRSAPrivKey | %w", err) } - // marshall public key into bytes - pubKeyBytes, err := util.RSAPublicToPEM(&newPrivKeyPair.PublicKey) + pubKeyBytes, err := util.RSAPublicToDERBytes(&newPrivKeyPair.PublicKey) if err != nil { return nil, fmt.Errorf("GeneratePolCertSignRequest | RsaPublicKeyToPemBytes | %w", err) } req := common.NewPolicyCertificateSigningRequest( version, - "", // issuer + issuer, // issuer domainName, 0, // serial number time.Now(), @@ -55,6 +54,7 @@ 
func (do *DomainOwner) GeneratePolCertSignRequest(domainName string, version int time.Now(), // timestamp nil, // policy attributes nil, // owner signature + nil, // owner pub key hash ) // if domain owner still have the private key of the previous RPC -> can avoid cool-off period @@ -65,12 +65,7 @@ func (do *DomainOwner) GeneratePolCertSignRequest(domainName string, version int } } - // generate signature for RCSR, using the new pub key - err = crypto.SignAsOwner(newPrivKeyPair, req) - if err != nil { - return nil, fmt.Errorf("GeneratePolCertSignRequest | RCSRCreateSignature | %w", err) - } - + // Store the new keys for this domain as the latest owner keys. do.privKeyByDomainName[domainName] = newPrivKeyPair return req, nil @@ -99,6 +94,7 @@ func (do *DomainOwner) RandomPolicyCertificate(domainName string, policy common. time.Now(), // timestamp []common.PolicyAttributes{policy}, // policy attributes nil, // owner's signature + nil, // owner pub key hash ) err := crypto.SignAsOwner(rpcKeyPair, polCertSignReq) diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 01c18030..5b6a3595 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -7,6 +7,7 @@ import ( "github.com/google/trillian" "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/common/crypto" "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/util" @@ -14,6 +15,21 @@ import ( "github.com/stretchr/testify/require" ) +func TestVerifySPT(t *testing.T) { + ownwerPriv, err := util.RSAKeyFromPEMFile("../../tests/testdata/clientkey.pem") + require.NoError(t, err, "load RSA key error") + issuerPriv, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + require.NoError(t, err, "load RSA key error") + + req := 
random.RandomPolCertSignRequest(t) + err = crypto.SignAsOwner(ownwerPriv, req) + require.NoError(t, err) + + cert, err := crypto.SignRequestAsIssuer(req, issuerPriv) + require.NoError(t, err) + _ = cert +} + func TestVerifyInclusionByHash(t *testing.T) { // Because we are using "random" bytes deterministically here, set a fixed seed. rand.Seed(1) @@ -28,7 +44,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "3mI5Az/2fISqNSrfUQuWZAkvFuP2ozS2ad4+hnZ1Eh4="), + RootHash: tests.MustDecodeBase64(t, "VZfa96+e9du6zpvFD/ZlMFMiTqfruk71mqzcg+NG350="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, @@ -76,47 +92,3 @@ func TestConsistencyBetweenSTH(t *testing.T) { _, err := logverifier.VerifyRoot(sth, newSTH, consistencyProof) require.NoError(t, err, "Verify Root Error") } - -func TestCheckRPC(t *testing.T) { - // Because we are using "random" bytes deterministically here, set a fixed seed. - rand.Seed(1) - - // Mock a STH with the right root hash. - sth := &types.LogRootV1{ - TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "sVt7R5j3fpNSgUfYMH6r9cfWx9N3Nq9UXaLEpa6/KBQ="), - TimestampNanos: 1661986742112252000, - Revision: 0, - Metadata: []byte{}, - } - serializedSTH, err := common.ToJSON(sth) - require.NoError(t, err) - - // Mock a PoI. - poi := []*trillian.Proof{ - { - LeafIndex: 1, - Hashes: [][]byte{random.RandomBytesForTest(t, 32)}, - }, - } - serializedPoI, err := common.ToJSON(poi) - require.NoError(t, err) - - // Mock a RPC. - rpc := random.RandomPolicyCertificate(t) - rpc.SPTs = []common.SignedPolicyCertificateTimestamp{ - *common.NewSignedPolicyCertificateTimestamp( - "", 0, "", nil, - 0, - util.TimeFromSecs(99), - serializedSTH, - serializedPoI, - 0, nil, - ), - } - - // Check VerifyRPC. 
- logverifier := NewLogVerifier(nil) - err = logverifier.VerifyRPC(rpc) - require.NoError(t, err) -} diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 49ac40ef..1623fe2d 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -6,8 +6,6 @@ import ( "github.com/google/trillian" "github.com/google/trillian/types" - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/util" "github.com/transparency-dev/merkle" logProof "github.com/transparency-dev/merkle/proof" "github.com/transparency-dev/merkle/rfc6962" @@ -112,56 +110,3 @@ func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHa // This is a logProof.RootMismatchError, aka different hash values. return fmt.Errorf("verification failed: different hashes") } - -func (v *LogVerifier) VerifyRPC(rpc *common.PolicyCertificate) error { - // Get the hash of the RPC without SPTs: - SPTs := rpc.SPTs - rpc.SPTs = []common.SignedPolicyCertificateTimestamp{} - serializedStruct, err := common.ToJSON(rpc) - if err != nil { - return fmt.Errorf("VerifyRPC | ToJSON | %w", err) - } - bytesHash := v.HashLeaf([]byte(serializedStruct)) - // Restore the SPTs to the RPC: - rpc.SPTs = SPTs - - if err := v.verifySPTs(rpc.SPTs, bytesHash); err != nil { - return fmt.Errorf("VerifyRPC | %w", err) - } - return nil -} - -func (v *LogVerifier) verifySPTs(SPTs []common.SignedPolicyCertificateTimestamp, dataHash []byte) error { - for _, p := range SPTs { - // Load the STH from JSON. - sthRaw, err := common.FromJSON(p.STH) - if err != nil { - return fmt.Errorf("verifySPTs | FromJSON(STH) | %w", err) - } - // Into its right type. - sth, err := util.ToType[*types.LogRootV1](sthRaw) - if err != nil { - return fmt.Errorf("verifySPTs | ToType | %w", err) - } - - // Load the PoI from JSON. 
- poiRaw, err := common.FromJSON(p.PoI) - if err != nil { - return fmt.Errorf("verifySPTs | FromJSON(PoI) | %w", err) - } - // Into its right type. - poi, err := util.ToTypedSlice[*trillian.Proof](poiRaw) - if err != nil { - return fmt.Errorf("verifySPTs | ToTypedSlice | %w", err) - } - - if err != nil { - return fmt.Errorf("verifySPTs | JsonBytesToPoI | %w", err) - } - - if err = v.VerifyInclusionByHash(sth, dataHash, poi); err != nil { - return fmt.Errorf("verifySPTs | VerifyInclusionByHash | %w", err) - } - } - return nil -} diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 5b13ed71..2648768a 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -345,7 +345,9 @@ func GetPCAndRPCs( time.Now(), // timestamp nil, // policy attributes nil, // owner signature + nil, // owner pub key hash nil, // issuer signature + nil, // issuer pub key hash nil, // server timestamps )) } diff --git a/pkg/pca/config.go b/pkg/pca/config.go index fcf6408e..e28f52b3 100644 --- a/pkg/pca/config.go +++ b/pkg/pca/config.go @@ -8,10 +8,12 @@ import ( // PCAConfig: configuration of the pca type PCAConfig struct { - CAName string `json:",omitempty"` + CAName string `json:",omitempty"` + CTLogServers []CTLogServerEntryConfig `json:",omitempty"` + KeyPath string `json:",omitempty"` + RootPolicyCertPath string `json:",omitempty"` - // path to store the pca's key - KeyPath string `json:",omitempty"` + // deleteme remove all this below // PCA's output path; sends RPC PolicyLogExgPath string `json:",omitempty"` @@ -19,9 +21,16 @@ type PCAConfig struct { OutputPath string `json:",omitempty"` } +type CTLogServerEntryConfig struct { + Name string `json:",omitempty"` + URL string `json:",omitempty"` + PublicKeyDER []byte `json:",omitempty"` // DER-encoded SubjectPublicKeyInfo object. 
+ // See ctx509.MarshalPKIXPublicKey +} + // SaveConfigToFile: save PCA config to file func SaveConfigToFile(config *PCAConfig, configPath string) error { - bytes, err := json.Marshal(config) + bytes, err := json.MarshalIndent(config, "", " ") if err != nil { return fmt.Errorf("SaveConfigToFile | Marshal | %w", err) } diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index b9f998eb..85ddadf9 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -1,20 +1,16 @@ package pca import ( + "bytes" "crypto/rsa" "fmt" - "log" - "os" + "time" - "github.com/google/trillian" - "github.com/google/trillian/types" "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/logverifier" + "github.com/netsec-ethz/fpki/pkg/common/crypto" "github.com/netsec-ethz/fpki/pkg/util" ) -// CRITICAL: The funcs are not thread-safe for now. DO NOT use them for multi-thread program. - // TODO(yongzhe): // How to handle Cool-off period? // SuspiciousSPTs @@ -22,28 +18,28 @@ import ( // If domain owner loses the RPC, PCA can return the missing RPC) // More complete logic -// PCA: Structure which represent one PCA +// PCA represents a policy certificate authority. 
type PCA struct { - caName string - - // pca's signing rsa key pair; used to sign rcsr -> rpc - rsaKeyPair *rsa.PrivateKey - - // store valid Pol Cert (with server timestamps) in memory; Later replaced by data base - validPolCertsPerDomain map[string]*common.PolicyCertificate - - // Pol Cert without timestamps; pre-certificate - prePolCertsPerDomain map[string]*common.PolicyCertificate - - policyLogExgPath string - - outputPath string - - // verifier to verify the STH and PoI - logVerifier *logverifier.LogVerifier + CAName string + RsaKeyPair *rsa.PrivateKey // PCA's signing key pair + RootPolicyCert *common.PolicyCertificate // The PCA's policy certificate + CtLogServers map[[32]byte]*CTLogServerEntryConfig // CT log servers + LogServerRequester LogServerRequester // not set + DB map[[32]byte]*common.PolicyCertificate // per hash of public key + SerialNumber int // unique serial number per pol cert +} - // serial number for the RPC; unique for every RPC - serialNumber int +// LogServerRequester is implemented by objects that can talk to CT log servers. +// TODO(juanga) implement a real one, not only a mock for the tests. +type LogServerRequester interface { + ObtainSptFromLogServer( + URL string, + pc *common.PolicyCertificate, + ) (*common.SignedPolicyCertificateTimestamp, error) + SendPolicyCertificateToLogServer( + URL string, + pc *common.PolicyCertificate, + ) error } // NewPCA: Return a new instance of PCa @@ -54,144 +50,231 @@ func NewPCA(configPath string) (*PCA, error) { if err != nil { return nil, fmt.Errorf("NewPCA | ReadConfigFromFile | %w", err) } - // load rsa key pair + + // Load rsa key pair keyPair, err := util.RSAKeyFromPEMFile(config.KeyPath) if err != nil { return nil, fmt.Errorf("NewPCA | LoadRSAKeyPairFromFile | %w", err) } + + // Load Root Policy Certificate. 
+ a, err := common.FromJSONFile(config.RootPolicyCertPath) + if err != nil { + return nil, err + } + rpc, err := util.ToType[*common.PolicyCertificate](a) + if err != nil { + return nil, err + } + // Check the private key and RPC match. + derBytes, err := util.RSAPublicToDERBytes(&keyPair.PublicKey) + if err != nil { + return nil, err + } + + if !bytes.Equal(rpc.PublicKey, derBytes) { + return nil, fmt.Errorf("RPC and key do not match") + } + + // Load the CT log server entries. + logServers := make(map[[32]byte]*CTLogServerEntryConfig) + for _, s := range config.CTLogServers { + // Compute the hash of the public key. + h := common.SHA256Hash32Bytes(s.PublicKeyDER) + logServers[h] = &s + } + return &PCA{ - validPolCertsPerDomain: make(map[string]*common.PolicyCertificate), - prePolCertsPerDomain: make(map[string]*common.PolicyCertificate), - logVerifier: logverifier.NewLogVerifier(nil), - caName: config.CAName, - outputPath: config.OutputPath, - policyLogExgPath: config.PolicyLogExgPath, - rsaKeyPair: keyPair, + CAName: config.CAName, + RsaKeyPair: keyPair, + RootPolicyCert: rpc, + CtLogServers: logServers, + DB: make(map[[32]byte]*common.PolicyCertificate), + SerialNumber: 0, }, nil } -// ReceiveSPTFromPolicyLog: When policy log returns SPT, this func will be called -// this func will read the SPTs from the file, and process them -func (pca *PCA) ReceiveSPTFromPolicyLog() error { - for domainName, v := range pca.prePolCertsPerDomain { - // read the corresponding spt - spt, err := common.JsonFileToSPT(pca.policyLogExgPath + "/spt/" + domainName) - if err != nil { - return fmt.Errorf("ReceiveSPTFromPolicyLog | JsonFileToSPT | %w", err) - } +func (pca *PCA) NewPolicyCertificateSigningRequest( + version int, + subject string, + serialNumber int, + notBefore time.Time, + notAfter time.Time, + isIssuer bool, + publicKey []byte, + publicKeyAlgorithm common.PublicKeyAlgorithm, + signatureAlgorithm common.SignatureAlgorithm, + policyAttributes []common.PolicyAttributes, + 
ownerSigningFunction func(serialized []byte) []byte, + ownerPubKeyHash []byte, +) (*common.PolicyCertificateSigningRequest, error) { - // verify the PoI, STH - err = pca.verifySPTWithRPC(spt, v) - if err == nil { - log.Printf("Get a new SPT for domain RPC: %s\n", domainName) - v.SPTs = []common.SignedPolicyCertificateTimestamp{*spt} - - // move the rpc from pre-rpc to valid-rpc - delete(pca.prePolCertsPerDomain, domainName) - pca.validPolCertsPerDomain[v.RawSubject] = v - } else { - return fmt.Errorf("Fail to verify one SPT RPC") - } - os.Remove(pca.policyLogExgPath + "/spt/" + domainName) + // Check validity range falls inside PCAs. + if notBefore.Before(pca.RootPolicyCert.NotBefore) { + return nil, fmt.Errorf("invalid validity range: %s before PCAs %s", + notBefore, pca.RootPolicyCert.NotBefore) + } + if notAfter.After(pca.RootPolicyCert.NotAfter) { + return nil, fmt.Errorf("invalid validity range: %s after PCAs %s", + notAfter, pca.RootPolicyCert.NotAfter) } - return nil -} - -func (pca *PCA) OutputPolicyCertificate() error { - for domain, rpc := range pca.validPolCertsPerDomain { - err := common.ToJSONFile(rpc, pca.outputPath+"/"+domain+"_"+rpc.Issuer+"_"+"rpc") - if err != nil { - return fmt.Errorf("OutputPolicyCertificate | JsonStructToFile | %w", err) - } + // Create request with appropriate values. + req := common.NewPolicyCertificateSigningRequest( + version, + pca.CAName, + subject, + serialNumber, + notBefore, + notAfter, + isIssuer, + publicKey, + publicKeyAlgorithm, + signatureAlgorithm, + time.Now(), + policyAttributes, + nil, + ownerPubKeyHash, + ) + // Serialize it. + serializedReq, err := common.ToJSON(req) + if err != nil { + return nil, err } - return nil + // Obtain signature. + req.OwnerSignature = ownerSigningFunction(serializedReq) + + return req, nil } -// verify the SPT of the RPC. 
-func (pca *PCA) verifySPTWithRPC(spt *common.SignedPolicyCertificateTimestamp, rpc *common.PolicyCertificate) error { - proofs, logRoot, err := getProofsAndLogRoot(spt) +// SignAndLogRequest signs the policy certificate request and generates a policy certificate. +func (pca *PCA) SignAndLogRequest( + req *common.PolicyCertificateSigningRequest, +) (*common.PolicyCertificate, error) { + + // verify the signature in the rcsr; check if the domain's pub key is correct + skip, err := pca.canSkipCoolOffPeriod(req) if err != nil { - return fmt.Errorf("verifySPTWithRPC | parsePoIAndSTH | %w", err) + return nil, err + } + if !skip { + return nil, fmt.Errorf("for now we don't support cool off periods; all requests must " + + "be signed by the owner") } - // get leaf hash - rpcBytes, err := common.ToJSON(rpc) + pc, err := pca.signRequest(req) if err != nil { - return fmt.Errorf("verifySPT | Json_StructToBytes | %w", err) + return nil, err } - leafHash := pca.logVerifier.HashLeaf(rpcBytes) - // verify the PoI - err = pca.logVerifier.VerifyInclusionByHash(logRoot, leafHash, proofs) - if err != nil { - return fmt.Errorf("verifySPT | VerifyInclusionByHash | %w", err) + if err := pca.sendRequestToAllLogServers(pc); err != nil { + return nil, err } - return nil -} + if err := pca.signFinalPolicyCertificate(pc); err != nil { + return nil, err + } -// TODO(yongzhe): modify this to make sure unique SN -func (pca *PCA) increaseSerialNumber() { - pca.serialNumber = pca.serialNumber + 1 -} + if err := pca.sendFinalPolCertToAllLogServers(pc); err != nil { + return nil, err + } + + pca.storeInDb(pc) -func (pca *PCA) ReturnValidRPC() map[string]*common.PolicyCertificate { - return pca.validPolCertsPerDomain + return pc, nil } -// getProofsAndLogRoot return the proofs and root parsed from the PoI and STH in JSON. -func getProofsAndLogRoot(spt *common.SignedPolicyCertificateTimestamp) ([]*trillian.Proof, *types.LogRootV1, error) { - // Parse the PoI into []*trillian.Proof. 
- serializedProofs, err := common.FromJSON(spt.PoI) - if err != nil { - return nil, nil, err +// canSkipCoolOffPeriod verifies that the owner's signature is correct, if there is an owner's +// signature in the request, and this PCA has the policy certificate used to signed said request. +// It returns true if this PCA can skip the cool off period, false otherwise. +func (pca *PCA) canSkipCoolOffPeriod(req *common.PolicyCertificateSigningRequest) (bool, error) { + // Owner's signature? + if len(req.OwnerSignature) == 0 { + // No signature, cannot skip cool off period. + return false, nil } - proofs, err := util.ToTypedSlice[*trillian.Proof](serializedProofs) - if err != nil { - return nil, nil, err + // If there is a owner's signature, the id of the key used must be 32 bytes. + if len(req.OwnerPubKeyHash) != 32 { + return false, fmt.Errorf("field OwnerPubKeyHash should be 32 bytes long but is %d", + len(req.OwnerPubKeyHash)) } - - // Parse the STH into a *types.LogRootV1. - serializedRoot, err := common.FromJSON(spt.STH) - if err != nil { - return nil, nil, err + // Cast it to array and check the DB. + key := (*[32]byte)(req.OwnerPubKeyHash) + stored, ok := pca.DB[*key] + if !ok { + // No such certificate, cannot skip cool off period. + return false, nil } - root, err := util.ToType[*types.LogRootV1](serializedRoot) + + // We found the policy certificate used to sign this request. Get the public key. + pubKey, err := util.DERBytesToRSAPublic(stored.PublicKey) if err != nil { - return nil, nil, err + return false, err } - return proofs, root, nil + // Verify the signature matches. + err = crypto.VerifyOwnerSignature(req, pubKey) + + // Return true if no error, or false and the error. + return err == nil, err +} + +func (pca *PCA) signRequest( + req *common.PolicyCertificateSigningRequest, +) (*common.PolicyCertificate, error) { + + // Set the issuer values from this CA. 
+ pca.increaseSerialNumber() + req.Issuer = pca.RootPolicyCert.Subject() + req.RawSerialNumber = pca.SerialNumber + return crypto.SignRequestAsIssuer(req, pca.RsaKeyPair) } -/* -// check whether the RPC signature is correct -func (pca *PCA) checkRPCSignature(rcsr *common.RCSR) bool { - // if no rpc signature - if len(rcsr.PRCSignature) == 0 { - return false - } - - // check if there is any valid rpc - if rpc, found := pca.validRPCsByDomains[rcsr.Subject]; found { - err := common.RCSRVerifyRPCSignature(rcsr, rpc) - if err == nil { - return true - } else { - return false +func (pca *PCA) sendRequestToAllLogServers(pc *common.PolicyCertificate) error { + // TODO(juagargi) do this concurrently + SPTs := make([]common.SignedPolicyCertificateTimestamp, 0, len(pca.CtLogServers)) + for _, logServer := range pca.CtLogServers { + spt, err := pca.sendRequestToLogServer(pc, logServer) + if err != nil { + return err } - } else { - return false + SPTs = append(SPTs, *spt) } + pc.SPTs = SPTs + return nil } -// GetValidRPCByDomain: return the new RPC with SPT -func (pca *PCA) GetValidRPCByDomain(domainName string) (*common.RPC, error) { - if rpc, found := pca.validRPCsByDomains[domainName]; found { - return rpc, nil - } else { - return nil, errors.New("no valid RPC") +func (pca *PCA) sendRequestToLogServer( + pc *common.PolicyCertificate, + logServer *CTLogServerEntryConfig, +) (*common.SignedPolicyCertificateTimestamp, error) { + + return pca.LogServerRequester.ObtainSptFromLogServer(logServer.URL, pc) +} + +func (pca *PCA) signFinalPolicyCertificate(pc *common.PolicyCertificate) error { + _, err := crypto.SignPolicyCertificateAsIssuer(pc, pca.RsaKeyPair) + return err +} + +// sendFinalPolCertToAllLogServers sends the final policy certificate with all the SPTs included, +// to all the configured CT log servers for final registration. 
+func (pca *PCA) sendFinalPolCertToAllLogServers(pc *common.PolicyCertificate) error { + for _, logServer := range pca.CtLogServers { + err := pca.LogServerRequester.SendPolicyCertificateToLogServer(logServer.URL, pc) + if err != nil { + return err + } } + return nil +} + +func (pca *PCA) storeInDb(pc *common.PolicyCertificate) { + key := (*[32]byte)(common.SHA256Hash(pc.PublicKey)) + pca.DB[*key] = pc +} + +// TODO(yongzhe): modify this to make sure unique SN +func (pca *PCA) increaseSerialNumber() { + pca.SerialNumber = pca.SerialNumber + 1 } -*/ diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index 6f551518..e92afa90 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -4,13 +4,252 @@ package pca // Testing of PCA is in the integration test, because it also need the help of domain owner. // This file will be used for future logics. import ( + "crypto/rsa" + "fmt" "testing" + "time" + ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/common" + "github.com/netsec-ethz/fpki/pkg/common/crypto" + "github.com/netsec-ethz/fpki/pkg/tests" + "github.com/netsec-ethz/fpki/pkg/util" ) // Test_Config: do nothing -func Test_Config(t *testing.T) { +func TestNewPCA(t *testing.T) { _, err := NewPCA("testdata/pca_config.json") require.NoError(t, err, "New PCA error") } + +func TestCreateConfig(t *testing.T) { + t.Skip("Not creating config") + issuerKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + require.NoError(t, err) + derKey, err := util.RSAPublicToDERBytes(&issuerKey.PublicKey) + require.NoError(t, err) + + req := common.NewPolicyCertificateSigningRequest( + 0, + "pca root policy certificate", + "pca root policy certificate", + 13, + util.TimeFromSecs(10), + util.TimeFromSecs(10000), + true, + derKey, + common.RSA, + common.SHA256, + util.TimeFromSecs(1), + []common.PolicyAttributes{ + { + TrustedCA: 
[]string{"pca"}, + AllowedSubdomains: []string{""}, + }, + }, + nil, // no owner signature + nil, // hash of owner's public key + ) + // Self sign this pol cert. + rootPolCert, err := crypto.SignRequestAsIssuer(req, issuerKey) + require.NoError(t, err) + // And serialize it to file to include it in the configuration of the PCA. + err = common.ToJSONFile(rootPolCert, "testdata/rpc.json") + require.NoError(t, err) + + c := &PCAConfig{ + CAName: "pca", + KeyPath: "../../tests/testdata/serverkey.pem", + RootPolicyCertPath: "testdata/rpc.json", + CTLogServers: []CTLogServerEntryConfig{ + { + Name: "CT log server 1", + URL: "URL1.com/foo/bar1", + PublicKeyDER: derKey, + }, + { + Name: "CT log server 2", + URL: "URL2.com/foo/bar2", + PublicKeyDER: derKey, + }, + }, + } + err = SaveConfigToFile(c, "testdata/pca_config.json") + require.NoError(t, err) +} +func TestPCAWorkflow(t *testing.T) { + pca, err := NewPCA("testdata/pca_config.json") + require.NoError(t, err, "New PCA error") + // pca is configured using pca_config.json, which itself specifies the PCA to use serverkey.pem + // as the key to use to issue policy certificates. + notBefore := util.TimeFromSecs(10 + 1) + notAfter := util.TimeFromSecs(10000 - 10) + // The requester needs a key (which will be identified in the request itself). + ownerKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/clientkey.pem") + require.NoError(t, err) + ownerDerKey, err := util.RSAPublicToDERBytes(&ownerKey.PublicKey) + require.NoError(t, err) + // The workflow from the PCA's perspective is as follows: + // 1. Create request + req, err := pca.NewPolicyCertificateSigningRequest( + 1, + "fpki.com", + 1, + notBefore, + notAfter, + true, + ownerDerKey, // public key + common.RSA, + common.SHA256, + nil, // policy attributes + func(serialized []byte) []byte { + return nil + }, + common.SHA256Hash(ownerDerKey), // owner pub key hash + ) + require.NoError(t, err) + + // 2. 
Owner signs request + pca.increaseSerialNumber() + err = crypto.SignAsOwner(ownerKey, req) + require.NoError(t, err) + + // 3. PCA verifies owner's signature + skip, err := pca.canSkipCoolOffPeriod(req) + require.NoError(t, err) + require.False(t, skip) // because the PCA doesn't contain the pol cert used to sign it. + + // 4. PCA signs as issuer + pc, err := pca.signRequest(req) + require.NoError(t, err) + + // 5. PCA sends to log servers. Per log server: + mockRequester := newmockLogServerRequester(t, pca.CtLogServers) + pca.LogServerRequester = mockRequester + // 6. Log server verifies CA signature. + // 7. Creates and returns its SPT + // 8. PCA adds SPT to list in policy certificate + err = pca.sendRequestToAllLogServers(pc) + require.NoError(t, err) + require.Len(t, pc.SPTs, len(pca.CtLogServers)) // as many SPTs as CT log servers + checkSPTs(t, pca, pc) + + // 9. PCA signs again the policy certificate + err = pca.signFinalPolicyCertificate(pc) + require.NoError(t, err) + + // 10. PCA sends the policy certificate to all log servers. + err = pca.sendFinalPolCertToAllLogServers(pc) + require.NoError(t, err) + expectedURLs := make([]string, 0) + for _, e := range pca.CtLogServers { + expectedURLs = append(expectedURLs, e.URL) + } + require.ElementsMatch(t, expectedURLs, mockRequester.finalPolCertSentTo) + + // 11. PCA stores the final policy certificate in its DB. + pca.storeInDb(pc) + require.Len(t, pca.DB, 1) + for certID, cert := range pca.DB { + // The ID is correct: + require.Equal(t, certID, [32]byte(common.SHA256Hash32Bytes(pc.PublicKey))) + // And the DB contains the correct pol cert. + require.Equal(t, pc, cert) + break + } +} + +// mockLogServerRequester mocks a CT log server requester. +type mockLogServerRequester struct { + servers map[string]*CTLogServerEntryConfig + pcaCert *ctx509.Certificate + keys map[string]*rsa.PrivateKey + + // URLs of the called CT log servers when sending the final policy cert. 
+ finalPolCertSentTo []string +} + +func newmockLogServerRequester(t tests.T, servers map[[32]byte]*CTLogServerEntryConfig) *mockLogServerRequester { + // Load the certificate of the PCA. + pcaCert, err := util.CertificateFromPEMFile("../../tests/testdata/servercert.pem") + require.NoError(t, err) + + // Load the keys of the CT log servers. This mock requester uses one for all of them. + ctKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + require.NoError(t, err) + + m := make(map[string]*CTLogServerEntryConfig) + keys := make(map[string]*rsa.PrivateKey) + for _, s := range servers { + s := s + m[s.URL] = s + keys[s.URL] = ctKey + } + + return &mockLogServerRequester{ + servers: m, + pcaCert: pcaCert, + keys: keys, + finalPolCertSentTo: make([]string, 0), + } +} + +func (m *mockLogServerRequester) ObtainSptFromLogServer( + url string, + pc *common.PolicyCertificate, +) (*common.SignedPolicyCertificateTimestamp, error) { + + // Step 6 verify PCA signature. + if err := crypto.VerifyIssuerSignature(m.pcaCert, pc); err != nil { + return nil, err + } + serializedPc, err := common.ToJSON(pc) + if err != nil { + return nil, err + } + + // Step 7 create and add SPT. + signature, err := crypto.SignBytes(serializedPc, m.keys[url]) + if err != nil { + return nil, fmt.Errorf("error signing: %w", err) + } + logID := common.SHA256Hash(m.servers[url].PublicKeyDER) + spt := common.NewSignedPolicyCertificateTimestamp( + 0, + pc.Issuer, + logID, + time.Now(), + signature, + ) + return spt, nil +} + +func (m *mockLogServerRequester) SendPolicyCertificateToLogServer( + url string, + pc *common.PolicyCertificate, +) error { + + // We only annotate to whom the PCA called. + m.finalPolCertSentTo = append(m.finalPolCertSentTo, url) + return nil +} + +// checkSPTs checks that the SPTs inside the policy certificate are the expected ones. 
+func checkSPTs(t tests.T, pca *PCA, pc *common.PolicyCertificate) { + t.Helper() + + ctKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + require.NoError(t, err) + derKey, err := util.RSAPublicToDERBytes(&ctKey.PublicKey) + require.NoError(t, err) + hashedDerKey := common.SHA256Hash(derKey) + + for _, spt := range pc.SPTs { + require.Equal(t, hashedDerKey, spt.LogID) + require.Equal(t, pca.RootPolicyCert.Subject(), spt.Issuer) + require.Less(t, time.Since(spt.AddedTS), time.Minute) + require.Greater(t, time.Since(spt.AddedTS).Seconds(), 0.0) + } +} diff --git a/pkg/pca/sign_and_log.go b/pkg/pca/sign_and_log.go deleted file mode 100644 index c7dfb31e..00000000 --- a/pkg/pca/sign_and_log.go +++ /dev/null @@ -1,113 +0,0 @@ -package pca - -import ( - "encoding/base64" - "fmt" - "strconv" - - "github.com/netsec-ethz/fpki/pkg/common" - "github.com/netsec-ethz/fpki/pkg/common/crypto" -) - -// SignAndLogRCSR: sign the rcsr and generate a rpc -> store the rpc to the "fileExchange" folder; policy log will fetch rpc from the folder -func (pca *PCA) SignAndLogRCSR(req *common.PolicyCertificateSigningRequest) error { - // verify the signature in the rcsr; check if the domain's pub key is correct - err := crypto.VerifyOwnerSignature(req) - if err != nil { - return fmt.Errorf("SignAndLogRCSR | RCSRVerifySignature | %w", err) - } - - // Set the issuer values from this CA. 
- pca.increaseSerialNumber() - req.Issuer = pca.caName - req.RawSerialNumber = pca.serialNumber - - // generate pre-RPC (without SPT) - rpc, err := crypto.SignAsIssuer(req, pca.rsaKeyPair) - if err != nil { - return fmt.Errorf("SignAndLogRCSR | RCSRGenerateRPC | %w", err) - } - - rpcHash, err := pca.getHashName(rpc) - if err != nil { - return fmt.Errorf("SignAndLogRCSR | getHashName | %w", err) - } - - // add the rpc to preRPC(without SPT) - pca.prePolCertsPerDomain[rpcHash] = rpc - - // send RPC to policy log - err = pca.sendRPCToPolicyLog(rpc, strconv.Itoa(pca.serialNumber)) - if err != nil { - return fmt.Errorf("SignAndLogRCSR | sendRPCToPolicyLog | %w", err) - } - - return nil -} - -// SignAndLogPSR: sign and log policy signing request -func (pca *PCA) SignAndLogPolicyCertificate(req *common.PolicyCertificateSigningRequest) error { - err := pca.findRPCAndVerifyPSR(req) - if err != nil { - return fmt.Errorf("SignAndLogPSR | findRPCAndVerifyPSR | %w", err) - } - - pca.increaseSerialNumber() - - polCert, err := crypto.SignAsIssuer(req, pca.rsaKeyPair) - if err != nil { - return fmt.Errorf("SignAndLogPSR | CASignSP | %w", err) - } - - spHash, err := pca.getHashName(polCert) - if err != nil { - return fmt.Errorf("SignAndLogRCSR | getHashName | %w", err) - } - - pca.prePolCertsPerDomain[spHash] = polCert - - err = pca.sendSPToPolicyLog(polCert, strconv.Itoa(polCert.SerialNumber())) - if err != nil { - return fmt.Errorf("SignAndLogPSR | sendSPToPolicyLog | %w", err) - } - - return nil -} - -func (pca *PCA) findRPCAndVerifyPSR(req *common.PolicyCertificateSigningRequest) error { - rpc, ok := pca.validPolCertsPerDomain[req.Subject()] - if !ok { - return fmt.Errorf("findRPCAndVerifyPSR | validRPCsByDomains | no valid rpc at this moment") - } - - err := crypto.VerifyOwnerSignatureWithPolCert(req, rpc) - if err != nil { - return fmt.Errorf("findRPCAndVerifyPSR | VerifyPSRUsingRPC | %w", err) - } - - return nil -} - -// save file to output dir -func (pca *PCA) 
sendRPCToPolicyLog(rpc *common.PolicyCertificate, fileName string) error { - return common.ToJSONFile(rpc, pca.policyLogExgPath+"/rpc/"+fileName) -} - -// save file to output dir -func (pca *PCA) sendSPToPolicyLog(sp *common.PolicyCertificate, fileName string) error { - return common.ToJSONFile(sp, pca.policyLogExgPath+"/sp/"+fileName) -} - -func (pca *PCA) getHashName(s interface{}) (string, error) { - structBytes, err := common.ToJSON(s) - if err != nil { - return "", fmt.Errorf("getHashName | ToJSON | %w", err) - } - - bytesHash := pca.logVerifier.HashLeaf([]byte(structBytes)) - - // base64 url encode the hashed value, and this will be the file name of SPT - fileName := base64.URLEncoding.EncodeToString(bytesHash) - - return fileName, nil -} diff --git a/pkg/pca/testdata/pca_config.json b/pkg/pca/testdata/pca_config.json index aa2b4c36..775be53d 100644 --- a/pkg/pca/testdata/pca_config.json +++ b/pkg/pca/testdata/pca_config.json @@ -1,6 +1,17 @@ { - "CAName": "pca", - "KeyPath": "testdata/server_key.pem", - "OutputPath": "./file_exchange/pcaoutput", - "PolicyLogOutputPath": "./file_exchange/policylog" + "CAName": "pca", + "CTLogServers": [ + { + "Name": "CT log server 1", + "URL": "URL1.com/foo/bar1", + "PublicKeyDER": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB" + }, + { + "Name": "CT log server 2", + "URL": "URL2.com/foo/bar2", + "PublicKeyDER": 
"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB" + } + ], + "KeyPath": "../../tests/testdata/serverkey.pem", + "RootPolicyCertPath": "testdata/rpc.json" } \ No newline at end of file diff --git a/pkg/pca/testdata/rpc.json b/pkg/pca/testdata/rpc.json new file mode 100644 index 00000000..ce491cfa --- /dev/null +++ b/pkg/pca/testdata/rpc.json @@ -0,0 +1 @@ +{"T":"*pc","O":{"Issuer":"pca root policy certificate","Subject":"pca root policy certificate","SerialNumber":13,"NotBefore":"1970-01-01T01:00:10+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB","TimeStamp":"1970-01-01T01:00:01+01:00","PolicyAttributes":[{"TrustedCA":["pca"],"AllowedSubdomains":[""]}],"IssuerSignature":"dnK6r24h3sBbulHLhhc2HgVmryQ8DJsEzu0g+yYQ62J+erMd7T5U+JFcutq/lsFTT9jWvMcKs9TeS5Z756Jh4hwyWvpjkH6C8XZRSlb/E9CpsuV2IiAT7Xx5IrqFhVW9QT7DmIgdV0yU3HehE4QCAb9dZ3pFybeRRngAcQHzGN92d3U+h6vQzML9nkSKGeP8l1Q1jwCcSd0fbkuwUBEDceyJxfGGQZkqBOXLgVefBKxcP57SlnwuoPhV/vNdIJuM/EJ0MncIuvN7eZvQWQspm/VQmrxMou3osm8hH5eTJhcqiBX0sz3nbjjvQNpHY/TkQ9Uj8T36+KmoMk1xWOX5JQ=="}} \ No newline at end of file diff --git a/pkg/policylog/client/logclient.go b/pkg/policylog/client/logclient.go index 2d67b260..94a6fe26 100644 --- a/pkg/policylog/client/logclient.go +++ 
b/pkg/policylog/client/logclient.go @@ -203,21 +203,6 @@ func (c *LogClient) QueueRPCs(ctx context.Context) (*QueueRPCResult, error) { fmt.Println("fetch proofs succeed!") fmt.Println(elapsed) - // queueRPCResult will always be returned, even if error occurs in the future - - // store proof to SPT file - err = c.storeProofMapToSPT(fetchInclusionResult.PoIs) - if err != nil { - return queueRPCResult, fmt.Errorf("QueueRPCs | storeProofMapToSPT: %w", err) - } - - /* - // store the STH as well; not necessary - err = common.JsonStructToFile(c.logRoot, c.config.OutPutPath+"/logRoot/logRoot") - if err != nil { - return queueRPCResult, fmt.Errorf("QueueRPCs | JsonStructToFile: %w", err) - }*/ - return queueRPCResult, nil } @@ -254,40 +239,6 @@ func (c *LogClient) readRPCFromFileToBytes() ([][]byte, error) { return data, nil } -// read elements in the proof map, and turn it into a SPT, then store them -func (c *LogClient) storeProofMapToSPT(proofMap map[string]*PoIAndSTH) error { - // for every proof in the map - for k, v := range proofMap { - // serialize the proof - proofBytes, err := common.ToJSON(v.PoIs) - if err != nil { - return fmt.Errorf("storeProofMapToSPT | PoIs ToJSON: %w", err) - } - - // serialize log root (signed tree head) to bytes - sth, err := common.ToJSON(&v.STH) - if err != nil { - return fmt.Errorf("storeProofMapToSPT | STH ToJSON: %w", err) - } - - // attach PoI and STH to SPT - // TODO(yongzhe): fill in the other fields - serverTimestamp := common.NewSignedPolicyCertificateTimestamp( - "", 0, "", nil, 0, time.Time{}, - sth, proofBytes, - 0, nil, - ) - - // store SPT to file - err = common.ToJSONFile(serverTimestamp, c.config.PolicyLogExchangePath+"/spt/"+k) - if err != nil { - return fmt.Errorf("storeProofMapToSPT | JsonStructToFile: %w", err) - } - } - - return nil -} - // BuildLeaf runs the leaf hasher over data and builds a leaf. 
func buildLeaf(data []byte) *trillian.LogLeaf { leafHash := rfc6962.DefaultHasher.HashLeaf(data) diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 111c7f86..ed45859f 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -101,16 +101,11 @@ func RandomTimeWithoutMonotonic() time.Time { func RandomSignedPolicyCertificateTimestamp(t tests.T) *common.SignedPolicyCertificateTimestamp { return common.NewSignedPolicyCertificateTimestamp( - "spt subject", - rand.Intn(10), // version - "Issuer", - RandomBytesForTest(t, 10), // log id - 0x21, - RandomTimeWithoutMonotonic(), - RandomBytesForTest(t, 32), - RandomBytesForTest(t, 32), - rand.Intn(1000), - RandomBytesForTest(t, 32), + rand.Intn(10), // version + "Issuer", // issuer + RandomBytesForTest(t, 10), // log id + RandomTimeWithoutMonotonic(), // timestamp + RandomBytesForTest(t, 32), // signature ) } @@ -127,8 +122,9 @@ func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest common.RSA, common.SHA256, RandomTimeWithoutMonotonic(), - nil, // policy attributes (empty for now) - RandomBytesForTest(t, 32), + nil, // policy attributes (empty for now) + RandomBytesForTest(t, 32), // ownwer signature + RandomBytesForTest(t, 32), // ownwer pub key hash ) } @@ -145,9 +141,11 @@ func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { common.RSA, common.SHA256, RandomTimeWithoutMonotonic(), - nil, // policy attributes (empty for now) - RandomBytesForTest(t, 32), - RandomBytesForTest(t, 32), + nil, // policy attributes (empty for now) + RandomBytesForTest(t, 32), // ownwer signature + RandomBytesForTest(t, 32), // ownwer pub key hash + RandomBytesForTest(t, 32), // issuer signature + RandomBytesForTest(t, 32), // issuer pub key hash []common.SignedPolicyCertificateTimestamp{ *RandomSignedPolicyCertificateTimestamp(t), *RandomSignedPolicyCertificateTimestamp(t), diff --git a/pkg/util/pem.go b/pkg/util/pem.go index 5e025c37..700adb49 100644 --- 
a/pkg/util/pem.go +++ b/pkg/util/pem.go @@ -40,3 +40,19 @@ func PEMToRSAPublic(pubkey []byte) (*rsa.PublicKey, error) { } return nil, errors.New("PemBytesToRsaPublicKey | ParsePKIXPublicKey | Key type is not RSA") } + +func RSAPublicToDERBytes(pubKey *rsa.PublicKey) ([]byte, error) { + return ctx509.MarshalPKIXPublicKey(pubKey) +} + +func DERBytesToRSAPublic(derBytes []byte) (*rsa.PublicKey, error) { + rawKey, err := ctx509.ParsePKIXPublicKey(derBytes) + if err != nil { + return nil, err + } + key, ok := rawKey.(*rsa.PublicKey) + if !ok { + return nil, fmt.Errorf("key is not RSA, but %T", rawKey) + } + return key, nil +} diff --git a/tests/integration/domainowner_pca_policlog_interaction/main.go b/tests/integration/domainowner_pca_policlog_interaction/main.go index 6e1d3510..98442496 100644 --- a/tests/integration/domainowner_pca_policlog_interaction/main.go +++ b/tests/integration/domainowner_pca_policlog_interaction/main.go @@ -31,7 +31,7 @@ func main() { } // first rcsr - rcsr, err := do.GeneratePolCertSignRequest("abc.com", 1) + rcsr, err := do.GeneratePolCertSignRequest("TheIssuer", "abc.com", 1) if err != nil { logErrAndQuit(err) } @@ -41,19 +41,19 @@ func main() { } // sign and log the first rcsr - err = pca.SignAndLogRCSR(rcsr) + err = pca.SignAndLogRequest(rcsr) if err != nil { logErrAndQuit(err) } // second rcsr - rcsr, err = do.GeneratePolCertSignRequest("fpki.com", 1) + rcsr, err = do.GeneratePolCertSignRequest("TheIssuer", "fpki.com", 1) if err != nil { logErrAndQuit(err) } // sign and log the second rcsr - err = pca.SignAndLogRCSR(rcsr) + err = pca.SignAndLogRequest(rcsr) if err != nil { logErrAndQuit(err) } diff --git a/pkg/common/crypto/testdata/clientcert.pem b/tests/testdata/clientcert.pem similarity index 100% rename from pkg/common/crypto/testdata/clientcert.pem rename to tests/testdata/clientcert.pem diff --git a/pkg/common/crypto/testdata/clientkey.pem b/tests/testdata/clientkey.pem similarity index 100% rename from 
pkg/common/crypto/testdata/clientkey.pem rename to tests/testdata/clientkey.pem diff --git a/pkg/common/crypto/testdata/servercert.pem b/tests/testdata/servercert.pem similarity index 100% rename from pkg/common/crypto/testdata/servercert.pem rename to tests/testdata/servercert.pem diff --git a/pkg/common/crypto/testdata/serverkey.pem b/tests/testdata/serverkey.pem similarity index 100% rename from pkg/common/crypto/testdata/serverkey.pem rename to tests/testdata/serverkey.pem From a14d3f1eb7dd5e4e9890161c2ce0abd9f410529c Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 29 Jun 2023 17:18:16 +0200 Subject: [PATCH 169/187] Disable print for debug purposes in verifier. --- pkg/logverifier/verifier.go | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 1623fe2d..15377c78 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -1,7 +1,6 @@ package logverifier import ( - "encoding/base64" "fmt" "github.com/google/trillian" @@ -100,12 +99,12 @@ func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHa return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", err) } - deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, - leafHash, proof.Hashes) - if err != nil { - panic(err) - } - fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) + // deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, + // leafHash, proof.Hashes) + // if err != nil { + // panic(err) + // } + // fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) } // This is a logProof.RootMismatchError, aka different hash values. return fmt.Errorf("verification failed: different hashes") From fd6dd39a7f116d7d68b3954a5adfc6e8ee7f5dd2 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 29 Jun 2023 17:19:22 +0200 Subject: [PATCH 170/187] Rename PolicyCertificate.SPTs to SPCTs. --- pkg/common/crypto/crypto.go | 6 +++--- pkg/common/crypto/crypto_test.go | 4 ++-- pkg/common/policies.go | 6 +++--- pkg/common/policies_test.go | 2 +- pkg/logverifier/logverifier_test.go | 2 +- pkg/pca/pca.go | 2 +- pkg/pca/pca_test.go | 4 ++-- 7 files changed, 13 insertions(+), 13 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index e98b2d5e..6734210a 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -153,13 +153,13 @@ func VerifyIssuerSignature(caCert *ctx509.Certificate, rpc *common.PolicyCertifi pubKey := caCert.PublicKey.(*rsa.PublicKey) // Serialize without CA signature or SPTs: - caSig, SPTs := rpc.IssuerSignature, rpc.SPTs - rpc.IssuerSignature, rpc.SPTs = nil, nil + caSig, SPTs := rpc.IssuerSignature, rpc.SPCTs + rpc.IssuerSignature, rpc.SPCTs = nil, nil bytes, err := common.ToJSON(rpc) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } - rpc.IssuerSignature, rpc.SPTs = caSig, SPTs + rpc.IssuerSignature, rpc.SPCTs = caSig, SPTs hashOutput := sha256.Sum256(bytes) err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rpc.IssuerSignature) diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index f486fbc6..3cf24a17 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -72,7 +72,7 @@ func TestSignAsIssuer(t *testing.T) { // Sign as issuer. 
polCert, err := crypto.SignRequestAsIssuer(req, issuerKey) require.NoError(t, err, "RCSR Generate RPC error") - assert.Equal(t, len(polCert.SPTs), 0, "SPTs must be empty right after first issuer signature") + assert.Equal(t, len(polCert.SPCTs), 0, "SPTs must be empty right after first issuer signature") // ------------------------------------- // phase 3: domain owner check rpc @@ -113,5 +113,5 @@ func TestIssuanceOfSP(t *testing.T) { rpc, err := crypto.SignRequestAsIssuer(req, pcaPrivKey) require.NoError(t, err, "RCSR Generate RPC error") - assert.Equal(t, len(rpc.SPTs), 0, "spt in the rpc should be empty") + assert.Equal(t, len(rpc.SPCTs), 0, "spt in the rpc should be empty") } diff --git a/pkg/common/policies.go b/pkg/common/policies.go index eb0f5e52..ec6e9fcf 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -46,7 +46,7 @@ type PolicyCertificate struct { PolicyCertificateFields IssuerSignature []byte `json:",omitempty"` IssuerPubKeyHash []byte `json:",omitempty"` - SPTs []SignedPolicyCertificateTimestamp `json:",omitempty"` + SPCTs []SignedPolicyCertificateTimestamp `json:",omitempty"` } // PolicyAttributes is a domain policy that specifies what is or not acceptable for a domain. 
@@ -159,7 +159,7 @@ func NewPolicyCertificate( ), IssuerSignature: issuerSignature, IssuerPubKeyHash: issuerPubKeyHash, - SPTs: SPTs, + SPCTs: SPTs, } } @@ -167,7 +167,7 @@ func (c PolicyCertificate) Equal(x PolicyCertificate) bool { return c.PolicyCertificateFields.Equal(x.PolicyCertificateFields) && bytes.Equal(c.IssuerSignature, x.IssuerSignature) && bytes.Equal(c.IssuerPubKeyHash, x.IssuerPubKeyHash) && - equalSlices(c.SPTs, x.SPTs) + equalSlices(c.SPCTs, x.SPCTs) } func (s PolicyAttributes) Equal(o PolicyAttributes) bool { diff --git a/pkg/common/policies_test.go b/pkg/common/policies_test.go index c477ced7..44dfd72e 100644 --- a/pkg/common/policies_test.go +++ b/pkg/common/policies_test.go @@ -33,7 +33,7 @@ func TestEqual(t *testing.T) { // TestJsonReadWrite: RPC -> file -> RPC, then RPC.Equal(RPC) func TestJsonReadWrite(t *testing.T) { rpc := random.RandomPolicyCertificate(t) - rpc.SPTs = []common.SignedPolicyCertificateTimestamp{ + rpc.SPCTs = []common.SignedPolicyCertificateTimestamp{ *random.RandomSignedPolicyCertificateTimestamp(t), *random.RandomSignedPolicyCertificateTimestamp(t), } diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 5b6a3595..1b2862c8 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -44,7 +44,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "VZfa96+e9du6zpvFD/ZlMFMiTqfruk71mqzcg+NG350="), + RootHash: tests.MustDecodeBase64(t, "bpEVSbD39eqInlG1Pw56juoo76gv5XMHoCZiKNn8Kx0="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 85ddadf9..4540212b 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -240,7 +240,7 @@ func (pca *PCA) sendRequestToAllLogServers(pc *common.PolicyCertificate) error { } SPTs = append(SPTs, *spt) } - pc.SPTs = SPTs + pc.SPCTs = SPTs return nil } diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index e92afa90..9e415e7d 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -133,7 +133,7 @@ func TestPCAWorkflow(t *testing.T) { // 8. PCA adds SPT to list in policy certificate err = pca.sendRequestToAllLogServers(pc) require.NoError(t, err) - require.Len(t, pc.SPTs, len(pca.CtLogServers)) // as many SPTs as CT log servers + require.Len(t, pc.SPCTs, len(pca.CtLogServers)) // as many SPTs as CT log servers checkSPTs(t, pca, pc) // 9. PCA signs again the policy certificate @@ -246,7 +246,7 @@ func checkSPTs(t tests.T, pca *PCA, pc *common.PolicyCertificate) { require.NoError(t, err) hashedDerKey := common.SHA256Hash(derKey) - for _, spt := range pc.SPTs { + for _, spt := range pc.SPCTs { require.Equal(t, hashedDerKey, spt.LogID) require.Equal(t, pca.RootPolicyCert.Subject(), spt.Issuer) require.Less(t, time.Since(spt.AddedTS), time.Minute) From b6440c86fcedac122d8f99670ec3904d7e3f01a9 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 29 Jun 2023 17:27:43 +0200 Subject: [PATCH 171/187] PolicyAttributes is not a slice, but just one element. 
--- pkg/common/policies.go | 8 ++++---- pkg/common/policy_issuance.go | 2 +- pkg/domainowner/domainowner.go | 16 ++++++++-------- pkg/logverifier/logverifier_test.go | 2 +- pkg/logverifier/verifier.go | 13 +++++++------ pkg/mapserver/logfetcher/logfetcher.go | 14 +++++++------- pkg/pca/pca.go | 2 +- pkg/pca/pca_test.go | 10 ++++------ pkg/pca/testdata/rpc.json | 2 +- pkg/tests/random/random.go | 4 ++-- 10 files changed, 36 insertions(+), 37 deletions(-) diff --git a/pkg/common/policies.go b/pkg/common/policies.go index ec6e9fcf..7c5f3635 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -36,7 +36,7 @@ type PolicyCertificateFields struct { PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` - PolicyAttributes []PolicyAttributes `json:",omitempty"` + PolicyAttributes PolicyAttributes `json:",omitempty"` OwnerSignature []byte `json:",omitempty"` OwnerPubKeyHash []byte `json:",omitempty"` // SHA256 of owner's public key } @@ -81,7 +81,7 @@ func NewPolicyCertificateFields( publicKeyAlgorithm PublicKeyAlgorithm, signatureAlgorithm SignatureAlgorithm, timeStamp time.Time, - policyAttributes []PolicyAttributes, + policyAttributes PolicyAttributes, ownerSignature []byte, ownerPubKeyHash []byte, ) *PolicyCertificateFields { @@ -117,7 +117,7 @@ func (c PolicyCertificateFields) Equal(x PolicyCertificateFields) bool { c.TimeStamp.Equal(x.TimeStamp) && bytes.Equal(c.OwnerSignature, x.OwnerSignature) && bytes.Equal(c.OwnerPubKeyHash, x.OwnerPubKeyHash) && - equalSlices(c.PolicyAttributes, x.PolicyAttributes) + c.PolicyAttributes.Equal(x.PolicyAttributes) } func NewPolicyCertificate( @@ -132,7 +132,7 @@ func NewPolicyCertificate( publicKeyAlgorithm PublicKeyAlgorithm, signatureAlgorithm SignatureAlgorithm, timeStamp time.Time, - policyAttributes []PolicyAttributes, + policyAttributes PolicyAttributes, ownerSignature []byte, ownerPubKeyHash []byte, 
issuerSignature []byte, diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index fe5e1b0c..e3983632 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -25,7 +25,7 @@ func NewPolicyCertificateSigningRequest( publicKeyAlgorithm PublicKeyAlgorithm, signatureAlgorithm SignatureAlgorithm, timeStamp time.Time, - policyAttributes []PolicyAttributes, + policyAttributes PolicyAttributes, ownerSignature []byte, ownerPubKeyHash []byte, ) *PolicyCertificateSigningRequest { diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 258d1831..91c7ca38 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -51,10 +51,10 @@ func (do *DomainOwner) GeneratePolCertSignRequest(issuer, domainName string, ver pubKeyBytes, common.RSA, common.SHA256, - time.Now(), // timestamp - nil, // policy attributes - nil, // owner signature - nil, // owner pub key hash + time.Now(), // timestamp + common.PolicyAttributes{}, // policy attributes + nil, // owner signature + nil, // owner pub key hash ) // if domain owner still have the private key of the previous RPC -> can avoid cool-off period @@ -91,10 +91,10 @@ func (do *DomainOwner) RandomPolicyCertificate(domainName string, policy common. nil, // public key common.RSA, common.SHA256, - time.Now(), // timestamp - []common.PolicyAttributes{policy}, // policy attributes - nil, // owner's signature - nil, // owner pub key hash + time.Now(), // timestamp + policy, // policy attributes + nil, // owner's signature + nil, // owner pub key hash ) err := crypto.SignAsOwner(rpcKeyPair, polCertSignReq) diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 1b2862c8..bd3906bc 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -44,7 +44,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "bpEVSbD39eqInlG1Pw56juoo76gv5XMHoCZiKNn8Kx0="), + RootHash: tests.MustDecodeBase64(t, "92bzhDqqvk5x2YFWpeHNNKIS5lRQYnF2pbvdk7NprX0="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 15377c78..1623fe2d 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -1,6 +1,7 @@ package logverifier import ( + "encoding/base64" "fmt" "github.com/google/trillian" @@ -99,12 +100,12 @@ func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHa return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", err) } - // deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, - // leafHash, proof.Hashes) - // if err != nil { - // panic(err) - // } - // fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) + deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, + leafHash, proof.Hashes) + if err != nil { + panic(err) + } + fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) } // This is a logProof.RootMismatchError, aka different hash values. 
return fmt.Errorf("verification failed: different hashes") diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 2648768a..78457de8 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -342,13 +342,13 @@ func GetPCAndRPCs( generateRandomBytes(), // public key common.RSA, common.SHA256, - time.Now(), // timestamp - nil, // policy attributes - nil, // owner signature - nil, // owner pub key hash - nil, // issuer signature - nil, // issuer pub key hash - nil, // server timestamps + time.Now(), // timestamp + common.PolicyAttributes{}, // policy attributes + nil, // owner signature + nil, // owner pub key hash + nil, // issuer signature + nil, // issuer pub key hash + nil, // server timestamps )) } if err := scanner.Err(); err != nil { diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 4540212b..80a18de7 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -104,7 +104,7 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( publicKey []byte, publicKeyAlgorithm common.PublicKeyAlgorithm, signatureAlgorithm common.SignatureAlgorithm, - policyAttributes []common.PolicyAttributes, + policyAttributes common.PolicyAttributes, ownerSigningFunction func(serialized []byte) []byte, ownerPubKeyHash []byte, ) (*common.PolicyCertificateSigningRequest, error) { diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index 9e415e7d..87e0277f 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -43,11 +43,9 @@ func TestCreateConfig(t *testing.T) { common.RSA, common.SHA256, util.TimeFromSecs(1), - []common.PolicyAttributes{ - { - TrustedCA: []string{"pca"}, - AllowedSubdomains: []string{""}, - }, + common.PolicyAttributes{ + TrustedCA: []string{"pca"}, + AllowedSubdomains: []string{""}, }, nil, // no owner signature nil, // hash of owner's public key @@ -103,7 +101,7 @@ func TestPCAWorkflow(t *testing.T) { ownerDerKey, // public key common.RSA, common.SHA256, - nil, // policy attributes 
+ common.PolicyAttributes{}, // policy attributes func(serialized []byte) []byte { return nil }, diff --git a/pkg/pca/testdata/rpc.json b/pkg/pca/testdata/rpc.json index ce491cfa..08c450ae 100644 --- a/pkg/pca/testdata/rpc.json +++ b/pkg/pca/testdata/rpc.json @@ -1 +1 @@ -{"T":"*pc","O":{"Issuer":"pca root policy certificate","Subject":"pca root policy certificate","SerialNumber":13,"NotBefore":"1970-01-01T01:00:10+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB","TimeStamp":"1970-01-01T01:00:01+01:00","PolicyAttributes":[{"TrustedCA":["pca"],"AllowedSubdomains":[""]}],"IssuerSignature":"dnK6r24h3sBbulHLhhc2HgVmryQ8DJsEzu0g+yYQ62J+erMd7T5U+JFcutq/lsFTT9jWvMcKs9TeS5Z756Jh4hwyWvpjkH6C8XZRSlb/E9CpsuV2IiAT7Xx5IrqFhVW9QT7DmIgdV0yU3HehE4QCAb9dZ3pFybeRRngAcQHzGN92d3U+h6vQzML9nkSKGeP8l1Q1jwCcSd0fbkuwUBEDceyJxfGGQZkqBOXLgVefBKxcP57SlnwuoPhV/vNdIJuM/EJ0MncIuvN7eZvQWQspm/VQmrxMou3osm8hH5eTJhcqiBX0sz3nbjjvQNpHY/TkQ9Uj8T36+KmoMk1xWOX5JQ=="}} \ No newline at end of file +{"T":"*pc","O":{"Issuer":"pca root policy certificate","Subject":"pca root policy 
certificate","SerialNumber":13,"NotBefore":"1970-01-01T01:00:10+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB","TimeStamp":"1970-01-01T01:00:01+01:00","PolicyAttributes":{"TrustedCA":["pca"],"AllowedSubdomains":[""]},"IssuerSignature":"NHc1B/3XwwgYvCWCV311L67XKa9DuIe7fHq7UUjB9mrb1T2vbwPG2y2Dphy1b8Qpjop3mgOHLUW6HV5Swe0cCI2G0avuoVU9DxLQVtHkIM/5nYO1MHdSIp42+NqZYeLha/k7tNzrptUOUAgIPfIy1VHMlKDXqlGi2+I5C7BG9unVz9z7YZvd92ca/pELzmF5MHX4iU7lsjYaySIAZLzmCBCAdJAgJRyZR/mMAlg7JHmq5/c9xi/NTMiue/VabiTfmf/UwJOhgT8wpAoGD42rPFehLRtX75phEdPQLlxW9gW2MK6ThgQECN6xPZsjjEIxscVJLihvQWlD7RTIRW7R6Q=="}} \ No newline at end of file diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index ed45859f..494c9d4c 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -122,7 +122,7 @@ func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest common.RSA, common.SHA256, RandomTimeWithoutMonotonic(), - nil, // policy attributes (empty for now) + common.PolicyAttributes{}, // policy attributes (empty for now) RandomBytesForTest(t, 32), // ownwer signature RandomBytesForTest(t, 32), // ownwer pub key hash ) @@ -141,7 +141,7 @@ func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { common.RSA, common.SHA256, RandomTimeWithoutMonotonic(), - nil, // policy attributes (empty for now) + common.PolicyAttributes{}, // policy attributes (empty for now) RandomBytesForTest(t, 32), // ownwer signature RandomBytesForTest(t, 32), // ownwer pub key hash RandomBytesForTest(t, 32), // issuer signature From 
10baf34e7edcd181125526b8dd7903f929e587c0 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 29 Jun 2023 17:29:25 +0200 Subject: [PATCH 172/187] Improved comment. --- pkg/common/policy_common.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index 78d97c63..d1df5406 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -13,7 +13,7 @@ func (o MarshallableDocumentBase) Raw() []byte { return o.RawJSON } // PolicyPart is an interface that is implemented by all objects that are part of the set // of "policy objects". A policy object is that one that represents functionality of policies -// for a domain, such as RPC, RCSR, SPT, SPRT, SP, PSR or Policy. +// for a domain, such as PolicyCertificate, a PolicyCertificateSigningRequest, etc. type PolicyPart interface { MarshallableDocument } From 42d6f7be8309d2f182f7e3fd5252c4689f810705 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 29 Jun 2023 17:39:18 +0200 Subject: [PATCH 173/187] Added field "Domain" to PolicyCertificate. 
--- pkg/common/crypto/crypto.go | 1 + pkg/common/policies.go | 6 ++++++ pkg/common/policy_issuance.go | 4 +++- pkg/domainowner/domainowner.go | 4 +++- pkg/logverifier/logverifier_test.go | 2 +- pkg/logverifier/verifier.go | 13 ++++++------- pkg/mapserver/logfetcher/logfetcher.go | 3 ++- pkg/pca/pca.go | 2 ++ pkg/pca/pca_test.go | 2 ++ pkg/pca/testdata/rpc.json | 2 +- pkg/tests/random/random.go | 3 +++ 11 files changed, 30 insertions(+), 12 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 6734210a..746c0705 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -108,6 +108,7 @@ func SignRequestAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *r req.Issuer, req.Subject(), req.SerialNumber(), + req.Domain, req.NotBefore, req.NotAfter, req.IsIssuer, diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 7c5f3635..94e4a92d 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -29,6 +29,7 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { type PolicyCertificateFields struct { PolicyCertificateBase + Domain string `json:",omitempty"` NotBefore time.Time `json:",omitempty"` NotAfter time.Time `json:",omitempty"` IsIssuer bool `json:",omitempty"` @@ -74,6 +75,7 @@ func NewPolicyCertificateFields( issuer string, subject string, serialNumber int, + domain string, notBefore time.Time, notAfter time.Time, isIssuer bool, @@ -94,6 +96,7 @@ func NewPolicyCertificateFields( RawSubject: subject, RawSerialNumber: serialNumber, }, + Domain: domain, NotBefore: notBefore, NotAfter: notAfter, IsIssuer: isIssuer, @@ -111,6 +114,7 @@ func (c PolicyCertificateFields) Equal(x PolicyCertificateFields) bool { return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && c.PublicKeyAlgorithm == x.PublicKeyAlgorithm && bytes.Equal(c.PublicKey, x.PublicKey) && + c.Domain == x.Domain && c.NotBefore.Equal(x.NotBefore) && c.NotAfter.Equal(x.NotAfter) && c.SignatureAlgorithm 
== x.SignatureAlgorithm && @@ -125,6 +129,7 @@ func NewPolicyCertificate( issuer string, subject string, serialNumber int, + domain string, notBefore time.Time, notAfter time.Time, isIssuer bool, @@ -146,6 +151,7 @@ func NewPolicyCertificate( issuer, subject, serialNumber, + domain, notBefore, notAfter, isIssuer, diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index e3983632..97e06473 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -10,7 +10,7 @@ type PolicyCertificateSigningRequest struct { } type PolicyCertificateRevocationSigningRequest struct { - Subject string `json:",omitemptyu"` + Subject string `json:",omitempty"` } func NewPolicyCertificateSigningRequest( @@ -18,6 +18,7 @@ func NewPolicyCertificateSigningRequest( issuer string, subject string, serialNumber int, + domain string, notBefore time.Time, notAfter time.Time, isIssuer bool, @@ -36,6 +37,7 @@ func NewPolicyCertificateSigningRequest( issuer, subject, serialNumber, + domain, notBefore, notAfter, isIssuer, diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 91c7ca38..7c5f6b26 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -44,7 +44,8 @@ func (do *DomainOwner) GeneratePolCertSignRequest(issuer, domainName string, ver version, issuer, // issuer domainName, - 0, // serial number + 0, // serial number + domainName, // domain time.Now(), time.Now().Add(time.Microsecond), // not after false, // is issuer @@ -85,6 +86,7 @@ func (do *DomainOwner) RandomPolicyCertificate(domainName string, policy common. 
"", // issuer domainName, // subject 0, // serial number + domainName, // domain time.Now(), time.Now().Add(time.Microsecond), // not after false, // is issuer diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index bd3906bc..1a40b8d4 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -44,7 +44,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "92bzhDqqvk5x2YFWpeHNNKIS5lRQYnF2pbvdk7NprX0="), + RootHash: tests.MustDecodeBase64(t, "rHCFIFTtQjLK5dgSl/DS/wi8qctNmB6nrAI8gEq9AIM="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/logverifier/verifier.go b/pkg/logverifier/verifier.go index 1623fe2d..15377c78 100644 --- a/pkg/logverifier/verifier.go +++ b/pkg/logverifier/verifier.go @@ -1,7 +1,6 @@ package logverifier import ( - "encoding/base64" "fmt" "github.com/google/trillian" @@ -100,12 +99,12 @@ func (c *LogVerifier) VerifyInclusionByHash(trustedRoot *types.LogRootV1, leafHa return fmt.Errorf("VerifyInclusionByHash | Unexpected error: %w", err) } - deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, - leafHash, proof.Hashes) - if err != nil { - panic(err) - } - fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) + // deleteme, err := logProof.RootFromInclusionProof(c.hasher, uint64(proof.LeafIndex), trustedRoot.TreeSize, + // leafHash, proof.Hashes) + // if err != nil { + // panic(err) + // } + // fmt.Printf("deleteme calcRoot = %s\n", base64.StdEncoding.EncodeToString(deleteme)) } // This is a logProof.RootMismatchError, aka different hash values. 
return fmt.Errorf("verification failed: different hashes") diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 78457de8..10b390b8 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -335,7 +335,8 @@ func GetPCAndRPCs( 0, "", // CA name domainName, - 0, // serial number + 0, // serial number + domainName, time.Now(), // not before time.Now().Add(time.Microsecond), // not after false, // is issuer diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 80a18de7..ad1a5297 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -98,6 +98,7 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( version int, subject string, serialNumber int, + domain string, notBefore time.Time, notAfter time.Time, isIssuer bool, @@ -125,6 +126,7 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( pca.CAName, subject, serialNumber, + domain, notBefore, notAfter, isIssuer, diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index 87e0277f..b5af1a45 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -36,6 +36,7 @@ func TestCreateConfig(t *testing.T) { "pca root policy certificate", "pca root policy certificate", 13, + "fpki.com", util.TimeFromSecs(10), util.TimeFromSecs(10000), true, @@ -95,6 +96,7 @@ func TestPCAWorkflow(t *testing.T) { 1, "fpki.com", 1, + "fpki.com", notBefore, notAfter, true, diff --git a/pkg/pca/testdata/rpc.json b/pkg/pca/testdata/rpc.json index 08c450ae..dc4b1b32 100644 --- a/pkg/pca/testdata/rpc.json +++ b/pkg/pca/testdata/rpc.json @@ -1 +1 @@ -{"T":"*pc","O":{"Issuer":"pca root policy certificate","Subject":"pca root policy 
certificate","SerialNumber":13,"NotBefore":"1970-01-01T01:00:10+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB","TimeStamp":"1970-01-01T01:00:01+01:00","PolicyAttributes":{"TrustedCA":["pca"],"AllowedSubdomains":[""]},"IssuerSignature":"NHc1B/3XwwgYvCWCV311L67XKa9DuIe7fHq7UUjB9mrb1T2vbwPG2y2Dphy1b8Qpjop3mgOHLUW6HV5Swe0cCI2G0avuoVU9DxLQVtHkIM/5nYO1MHdSIp42+NqZYeLha/k7tNzrptUOUAgIPfIy1VHMlKDXqlGi2+I5C7BG9unVz9z7YZvd92ca/pELzmF5MHX4iU7lsjYaySIAZLzmCBCAdJAgJRyZR/mMAlg7JHmq5/c9xi/NTMiue/VabiTfmf/UwJOhgT8wpAoGD42rPFehLRtX75phEdPQLlxW9gW2MK6ThgQECN6xPZsjjEIxscVJLihvQWlD7RTIRW7R6Q=="}} \ No newline at end of file +{"T":"*pc","O":{"Issuer":"pca root policy certificate","Subject":"pca root policy 
certificate","SerialNumber":13,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:10+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB","TimeStamp":"1970-01-01T01:00:01+01:00","PolicyAttributes":{"TrustedCA":["pca"],"AllowedSubdomains":[""]},"IssuerSignature":"p4dASSEofyVNzuW/X7Qnwf/7D9LNjWDAZgGTDBQX646S2X9zvAFMbbEYGBtdxncpSk2TN/p0ZuvjS+Zn2B9X5hOgwVAFNys1vGyKI4zHVWmBEirc3EQKiipXmPSjRjY1yMwOOYXvmy4XPiLSMAW+TRtkGZxY1vK9+9yUifMzu4gsWOq93+E1tZHF9AHEVDvVGgUaoFITL75FhIHaPY04NXqYeIfCri3DdvTsAXTQbQTNO9nd06jgEhvzDHhz+d93DiHpFyXwHFnk6W37V0KveewIvZ3i16+umUKz+G2uYNdjNz3wq9DRRr8M8NKB0z1JR9TBHLXFf+nxbinheAAAQQ=="}} \ No newline at end of file diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 494c9d4c..cef4eca5 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -40,6 +40,7 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic for i := range docs { pc := RandomPolicyCertificate(t) pc.RawSubject = domainName + pc.Domain = domainName pc.Issuer = "c0.com" data, err := common.ToJSON(pc) @@ -115,6 +116,7 @@ func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest "Issuer", "RPC subject", rand.Intn(1000), // serial number + "domain.com", RandomTimeWithoutMonotonic(), RandomTimeWithoutMonotonic(), true, @@ -134,6 +136,7 @@ func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { "Issuer", "RPC subject", rand.Intn(1000), // serial number + "fpki.com", RandomTimeWithoutMonotonic(), RandomTimeWithoutMonotonic(), true, From e2c48ae212581e4c1e7b07e7cae24fe803fbe408 Mon 
Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 30 Jun 2023 10:57:04 +0200 Subject: [PATCH 174/187] Add a class diagram of the policy types. --- doc/policy objects class diagram.dia | Bin 0 -> 3326 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 doc/policy objects class diagram.dia diff --git a/doc/policy objects class diagram.dia b/doc/policy objects class diagram.dia new file mode 100644 index 0000000000000000000000000000000000000000..3b7807d55f8a2d86d51c6f9b4b0a87e0d65a4699 GIT binary patch literal 3326 zcmZ|Pdpr~BAHeajk{k<(6h{}A36tyGrItH$+1zhYgjg>v_LkpFf`0^LqYyUf)L|{p7KK7AF_0pf{MNE}Xo5AawOE zA(Fdn@EkCjDbxb-L(YS+#C+mPS@ExCQ1FUDtYNIg*|(O_N&lDuIrMq zv$VFh@R?Y=Ou|E)ND$VIPc|r-3upPkH-DfqIreaUWY6QrK}z_H1#QWB3yC#i$L|_C zEfo>w0vy53P4#Fr`eKilK(3?Z(;pb2jrl)8CWX9rLzVW|0Gwx}5eluc8O9Hbd@;v_h#q_- zJ}_!(fA2oWi+d-c<9_GXTl3G|oyk3z;LRBy^-RR8jNyqCWDNGMEE~!6Q$vD#sdf_u zn9mV8gY%&y&zY}YuL2DLr?+~G;%9;Eo#!!()xapH@E+eOdMQdogpw|=$OeHSu9#~1 zC)Q3671-JnGcctzLDXZGzc9;v*TMxm5x+YFHThD)u|*zS=7k?)T?F*YSx z%$9W$6Slz*>!mIaVq=~*Y#Nyr&vf6?vHVv1wc9tMW?mx8c7IpYib2{~PWEwe{c3sX z#R(hZFGi`};p=c=Zp#Hkp19PMy0=KmZuHc^d;gN4D&xZL0}BVpS8KZ*Y*j~ zO|~Up=A&^Thsnr!&t{dwr z%e6;6?rAJ%obLU?c=?HIEU z($@Ac>SS@VmSAF9jNaYLibFi1*Iw=bOja=5-fS(lM_Q*1E2gWlUpsmSoDqX+tA z3_YOJw?k^x{T*;B`&g;RxSx9Ag+uFafIpc$qy5E-NRJV1nG)~)Z z0OGH^oo&RsS5e*wg)lc7@soxZAj759vgJ*=7DlnZno@t1h9Lb4}j}3wIEeet4TFkg55G&Nqo!mOXob=>g2+pUk6y{~+X_x@_92P^TqH;Du ze2L<|*ukQ2yV6whxr6ChLyLuH-Gn(EvuaNP3-ZfKki^zW7~c@{vYvN%gghHx>RzND z?Z*%BN1%^xV$Ea4W!8^&r!|BfH#7z}Is8Gh?4OihhGl=1tphap)Z82kYHb7!Ph z9YJQ=pk=dc@smRMAZ%9Hl~G~&$r=QX2bTO5kx z)C`tVgvw-XhDM^(p=}j$oTJy{l?6|n2i23RPYLZp%K2bxCVxfW&b@>-64eUxJSj8F z&D?&nlX>2=5tuy`Ye>nxBGxuypCW*nj zM~zz|C{Rc0Vl-at>8Z$f!P91T0Tc!Eo-UI730w{e?xLrNarr=XmV zaZAOE{BT{_j}Ak$o6KKORj_^0EDKR?%7Z*-B=6CELI-^?1_gyg<=$!{mx#wvIa#kc z79?H?}&U&~7KljfOR zC1C_8^!Tuwa*47vyt9ivzF}{tGqb zQ+VHucWZ!cggj_Wha>Nu*TJ>1(+N^>|Ix^S^=RCSya&19_}<*mE=gt_W+?2`e{12{ 
z8VK^}9Yjggit`uR<&1c`F1#o9fz2K;1f@?tPt?=qu)KB;PMBQCNX+Ze3I&4m5tiBWei>_sP}GG*YD=A-N@C;!P(j^-m!bXydf zjD9wxh=Z~|=M$_O?j;WJrP`NvXnPesHS64b}>uD(#l&Eq@h%ntZ%Mq!wehQ^Q z59RVEeN_`sD`;qj5$TsfUi>i`g&Yz<-47Hmin!sR;I+tnr(f6?9CL8U=XXm;Oi-)2 zgJHX}gIlD`7z$Vc1brQ_Rh}Prj48pIguSDwWmYT-F0u1wQ=O6p= z>cdW)Cs+TT5S>toC3eYBf_+0|E(`G9r_hOV#TEvxbi+FmH?n2$3eofqsx zP1@_UNI7?d`Z8ch{hqT=KoLHzp6B6w7+NWJXL=dDUgrM&wzoB{-mE=tZ}g5~{~RDl zm!88IVzF}@SkZM<49i~2K&-N%toUqq@YPgt63~-^SP@Iuf)DV5#k>auYAQMJlHZUc z^6v$VoXIm~)Vo)GQW0tB$4~M= z=9rAH)ROGKG?c`H6wwe1!i~h$8ja@pY(lO z6={lbeVt0#5%(l;_Rb){rJyKj!+iWP9Cgw#UNA*hN~Z?DIi#n1q>C>O-t}@?$ciO} zIOcqb;)ntM!eK+5l}DE|2tA$3o7d<9r?wH#+IW!4!!OjD3H|n!{1}8LIZoi7w45rm zGNMK2M2OCSG-aihIDett`4w;19tePC+*r#1TXoA|B*wdW<6RqDLtEP?-mO#Hg!Fp$ z?%TL3zU2KCWFYI$^%y&te^27*$5@UledSYR|D$#$XoZ+QX8#0?^rx_@*l8LbG9WsO zbwmEBV?+O+v|xIV#_ZQn&WAE?%KG=wfF?p9esUEf(QKn(t;j)+TohcjR-d2m+X^)M zc>`?^)fb*tq(Z8zJu{@eB)$oZq#(YVs~>ty^D@Q52cX}{`vU}_%3 Date: Mon, 10 Jul 2023 16:10:12 +0200 Subject: [PATCH 175/187] Reproducible and deterministic RSA keys for tests. --- pkg/tests/random/random.go | 29 ++++++++++++++++++++++++++ pkg/tests/random/random_test.go | 36 +++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 pkg/tests/random/random_test.go diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index cef4eca5..c430efab 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -1,6 +1,8 @@ package random import ( + "crypto/rsa" + "io" "math/rand" "time" @@ -13,6 +15,26 @@ import ( "github.com/stretchr/testify/require" ) +// randReader is a type implementing io.Reader which _fools_ `rsa.GenerateKey` to always generate +// reproducible keys if the random source is deterministic (reproducible). +// Use this reader only in tests. 
+type randReader struct{} + +func (randReader) Read(p []byte) (int, error) { + if len(p) == 1 { + return 1, nil + } + return rand.Read(p) +} + +// NewRandReader returns an io.Reader that forces rsa.GenerateKeys to generate reproducible keys, +// iff the random source is deterministic. Make a prior call to `rand.Seed()` to obtain +// deterministic results, otherwise the random source will be pseudorandom but +// probably not reproducible. +func NewRandReader() io.Reader { + return randReader{} +} + func RandomBytesForTest(t tests.T, size int) []byte { buff := make([]byte, size) n, err := rand.Read(buff) @@ -155,3 +177,10 @@ func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { }, ) } + +// RandomRSAPrivateKey generates a NON-cryptographycally secure RSA private key. +func RandomRSAPrivateKey(t tests.T) *rsa.PrivateKey { + privateKeyPair, err := rsa.GenerateKey(NewRandReader(), 2048) + require.NoError(t, err) + return privateKeyPair +} diff --git a/pkg/tests/random/random_test.go b/pkg/tests/random/random_test.go new file mode 100644 index 00000000..09f0a767 --- /dev/null +++ b/pkg/tests/random/random_test.go @@ -0,0 +1,36 @@ +package random_test + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/netsec-ethz/fpki/pkg/tests/random" +) + +func TestRandomPolicyCertificate(t *testing.T) { + rand.Seed(0) + pc1 := random.RandomPolicyCertificate(t) + pc2 := random.RandomPolicyCertificate(t) + require.NotEqual(t, pc1, pc2) + + rand.Seed(0) + gotPc1 := random.RandomPolicyCertificate(t) + gotPc2 := random.RandomPolicyCertificate(t) + require.Equal(t, pc1, gotPc1) + require.Equal(t, pc2, gotPc2) +} + +func TestRandomRSAPrivateKey(t *testing.T) { + rand.Seed(0) + k1 := random.RandomRSAPrivateKey(t) + k2 := random.RandomRSAPrivateKey(t) + require.NotEqual(t, k1, k2) + + rand.Seed(0) + gotK1 := random.RandomRSAPrivateKey(t) + gotK2 := random.RandomRSAPrivateKey(t) + require.Equal(t, k1, gotK1) + 
require.Equal(t, k2, gotK2) +} From 21f7a00b518f88189c2a151279a738c084e59150 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 10 Jul 2023 15:28:19 +0200 Subject: [PATCH 176/187] Including generation of policy certificates for test. --- pkg/common/crypto/crypto_test.go | 69 ++++++++++++++++++++++++++++++++ tests/testdata/issuer_cert.pem | 26 ++++++++++++ tests/testdata/issuer_key.pem | 27 +++++++++++++ tests/testdata/owner_cert.pem | 32 +++++++++++++++ tests/testdata/owner_key.pem | 27 +++++++++++++ 5 files changed, 181 insertions(+) create mode 100644 tests/testdata/issuer_cert.pem create mode 100644 tests/testdata/issuer_key.pem create mode 100644 tests/testdata/owner_cert.pem create mode 100644 tests/testdata/owner_key.pem diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 3cf24a17..56fa7636 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -3,6 +3,7 @@ package crypto_test import ( libcrypto "crypto" "crypto/rsa" + "io/ioutil" "testing" ctx509 "github.com/google/certificate-transparency-go/x509" @@ -11,10 +12,78 @@ import ( "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/common/crypto" + "github.com/netsec-ethz/fpki/pkg/tests" "github.com/netsec-ethz/fpki/pkg/tests/random" "github.com/netsec-ethz/fpki/pkg/util" ) +var update = tests.UpdateGoldenFiles() + +func TestCreatePolicyCertificatesForTests(t *testing.T) { + if !*update { + t.Skip("Not updating golden files: flag not set") + } + // Obtain a new pair for the root issuer. + issuerCert, issuerKey := randomPolCertAndKey(t) + + // Objain a new pair for the owner. + ownerCert, ownerKey := randomPolCertAndKey(t) + // The owner will be issued by the root issuer. + err := crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, ownerCert) + require.NoError(t, err) + + // Store all certs and keys. Filename -> payload. 
+ const ( + typeIssuerCert int = iota // Even numbers are certs + typeIssuerKey // Odd numbers are keys + typeOwnerCert + typeOwnerKey + ) + filenames := map[int]string{ + typeIssuerCert: "../../../tests/testdata/issuer_cert.pem", + typeIssuerKey: "../../../tests/testdata/issuer_key.pem", + typeOwnerCert: "../../../tests/testdata/owner_cert.pem", + typeOwnerKey: "../../../tests/testdata/owner_key.pem", + } + + payloads := make(map[int][]byte) + // Issuer pair: + data, err := util.PolicyCertificateToPEM(issuerCert) + require.NoError(t, err) + payloads[typeIssuerCert] = data + payloads[typeIssuerKey] = util.RSAKeyToPEM(issuerKey) + // Owner pair: + data, err = util.PolicyCertificateToPEM(ownerCert) + require.NoError(t, err) + payloads[typeOwnerCert] = data + payloads[typeOwnerKey] = util.RSAKeyToPEM(ownerKey) + + // Write all files. + for _type, payload := range payloads { + err = ioutil.WriteFile(filenames[_type], payload, 0666) + require.NoError(t, err) + } + + // For safety of these tests, check again the created files. 
+ expectedObjects := map[int]any{ + typeIssuerCert: issuerCert, + typeIssuerKey: issuerKey, + typeOwnerCert: ownerCert, + typeOwnerKey: ownerKey, + } + for _type, filename := range filenames { + var gotObj any + if _type%2 == 0 { + gotObj, err = util.PolicyCertificateFromPEMFile(filename) + require.NoError(t, err) + } else { + gotObj, err = util.RSAKeyFromPEMFile(filename) + require.NoError(t, err) + } + require.Equal(t, expectedObjects[_type], gotObj) + } +} + func TestSignatureOfPolicyCertSignRequest(t *testing.T) { ownerPriv, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") require.NoError(t, err, "load RSA key error") diff --git a/tests/testdata/issuer_cert.pem b/tests/testdata/issuer_cert.pem new file mode 100644 index 00000000..f6eb88ee --- /dev/null +++ b/tests/testdata/issuer_cert.pem @@ -0,0 +1,26 @@ +-----BEGIN FPKI MARSHABLE DOCUMENT----- +eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo0LCJTZXJpYWxOdW1iZXIiOjUxNCwi +RG9tYWluIjoiZnBraS5jb20iLCJOb3RCZWZvcmUiOiIyMDUzLTExLTEzVDE2OjA3 +OjE3WiIsIk5vdEFmdGVyIjoiMTk4OC0wMS0wM1QxMTo1OTo0OFoiLCJJc0lzc3Vl +ciI6dHJ1ZSwiUHVibGljS2V5IjoiTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFP +Q0FROEFNSUlCQ2dLQ0FRRUFwZm1hNlk5UTFaQnVuNmN1Y2U0TDRGQjRBd2dZZW5X +M3Nib2hrZXFNTTVjSHF2ZDMwbE5OUFpvUnFEVXdnVU1JTlRIQ1NBVzV6WkV2Y1V6 +blA0UjRDM2xNU1h1UTBWMzVaSGs0R1dxdTJzbUFBZG1JT3dBWUhyYlBoVjJaVEpx +b2lqRlN5UTcySlN1SWtJcDE2ekdrRitBMzZMOGlyTzJjWVRRVTBOdmtYVVhJK3E2 +K21JL0ZOaWd5ZmNDWG5nLzMrL1VQcVlBL1RiekxEMEZsZEFaL1BKZ0xnU2dLUG1G +WWtoSWM3ZFp6UEFyclJQVEVjbTFhWmJZbTk0b1JKT1Y2UW5KblBsVmhiK0NTU2Jm +UEE1bzRrWjZ5K1E5YXAyZ01TZ1JneWpnVTdZR0RMV0VqTGtORlhOWVhORXlCWW9L +SmZlTEt3d2ZGT0l4YjFoajVmbXluL3dJREFRQUIiLCJUaW1lU3RhbXAiOiIxOTk0 +LTEyLTAzVDAyOjAwOjQxWiIsIlBvbGljeUF0dHJpYnV0ZXMiOnt9LCJPd25lclNp +Z25hdHVyZSI6IjR0YStJTy9OYk9xRXRwSmVZSHZnWTNGdmx0M04wQjExQkZ3L0FB +K0tlV3M9IiwiT3duZXJIYXNoIjoiem14UkxEZ0Jxc3J1MzYxYlVHWms2TURrcDNI +czRMaTN3Wlpka1lFbEczdz0iLCJJc3N1ZXJTaWduYXR1cmUiOiJGUUczMllSclp1 
+c0N0WDVjMm50c3VtaVIxaGE5YUd3M3VEUmhPc2k2b2l3PSIsIklzc3Vlckhhc2gi +OiJBSS8rYUlOU2Mwcms0L0VoZXMxZmd5Y0lGREFZWjdYUVp4R3lPQUFjZVZjPSIs +IlNQQ1RzIjpbeyJWZXJzaW9uIjo4LCJMb2dJRCI6Im5KeWk3ODNTMFMwcWVRPT0i +LCJBZGRlZFRTIjoiMjAxMC0wNi0yM1QwNzoyNDozNloiLCJTaWduYXR1cmUiOiIw +SFNvS0FycC9od011V3JUSXRZaWdpbGZ2K0VlSnFRekIyMjF3VVJNT2pRPSJ9LHsi +TG9nSUQiOiIweXIzN1R1TS9wQlBrdz09IiwiQWRkZWRUUyI6IjIwNTAtMDUtMDdU +MTY6MTk6MDBaIiwiU2lnbmF0dXJlIjoiK1BCdEtielpoa1FkSlprRzFwck5pVXVX +aXVudzY1MldYT2FrYVR4T3ZvZz0ifV19fQ== +-----END FPKI MARSHABLE DOCUMENT----- diff --git a/tests/testdata/issuer_key.pem b/tests/testdata/issuer_key.pem new file mode 100644 index 00000000..787f1c78 --- /dev/null +++ b/tests/testdata/issuer_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd3 +0lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmI +OwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+ +mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTE +cm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEj +LkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQABAoIBAFhiEdc8BSyky/Pi +n/31aIeb8jyTDA3yL87JsmS0hNJYI7IsM7dhuqkKKUrsgBblbjJ9e9rEicvo+12X +OTF+xjo97pzW2yhSpaKWVoRaZiCF2s+hWkUo6K86ZEm9rQu48/UbDN2aRQNIKmHm +CaMj/SDluoALgFib84FjrY4G+pWb3sPFJB0bdIW60HooC0C9uSi6li8N51bhJGO/ +9ULakLiJhvNjGwejb24pT5aLutooUVIJgAB9lUF7T7rkvhYxL03NmgWdc8LbYM5K +FnU1J3+VlLNszb4P6x4e6ebZWGHLIY4d1l0WAeDrbmubTXmSx/GE+R2vRf9TWj65 +nMF4s+ECgYEA3FLqErqChbXxhg3VpV6aL5OzDW93PttdDQ0Ex2LvC8qTO4NLw0PV +bNvh++BU3Qo6RIN6wD/TIakGwk88mQ3TTL5/Wz+wZS1zseBBd9jurfIXhSvjsdd6 +/9FJOrCCJAiyISnpdIZRYE6oMXoVEfPSW2CpTzK9v8/j5hVWGC7Q7DECgYEAwNnE +c7/VNTj2OugVVbm+BetHO3BD+dTv3CLgg27gbGrSJnZs8atT3NpeTOA1GrXb+y1J +f9n6gFaDRNWw4RreaGlwlGr6crAnw7Izj3Tv41juWZgUha0mrLqeLHKE7H3S6n7E +Kl54Y/s1IKzCDYFGPNsw/4ut+9MG4pTJR8tCOy8CgYBuUlZC6oFQ0roNlf/UvnoW +OK7/m0DqiI0fa9VowXFRRi5TLntRWzYTPEb8CGh2BhkHFY7xlQPzZt+rjGGbggC7 
+/OdomYuKHihAeTpO+KELGJ4OYNuywWMllXZFnPyNbo1DbekoH9m/j6NIowjJ5H0A +pHYi4hWBZmeHXket9ENJAQKBgC6O5HzFXWXiAKAsE6qEkzm+mY3iBqc0H0xYJzfT +9t5105JxmM+nezGgjo+kuW1fZmJ3hn0VVQLS4BIoPPG7mOJAQmuygBBcMt1uEkCa +DKgoejKrIpk7OlENJM66PxkRL3BpdlZ8RDhvF15yFs9H3Hsot+WaBUD8q3bcUNPC +YJUFAoGBAM8zv4aJXwfRfjxk7up4jBOkOk3if7YVc8e+X8pv4cTjrFRtjKE98fhb +nFHS184SIfJ3JimwD4flaH8E8vtvwOrZEIy7k4h1iHxOi/8yyLnddb4HybfjPwCg +YUuxGoDG4EjqysiJdNHG/D9g2J/GSLn08fMC/aPAWCiuzgdWHPz/ +-----END RSA PRIVATE KEY----- diff --git a/tests/testdata/owner_cert.pem b/tests/testdata/owner_cert.pem new file mode 100644 index 00000000..b53ed320 --- /dev/null +++ b/tests/testdata/owner_cert.pem @@ -0,0 +1,32 @@ +-----BEGIN FPKI MARSHABLE DOCUMENT----- +eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo5LCJTZXJpYWxOdW1iZXIiOjQwLCJE +b21haW4iOiJmcGtpLmNvbSIsIk5vdEJlZm9yZSI6IjIwMTMtMDEtMjJUMTc6MzI6 +MTJaIiwiTm90QWZ0ZXIiOiIxOTA1LTAzLTE1VDE5OjU1OjE2WiIsIklzSXNzdWVy +Ijp0cnVlLCJQdWJsaWNLZXkiOiJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9D +QVE4QU1JSUJDZ0tDQVFFQTVtdEdhZWZrNzFhR1lFemNJOHZqYkUycHV4c3NZNExm +MVY3dDBIMEppNENTbVRuQWtLRXNlL24ydm8va2xRbWExR3hPUkc2RHBWbTZnZ2Ro +b05CWlI1T2QraTY3UkI5cUZqMzVTQkhsaDl1d1VuU05ac0lsL2lPQ004dWJEL3Nr +OWNNckVIVWdUQjRiZ0xObCtpbXlwQ2x0NHpqYWE0OFVXd3Zvek9ZYVZVVi9yNEZn +SVZWNmkrR0hpU1VHNnlEa016c2gwWDhUSlBJNlp5amNyMTRZUHZPeTB5K1RucXY3 +N3UxbjBNUnlTTHFGbVRySTBCVG81SEs3SDZwUDVwRnA5RVRUZ09nbnUycldEbTRu +TWhlZXllcGZvUWJtRmpTN1VTRm00R0k0SjZ1RTZNVmlsdUJTTTF3Mmg2SXRTTFFT +M0VSc0dlMFBVQVREV0VUcEhmQlJIUUlEQVFBQiIsIlRpbWVTdGFtcCI6IjE5MjYt +MDctMzBUMDI6MDM6MzhaIiwiUG9saWN5QXR0cmlidXRlcyI6e30sIk93bmVyU2ln +bmF0dXJlIjoiaDR5STNzMDNQUG93QW1ac21nWHJHTGxMN1ZxUUNZY2NWMkhVc1dt +ZW55bz0iLCJPd25lckhhc2giOiJpMG9xVHNmdjkyWjBWYUo2VGRQNzBDR2FqczJX +clNabFhDb01Wdm0xV0VZPSIsIklzc3VlclNpZ25hdHVyZSI6ImxHZUpRSWhrOVUw +Q2RiZDJoRXlvU0dJZXZlZGx4UDFJRDZqREtFSXNTekxiT3c5bngvNktnd2ZmYkt5 +cUFqSnQwOU5BQk5VODRRNUZBSlRYVXJvOEx4UFJGMXp3MlorTXBYVUgwVm9ZV0or +YUF1bzhnR3NzNHMvTStQWmhBVTVub3FNMDNVZVZSdFNGRGFrVUVRTEhJYnB4YW9B 
+QkxqYjBBdnd4b2JFWkpremFFR3YxUytqWWhERjZWNFlVWUdBcUVZUEduSW94WEZI +M0J2RnhOZTYvd3JGeitBWHFaQ2RWd2tCb290VHlwaCtCSnNxak5WYXhwMlVISHpj +M1FvNXhXQTBHTVNhNlVUamdkS1crd2dnRnJZYzc3c1Z0VU1BUHVWYlBHb1k2OURK +Y1BQR01naFRXaUF3KzNFNzcvWWp0ODZrRmlQWjJrVXE3MkhEd0FkRkRTUT09Iiwi +SXNzdWVySGFzaCI6IkFjakZka3RuZ3VZY0taMTk2R3hmd2lZd1kyVGRVWUUyV29M +M1NOazR2aVU9IiwiU1BDVHMiOlt7IlZlcnNpb24iOjUsIkxvZ0lEIjoiYW0wOUtp +THFTeEEwN3c9PSIsIkFkZGVkVFMiOiIyMDc3LTAxLTA5VDE5OjI3OjI4WiIsIlNp +Z25hdHVyZSI6ImdPcHZycURmQlFpYVVXd2x1Mmt3NjBkVmRLVy9MTkZ1bEhMWkxu +cTZ1R2s9In0seyJWZXJzaW9uIjoyLCJMb2dJRCI6InQ2b1ExS05qaGlCdG93PT0i +LCJBZGRlZFRTIjoiMjA2OC0wNS0yMFQxNzowNDo1N1oiLCJTaWduYXR1cmUiOiJW +YURKcXFudTdKQUUxaDk2cVFZN24vajYxZjZxVVFjekJ0NTdFSGpxNkhNPSJ9XX19 +-----END FPKI MARSHABLE DOCUMENT----- diff --git a/tests/testdata/owner_key.pem b/tests/testdata/owner_key.pem new file mode 100644 index 00000000..7f168780 --- /dev/null +++ b/tests/testdata/owner_key.pem @@ -0,0 +1,27 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEA5mtGaefk71aGYEzcI8vjbE2puxssY4Lf1V7t0H0Ji4CSmTnA +kKEse/n2vo/klQma1GxORG6DpVm6ggdhoNBZR5Od+i67RB9qFj35SBHlh9uwUnSN +ZsIl/iOCM8ubD/sk9cMrEHUgTB4bgLNl+imypClt4zjaa48UWwvozOYaVUV/r4Fg +IVV6i+GHiSUG6yDkMzsh0X8TJPI6Zyjcr14YPvOy0y+Tnqv77u1n0MRySLqFmTrI +0BTo5HK7H6pP5pFp9ETTgOgnu2rWDm4nMheeyepfoQbmFjS7USFm4GI4J6uE6MVi +luBSM1w2h6ItSLQS3ERsGe0PUATDWETpHfBRHQIDAQABAoIBAGg/sszpqQ+MEyHw +xEzBy3RNsvQ/eBbRuJmd6HTCjZX7JtiCY5hMA4dX8YYILkAr50wicErPwWoHLbX0 +xvvuJ+aVaA0wJXPNzr6fbXBpAHM2NxEnQG50eqH/yMHMZayWUQOMOKiqhWFK4eCZ +yhKj2Poi8F+0ckXlpy0GN1604Mcg3Xw/1MxkIWBgCZl0dMIGdFzMnhrcgskI6WWq +wRizqe2cgOcli9NOleD6+boIWT2TRi2ZNWvH7ABBVbtWHHgx1bScn699J1URuQf0 +8wBYVlnTBXXyn05BTTfhCsKM3TNRToZ9h8pAvdUlh8Z+U5h2T8NycxMoJFSMawhG +Y90/sLUCgYEA52e2TfLOOuZCuWfZPGq+ehGzop9G7iOZBu26okwb7B0IzHkJs0IC +qMtin7pb6ZtgnrsCuFwNkK5csGIUX5RBdnRLQImB6WZ2LVHRL+G5VP9ePx/LMrEu +d7lolntbxSRQ6yqfL6PFojV3d5y5YohOek14CFy0Uka+dikZdhkQ+E8CgYEA/ui7 +kHmDZC5Dc5MmLjM9ZfM1kmKJh5BPHymnOL5Nvq4F0MMYoegRQar2z1TJZNJeTG2f 
+ZKHy4mkZpHEb7Y5bid7XBx2WhDYxbm+H7z9ZJSgd8u1p5dwuvC3Tkr6Ph6M6z/bx +Eq/k9XFbxYodgBsUA3Is9C+BSKfYO6+uBng/2NMCgYEAmVnSp497+e50U3gtr2sx +9aVbfS6+i7xNghjzMPDtJCkNlquzsPbS2leGcUHH/WT5v9dA8zoVx0a1qDInPDv8 +sepZoFhAHpPHRztMPMYC0wA0zfJc76gZmj/lvqvbYicx9NaXDbQjmd6C4GKdPF4G +W+iQ3kHJQikduSJ9fBUaeOMCgYEArw+GQKIy6gobqLPV9MSb1fhBQlcJvGAO9ofU +n9OR8PHybHyP3iFSOJcBCUJhTSxsNnDSlnniJDupsJU9aNfrZbP0WJ6pUAMnz4Ap +CtWmky4J41vI7mNYdUPDmXPlfnFLYqdkyECkLSu8qt0/hUsTY4p19Pzq/MrIZbUw +/qB2uKECgYEAs7rheQs2DjOhK3WgWarW1Gzgi3vJ81vIMPy30vH7waNhTLQJBYbG +JQ/Q6pdQ1VkFYBqg5mfFEOdOFJ4Dk7+fzG+5Bf8Qd4GOxwdmNPupx6lUd9EfI1LT +FwhzuuNusaw/+abGNL4XoNsAA0CKBbUJ/LgrB8e8maoCviLYYeIfg48= +-----END RSA PRIVATE KEY----- From d7d7d59abe4b3225a10074a99da80af1dabd735e Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 10 Jul 2023 16:23:57 +0200 Subject: [PATCH 177/187] Change format for policy certificates to JSON in the tests. --- pkg/common/crypto/crypto_test.go | 14 ++++++++------ tests/testdata/issuer_cert.json | 1 + tests/testdata/issuer_cert.pem | 26 -------------------------- tests/testdata/owner_cert.json | 1 + tests/testdata/owner_cert.pem | 32 -------------------------------- 5 files changed, 10 insertions(+), 64 deletions(-) create mode 100644 tests/testdata/issuer_cert.json delete mode 100644 tests/testdata/issuer_cert.pem create mode 100644 tests/testdata/owner_cert.json delete mode 100644 tests/testdata/owner_cert.pem diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 56fa7636..9d38eab1 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -20,16 +20,18 @@ import ( var update = tests.UpdateGoldenFiles() func TestCreatePolicyCertificatesForTests(t *testing.T) { + rand.Seed(0) if !*update { t.Skip("Not updating golden files: flag not set") } + t.Log("Updating policy certificate files for tests/testdata") // Obtain a new pair for the root issuer. 
issuerCert, issuerKey := randomPolCertAndKey(t) // Objain a new pair for the owner. ownerCert, ownerKey := randomPolCertAndKey(t) // The owner will be issued by the root issuer. - err := crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, ownerCert) + err := crypto_pkg.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, ownerCert) require.NoError(t, err) // Store all certs and keys. Filename -> payload. @@ -40,20 +42,20 @@ func TestCreatePolicyCertificatesForTests(t *testing.T) { typeOwnerKey ) filenames := map[int]string{ - typeIssuerCert: "../../../tests/testdata/issuer_cert.pem", + typeIssuerCert: "../../../tests/testdata/issuer_cert.json", typeIssuerKey: "../../../tests/testdata/issuer_key.pem", - typeOwnerCert: "../../../tests/testdata/owner_cert.pem", + typeOwnerCert: "../../../tests/testdata/owner_cert.json", typeOwnerKey: "../../../tests/testdata/owner_key.pem", } payloads := make(map[int][]byte) // Issuer pair: - data, err := util.PolicyCertificateToPEM(issuerCert) + data, err := util.PolicyCertificateToBytes(issuerCert) require.NoError(t, err) payloads[typeIssuerCert] = data payloads[typeIssuerKey] = util.RSAKeyToPEM(issuerKey) // Owner pair: - data, err = util.PolicyCertificateToPEM(ownerCert) + data, err = util.PolicyCertificateToBytes(ownerCert) require.NoError(t, err) payloads[typeOwnerCert] = data payloads[typeOwnerKey] = util.RSAKeyToPEM(ownerKey) @@ -74,7 +76,7 @@ func TestCreatePolicyCertificatesForTests(t *testing.T) { for _type, filename := range filenames { var gotObj any if _type%2 == 0 { - gotObj, err = util.PolicyCertificateFromPEMFile(filename) + gotObj, err = util.PolicyCertificateFromFile(filename) require.NoError(t, err) } else { gotObj, err = util.RSAKeyFromPEMFile(filename) diff --git a/tests/testdata/issuer_cert.json b/tests/testdata/issuer_cert.json new file mode 100644 index 00000000..92b5b54a --- /dev/null +++ b/tests/testdata/issuer_cert.json @@ -0,0 +1 @@ 
+{"T":"*pc","O":{"Version":4,"SerialNumber":514,"Domain":"fpki.com","NotBefore":"2053-11-13T16:07:17Z","NotAfter":"1988-01-03T11:59:48Z","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB","TimeStamp":"1994-12-03T02:00:41Z","PolicyAttributes":{},"OwnerSignature":"4ta+IO/NbOqEtpJeYHvgY3Fvlt3N0B11BFw/AA+KeWs=","OwnerHash":"zmxRLDgBqsru361bUGZk6MDkp3Hs4Li3wZZdkYElG3w=","IssuerSignature":"FQG32YRrZusCtX5c2ntsumiR1ha9aGw3uDRhOsi6oiw=","IssuerHash":"AI/+aINSc0rk4/Ehes1fgycIFDAYZ7XQZxGyOAAceVc=","SPCTs":[{"Version":8,"LogID":"nJyi783S0S0qeQ==","AddedTS":"2010-06-23T07:24:36Z","Signature":"0HSoKArp/hwMuWrTItYigilfv+EeJqQzB221wURMOjQ="},{"LogID":"0yr37TuM/pBPkw==","AddedTS":"2050-05-07T16:19:00Z","Signature":"+PBtKbzZhkQdJZkG1prNiUuWiunw652WXOakaTxOvog="}]}} \ No newline at end of file diff --git a/tests/testdata/issuer_cert.pem b/tests/testdata/issuer_cert.pem deleted file mode 100644 index f6eb88ee..00000000 --- a/tests/testdata/issuer_cert.pem +++ /dev/null @@ -1,26 +0,0 @@ ------BEGIN FPKI MARSHABLE DOCUMENT----- -eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo0LCJTZXJpYWxOdW1iZXIiOjUxNCwi -RG9tYWluIjoiZnBraS5jb20iLCJOb3RCZWZvcmUiOiIyMDUzLTExLTEzVDE2OjA3 -OjE3WiIsIk5vdEFmdGVyIjoiMTk4OC0wMS0wM1QxMTo1OTo0OFoiLCJJc0lzc3Vl -ciI6dHJ1ZSwiUHVibGljS2V5IjoiTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFP -Q0FROEFNSUlCQ2dLQ0FRRUFwZm1hNlk5UTFaQnVuNmN1Y2U0TDRGQjRBd2dZZW5X -M3Nib2hrZXFNTTVjSHF2ZDMwbE5OUFpvUnFEVXdnVU1JTlRIQ1NBVzV6WkV2Y1V6 -blA0UjRDM2xNU1h1UTBWMzVaSGs0R1dxdTJzbUFBZG1JT3dBWUhyYlBoVjJaVEpx -b2lqRlN5UTcySlN1SWtJcDE2ekdrRitBMzZMOGlyTzJjWVRRVTBOdmtYVVhJK3E2 -K21JL0ZOaWd5ZmNDWG5nLzMrL1VQcVlBL1RiekxEMEZsZEFaL1BKZ0xnU2dLUG1G 
-WWtoSWM3ZFp6UEFyclJQVEVjbTFhWmJZbTk0b1JKT1Y2UW5KblBsVmhiK0NTU2Jm -UEE1bzRrWjZ5K1E5YXAyZ01TZ1JneWpnVTdZR0RMV0VqTGtORlhOWVhORXlCWW9L -SmZlTEt3d2ZGT0l4YjFoajVmbXluL3dJREFRQUIiLCJUaW1lU3RhbXAiOiIxOTk0 -LTEyLTAzVDAyOjAwOjQxWiIsIlBvbGljeUF0dHJpYnV0ZXMiOnt9LCJPd25lclNp -Z25hdHVyZSI6IjR0YStJTy9OYk9xRXRwSmVZSHZnWTNGdmx0M04wQjExQkZ3L0FB -K0tlV3M9IiwiT3duZXJIYXNoIjoiem14UkxEZ0Jxc3J1MzYxYlVHWms2TURrcDNI -czRMaTN3Wlpka1lFbEczdz0iLCJJc3N1ZXJTaWduYXR1cmUiOiJGUUczMllSclp1 -c0N0WDVjMm50c3VtaVIxaGE5YUd3M3VEUmhPc2k2b2l3PSIsIklzc3Vlckhhc2gi -OiJBSS8rYUlOU2Mwcms0L0VoZXMxZmd5Y0lGREFZWjdYUVp4R3lPQUFjZVZjPSIs -IlNQQ1RzIjpbeyJWZXJzaW9uIjo4LCJMb2dJRCI6Im5KeWk3ODNTMFMwcWVRPT0i -LCJBZGRlZFRTIjoiMjAxMC0wNi0yM1QwNzoyNDozNloiLCJTaWduYXR1cmUiOiIw -SFNvS0FycC9od011V3JUSXRZaWdpbGZ2K0VlSnFRekIyMjF3VVJNT2pRPSJ9LHsi -TG9nSUQiOiIweXIzN1R1TS9wQlBrdz09IiwiQWRkZWRUUyI6IjIwNTAtMDUtMDdU -MTY6MTk6MDBaIiwiU2lnbmF0dXJlIjoiK1BCdEtielpoa1FkSlprRzFwck5pVXVX -aXVudzY1MldYT2FrYVR4T3ZvZz0ifV19fQ== ------END FPKI MARSHABLE DOCUMENT----- diff --git a/tests/testdata/owner_cert.json b/tests/testdata/owner_cert.json new file mode 100644 index 00000000..d5bbbf78 --- /dev/null +++ b/tests/testdata/owner_cert.json @@ -0,0 +1 @@ 
+{"T":"*pc","O":{"Version":9,"SerialNumber":40,"Domain":"fpki.com","NotBefore":"2013-01-22T17:32:12Z","NotAfter":"1905-03-15T19:55:16Z","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5mtGaefk71aGYEzcI8vjbE2puxssY4Lf1V7t0H0Ji4CSmTnAkKEse/n2vo/klQma1GxORG6DpVm6ggdhoNBZR5Od+i67RB9qFj35SBHlh9uwUnSNZsIl/iOCM8ubD/sk9cMrEHUgTB4bgLNl+imypClt4zjaa48UWwvozOYaVUV/r4FgIVV6i+GHiSUG6yDkMzsh0X8TJPI6Zyjcr14YPvOy0y+Tnqv77u1n0MRySLqFmTrI0BTo5HK7H6pP5pFp9ETTgOgnu2rWDm4nMheeyepfoQbmFjS7USFm4GI4J6uE6MViluBSM1w2h6ItSLQS3ERsGe0PUATDWETpHfBRHQIDAQAB","TimeStamp":"1926-07-30T02:03:38Z","PolicyAttributes":{},"OwnerSignature":"h4yI3s03PPowAmZsmgXrGLlL7VqQCYccV2HUsWmenyo=","OwnerHash":"i0oqTsfv92Z0VaJ6TdP70CGajs2WrSZlXCoMVvm1WEY=","IssuerSignature":"lGeJQIhk9U0Cdbd2hEyoSGIevedlxP1ID6jDKEIsSzLbOw9nx/6KgwffbKyqAjJt09NABNU84Q5FAJTXUro8LxPRF1zw2Z+MpXUH0VoYWJ+aAuo8gGss4s/M+PZhAU5noqM03UeVRtSFDakUEQLHIbpxaoABLjb0AvwxobEZJkzaEGv1S+jYhDF6V4YUYGAqEYPGnIoxXFH3BvFxNe6/wrFz+AXqZCdVwkBootTyph+BJsqjNVaxp2UHHzc3Qo5xWA0GMSa6UTjgdKW+wggFrYc77sVtUMAPuVbPGoY69DJcPPGMghTWiAw+3E77/Yjt86kFiPZ2kUq72HDwAdFDSQ==","IssuerHash":"AcjFdktnguYcKZ196GxfwiYwY2TdUYE2WoL3SNk4viU=","SPCTs":[{"Version":5,"LogID":"am09KiLqSxA07w==","AddedTS":"2077-01-09T19:27:28Z","Signature":"gOpvrqDfBQiaUWwlu2kw60dVdKW/LNFulHLZLnq6uGk="},{"Version":2,"LogID":"t6oQ1KNjhiBtow==","AddedTS":"2068-05-20T17:04:57Z","Signature":"VaDJqqnu7JAE1h96qQY7n/j61f6qUQczBt57EHjq6HM="}]}} \ No newline at end of file diff --git a/tests/testdata/owner_cert.pem b/tests/testdata/owner_cert.pem deleted file mode 100644 index b53ed320..00000000 --- a/tests/testdata/owner_cert.pem +++ /dev/null @@ -1,32 +0,0 @@ ------BEGIN FPKI MARSHABLE DOCUMENT----- -eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo5LCJTZXJpYWxOdW1iZXIiOjQwLCJE -b21haW4iOiJmcGtpLmNvbSIsIk5vdEJlZm9yZSI6IjIwMTMtMDEtMjJUMTc6MzI6 -MTJaIiwiTm90QWZ0ZXIiOiIxOTA1LTAzLTE1VDE5OjU1OjE2WiIsIklzSXNzdWVy -Ijp0cnVlLCJQdWJsaWNLZXkiOiJNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9D 
-QVE4QU1JSUJDZ0tDQVFFQTVtdEdhZWZrNzFhR1lFemNJOHZqYkUycHV4c3NZNExm -MVY3dDBIMEppNENTbVRuQWtLRXNlL24ydm8va2xRbWExR3hPUkc2RHBWbTZnZ2Ro -b05CWlI1T2QraTY3UkI5cUZqMzVTQkhsaDl1d1VuU05ac0lsL2lPQ004dWJEL3Nr -OWNNckVIVWdUQjRiZ0xObCtpbXlwQ2x0NHpqYWE0OFVXd3Zvek9ZYVZVVi9yNEZn -SVZWNmkrR0hpU1VHNnlEa016c2gwWDhUSlBJNlp5amNyMTRZUHZPeTB5K1RucXY3 -N3UxbjBNUnlTTHFGbVRySTBCVG81SEs3SDZwUDVwRnA5RVRUZ09nbnUycldEbTRu -TWhlZXllcGZvUWJtRmpTN1VTRm00R0k0SjZ1RTZNVmlsdUJTTTF3Mmg2SXRTTFFT -M0VSc0dlMFBVQVREV0VUcEhmQlJIUUlEQVFBQiIsIlRpbWVTdGFtcCI6IjE5MjYt -MDctMzBUMDI6MDM6MzhaIiwiUG9saWN5QXR0cmlidXRlcyI6e30sIk93bmVyU2ln -bmF0dXJlIjoiaDR5STNzMDNQUG93QW1ac21nWHJHTGxMN1ZxUUNZY2NWMkhVc1dt -ZW55bz0iLCJPd25lckhhc2giOiJpMG9xVHNmdjkyWjBWYUo2VGRQNzBDR2FqczJX -clNabFhDb01Wdm0xV0VZPSIsIklzc3VlclNpZ25hdHVyZSI6ImxHZUpRSWhrOVUw -Q2RiZDJoRXlvU0dJZXZlZGx4UDFJRDZqREtFSXNTekxiT3c5bngvNktnd2ZmYkt5 -cUFqSnQwOU5BQk5VODRRNUZBSlRYVXJvOEx4UFJGMXp3MlorTXBYVUgwVm9ZV0or -YUF1bzhnR3NzNHMvTStQWmhBVTVub3FNMDNVZVZSdFNGRGFrVUVRTEhJYnB4YW9B -QkxqYjBBdnd4b2JFWkpremFFR3YxUytqWWhERjZWNFlVWUdBcUVZUEduSW94WEZI -M0J2RnhOZTYvd3JGeitBWHFaQ2RWd2tCb290VHlwaCtCSnNxak5WYXhwMlVISHpj -M1FvNXhXQTBHTVNhNlVUamdkS1crd2dnRnJZYzc3c1Z0VU1BUHVWYlBHb1k2OURK -Y1BQR01naFRXaUF3KzNFNzcvWWp0ODZrRmlQWjJrVXE3MkhEd0FkRkRTUT09Iiwi -SXNzdWVySGFzaCI6IkFjakZka3RuZ3VZY0taMTk2R3hmd2lZd1kyVGRVWUUyV29M -M1NOazR2aVU9IiwiU1BDVHMiOlt7IlZlcnNpb24iOjUsIkxvZ0lEIjoiYW0wOUtp -THFTeEEwN3c9PSIsIkFkZGVkVFMiOiIyMDc3LTAxLTA5VDE5OjI3OjI4WiIsIlNp -Z25hdHVyZSI6ImdPcHZycURmQlFpYVVXd2x1Mmt3NjBkVmRLVy9MTkZ1bEhMWkxu -cTZ1R2s9In0seyJWZXJzaW9uIjoyLCJMb2dJRCI6InQ2b1ExS05qaGlCdG93PT0i -LCJBZGRlZFRTIjoiMjA2OC0wNS0yMFQxNzowNDo1N1oiLCJTaWduYXR1cmUiOiJW -YURKcXFudTdKQUUxaDk2cVFZN24vajYxZjZxVVFjekJ0NTdFSGpxNkhNPSJ9XX19 ------END FPKI MARSHABLE DOCUMENT----- From af7517d960b43e51bc486ad716be276d362f76da Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 11 Jul 2023 17:50:47 +0200 Subject: [PATCH 178/187] Introduced OwnerHash and IssuerHash. 
They are a SHA256 of the payload of the Policy Certificates of owner and issuer. Added necessary tests in pkg/common/crypto. Modified PCA behavior to use the new owner and issuer hashes. Also removed Reason from the SPCRT. --- pkg/common/crypto/crypto.go | 239 +++++++++++++++------- pkg/common/crypto/crypto_test.go | 269 ++++++++++++++++++------- pkg/common/crypto/export_test.go | 9 + pkg/common/embedded_policies.go | 11 +- pkg/common/policies.go | 119 ++++++----- pkg/common/policy_common.go | 6 +- pkg/common/policy_issuance.go | 10 +- pkg/domainowner/domainowner.go | 73 ++----- pkg/logverifier/logverifier_test.go | 22 +- pkg/mapserver/logfetcher/logfetcher.go | 8 +- pkg/mapserver/updater/updater.go | 4 +- pkg/mapserver/updater/updater_test.go | 2 +- pkg/pca/config.go | 15 +- pkg/pca/pca.go | 64 +++--- pkg/pca/pca_test.go | 261 ++++++++++++++++-------- pkg/pca/testdata/pca_config.json | 8 +- pkg/tests/random/random.go | 36 ++-- pkg/util/io.go | 41 +++- pkg/util/io_test.go | 14 ++ pkg/util/pem.go | 34 ---- pkg/util/pem_test.go | 19 +- pkg/util/types_test.go | 2 +- tests/testdata/issuer_cert.json | 2 +- tests/testdata/owner_cert.json | 2 +- 24 files changed, 769 insertions(+), 501 deletions(-) create mode 100644 pkg/common/crypto/export_test.go diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 746c0705..04fb1355 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -5,10 +5,10 @@ import ( "crypto/rand" "crypto/rsa" "crypto/sha256" + "crypto/subtle" + "encoding/hex" "fmt" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/netsec-ethz/fpki/pkg/common" "github.com/netsec-ethz/fpki/pkg/util" ) @@ -24,91 +24,115 @@ func SignBytes(b []byte, key *rsa.PrivateKey) ([]byte, error) { // SignAsOwner generates a signature using the owner's key, and fills the owner signature in // the policy certificate signing request. 
-func SignAsOwner(ownerKey *rsa.PrivateKey, req *common.PolicyCertificateSigningRequest) error { - // Clear owner signature (it's normally empty). - req.OwnerSignature = nil +// +// The request is modified in-place iff no errors are found. +func SignAsOwner( + ownerPolCert *common.PolicyCertificate, + ownerKey *rsa.PrivateKey, + req *common.PolicyCertificateSigningRequest, +) error { - // Identify the public key of the signer with its hash. - // In CT, the hash of the public key is calculated over the DER-encoded - // SubjectPublicKeyInfo object - // From the MarshalPKIXPublicKey go docs: - // MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form. - // The encoded public key is a SubjectPublicKeyInfo structure - // (see RFC 5280, Section 4.1). - pubKeyBytes, err := ctx509.MarshalPKIXPublicKey(&ownerKey.PublicKey) + if req.OwnerSignature != nil || req.OwnerHash != nil { + return fmt.Errorf("there exists a non nil owner signature and hash") + } + // Owner identifier: + ownerHash, err := ComputeHashAsOwner(ownerPolCert) if err != nil { return err } - req.OwnerPubKeyHash = common.SHA256Hash(pubKeyBytes) + req.OwnerHash = ownerHash // Sign using the owner's private key and including the hash of its public key. - req.OwnerSignature, err = signStructRSASHA256(req, ownerKey) + ownerSignature, err := signStructRSASHA256(req, ownerKey) if err != nil { + req.OwnerHash = nil return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) } + // No errors. Modify the request in-place. + req.OwnerSignature = ownerSignature + return nil } // VerifyOwnerSignature verifies the owner's signature using the public key. 
-func VerifyOwnerSignature(req *common.PolicyCertificateSigningRequest, - pubKey *rsa.PublicKey) error { +func VerifyOwnerSignature( + ownerPolCert *common.PolicyCertificate, + req *common.PolicyCertificateSigningRequest, +) error { - // Serialize without signature: - sig := req.OwnerSignature - req.OwnerSignature = nil - serializedStruct, err := common.ToJSON(req) + // Check owner identification. + ownerHash, err := ComputeHashAsOwner(ownerPolCert) if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + return err + } + if subtle.ConstantTimeCompare(req.OwnerHash, ownerHash) != 1 { + // Not equal. + return fmt.Errorf("request's owner is identified by %s, but policy certificate is %s", + hex.EncodeToString(req.OwnerHash), hex.EncodeToString(ownerHash)) } - req.OwnerSignature = sig - hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], req.OwnerSignature) + // Reconstruct owner's public key. + pubKey, err := util.DERBytesToRSAPublic(ownerPolCert.PublicKey) if err != nil { - return fmt.Errorf("RCSRVerifySignature | VerifyPKCS1v15 | %w", err) + return err } - return nil -} - -func VerifyOwnerSignatureWithPolCert(req *common.PolicyCertificateSigningRequest, - polCert *common.PolicyCertificate) error { - // Serialize without signature: + // Serialize request without signature: sig := req.OwnerSignature req.OwnerSignature = nil serializedStruct, err := common.ToJSON(req) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } - req.OwnerSignature = sig + req.OwnerSignature = sig // restore previous signature - pubKey, err := util.DERBytesToRSAPublic(polCert.PublicKey) - if err != nil { - return err - } - - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, - common.SHA256Hash(serializedStruct), req.OwnerSignature) + // Hash serialized request and check the signature with the owner's public key. 
+ hashOutput := sha256.Sum256(serializedStruct) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], req.OwnerSignature) if err != nil { - return fmt.Errorf("RCSRVerifyRPCSignature | VerifyPKCS1v15 | %w", err) + return fmt.Errorf("bad owner signature: %w", err) } return nil } +func VerifyOwnerSignatureInPolicyCertificate( + ownerPolCert *common.PolicyCertificate, + c *common.PolicyCertificate, +) error { + + req := common.NewPolicyCertificateSigningRequest( + c.Version, + c.RawSerialNumber, + c.RawDomain, + c.NotBefore, + c.NotAfter, + c.IsIssuer, + c.PublicKey, + c.PublicKeyAlgorithm, + c.SignatureAlgorithm, + c.TimeStamp, + c.PolicyAttributes, + c.OwnerSignature, + c.OwnerHash, + ) + return VerifyOwnerSignature(ownerPolCert, req) +} + // SignRequestAsIssuer is called by the Policy CA. It signs the request and generates a // PolicyCertificate. The SPTs field is (should be) empty. -func SignRequestAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *rsa.PrivateKey, +func SignRequestAsIssuer( + issuerPolCert *common.PolicyCertificate, + privKey *rsa.PrivateKey, + req *common.PolicyCertificateSigningRequest, ) (*common.PolicyCertificate, error) { // Create a certificate policy inheriting all values from the request. cert := common.NewPolicyCertificate( req.Version, - req.Issuer, - req.Subject(), req.SerialNumber(), - req.Domain, + req.RawDomain, req.NotBefore, req.NotAfter, req.IsIssuer, @@ -118,63 +142,128 @@ func SignRequestAsIssuer(req *common.PolicyCertificateSigningRequest, privKey *r req.TimeStamp, req.PolicyAttributes, req.OwnerSignature, - req.OwnerPubKeyHash, - nil, // issuer signature - nil, // issuer pub key hash + req.OwnerHash, nil, // SPTs + nil, // issuer signature + nil, // issuer hash ) - // Sign the policy certificate. 
- signature, err := signStructRSASHA256(cert, privKey) - if err != nil { - return nil, fmt.Errorf("RCSRGenerateRPC | SignStructRSASHA256 | %w", err) - } - cert.IssuerSignature = signature - - return cert, nil + err := SignPolicyCertificateAsIssuer(issuerPolCert, privKey, cert) + return cert, err } // SignPolicyCertificateAsIssuer is called by PCAs after they have received the SPTs from the // CT log servers. The SPTs are embedded in the policy certificate passed to this function, and // the PCA uses its key to create a signature. The policy certificate is passes with an empty // IssuerSignature (this function does not remove IssuerSignature if it's set). -func SignPolicyCertificateAsIssuer(pc *common.PolicyCertificate, privKey *rsa.PrivateKey, -) (*common.PolicyCertificate, error) { +// +// The childPolCert policy certificate is modified in place iif no error is found. +func SignPolicyCertificateAsIssuer( + issuerPolCert *common.PolicyCertificate, + privKey *rsa.PrivateKey, + childPolCert *common.PolicyCertificate, +) error { - signature, err := signStructRSASHA256(pc, privKey) + if childPolCert.IssuerSignature != nil || childPolCert.IssuerHash != nil { + return fmt.Errorf("remove any issuer signature or issuer hash before signing (set to nil)") + } + // Identify the issuer of the child policy certificate with the hash of the modified policy + // certificate of the issuer. + issuerHash, err := ComputeHashAsIssuer(issuerPolCert) + if err != nil { + return err + } + childPolCert.IssuerHash = issuerHash + + // Sign the child policy certificate. + signature, err := signStructRSASHA256(childPolCert, privKey) if err != nil { - return nil, err + childPolCert.IssuerHash = nil + return err } - pc.IssuerSignature = signature - return pc, nil + + // No errors: modify the child policy certificate in-place. 
+ childPolCert.IssuerSignature = signature + + return nil } // VerifyIssuerSignature: used by domain owner, check whether CA signature is correct -func VerifyIssuerSignature(caCert *ctx509.Certificate, rpc *common.PolicyCertificate) error { - pubKey := caCert.PublicKey.(*rsa.PublicKey) +func VerifyIssuerSignature( + issuerPolCert *common.PolicyCertificate, + childPolCert *common.PolicyCertificate, +) error { - // Serialize without CA signature or SPTs: - caSig, SPTs := rpc.IssuerSignature, rpc.SPCTs - rpc.IssuerSignature, rpc.SPCTs = nil, nil - bytes, err := common.ToJSON(rpc) + // Check owner identification. + issuerHash, err := ComputeHashAsIssuer(issuerPolCert) if err != nil { - return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) + return err + } + if subtle.ConstantTimeCompare(childPolCert.IssuerHash, issuerHash) != 1 { + // Not equal. + return fmt.Errorf("policy certificate's issuer is identified by %s, but "+ + "policy certificate is %s", + hex.EncodeToString(childPolCert.IssuerHash), hex.EncodeToString(issuerHash)) } - rpc.IssuerSignature, rpc.SPCTs = caSig, SPTs - hashOutput := sha256.Sum256(bytes) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], rpc.IssuerSignature) + // Reconstruct issuer's public key. + pubKey, err := util.DERBytesToRSAPublic(issuerPolCert.PublicKey) if err != nil { - return fmt.Errorf("RPCVerifyCASignature | VerifyPKCS1v15 | %w", err) + return err } + + // Serialize child cert without signature: + sig := childPolCert.IssuerSignature + childPolCert.IssuerSignature = nil + serializedStruct, err := common.ToJSON(childPolCert) + if err != nil { + return err + } + childPolCert.IssuerSignature = sig // restore previous signature + + // Hash serialized request and check the signature with the owner's public key. 
+ hashOutput := common.SHA256Hash(serializedStruct) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput, childPolCert.IssuerSignature) + if err != nil { + return fmt.Errorf("bad owner signature: %w", err) + } + return nil + } // signStructRSASHA256: generate a signature using SHA256 and RSA func signStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { - b, err := common.ToJSON(s) + data, err := common.ToJSON(s) if err != nil { return nil, fmt.Errorf("SignStructRSASHA256 | ToJSON | %w", err) } - return SignBytes(b, key) + + return SignBytes(data, key) +} + +// ComputeHashAsOwner computes the bytes of the policy certificate as being an owner certificate. +// This means: it serializes it but without SPCTs or issuer signature, and computes its sha256. +func ComputeHashAsOwner(p *common.PolicyCertificate) ([]byte, error) { + // Remove SPCTs and issuer signature. + SPCTs, issuerSignature := p.SPCTs, p.IssuerSignature + p.SPCTs, p.IssuerSignature = nil, nil + + // Serialize and restore previously removed fields. + serializedPC, err := common.ToJSON(p) + p.SPCTs, p.IssuerSignature = SPCTs, issuerSignature + + return common.SHA256Hash(serializedPC), err +} + +func ComputeHashAsIssuer(p *common.PolicyCertificate) ([]byte, error) { + // Remove SPCTs. + SPCTs := p.SPCTs + p.SPCTs = nil + + // Serialize and restore previously removed fields. 
+ serializedPC, err := common.ToJSON(p) + p.SPCTs = SPCTs + + return common.SHA256Hash(serializedPC), err } diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 9d38eab1..c73c97de 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -1,13 +1,12 @@ package crypto_test import ( - libcrypto "crypto" + cryptolib "crypto" "crypto/rsa" "io/ioutil" + "math/rand" "testing" - ctx509 "github.com/google/certificate-transparency-go/x509" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" @@ -19,7 +18,7 @@ import ( var update = tests.UpdateGoldenFiles() -func TestCreatePolicyCertificatesForTests(t *testing.T) { +func TestUpdateGoldenFiles(t *testing.T) { rand.Seed(0) if !*update { t.Skip("Not updating golden files: flag not set") @@ -31,7 +30,7 @@ func TestCreatePolicyCertificatesForTests(t *testing.T) { // Objain a new pair for the owner. ownerCert, ownerKey := randomPolCertAndKey(t) // The owner will be issued by the root issuer. - err := crypto_pkg.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, ownerCert) + err := crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, ownerCert) require.NoError(t, err) // Store all certs and keys. Filename -> payload. @@ -86,103 +85,227 @@ func TestCreatePolicyCertificatesForTests(t *testing.T) { } } -func TestSignatureOfPolicyCertSignRequest(t *testing.T) { - ownerPriv, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") - require.NoError(t, err, "load RSA key error") +func TestComputeHashAsOwner(t *testing.T) { + rand.Seed(1) + // Get random policy certificate and check it contains SPCTs, owner, and issuer fields. 
+ pc := random.RandomPolicyCertificate(t) + require.NotEmpty(t, pc.SPCTs) + require.NotEmpty(t, pc.OwnerSignature) + require.NotEmpty(t, pc.OwnerHash) + require.NotEmpty(t, pc.IssuerSignature) + require.NotEmpty(t, pc.IssuerHash) + + gotHash, err := crypto.ComputeHashAsOwner(pc) + require.NoError(t, err) + + // Remove SPCTs and issuer signature, and serialize. + pc.SPCTs = nil + pc.IssuerSignature = nil + serializedPC, err := common.ToJSON(pc) + require.NoError(t, err) + + // Compare with the expected value. + expected := common.SHA256Hash(serializedPC) + require.Equal(t, expected, gotHash) +} + +func TestComputeHashAsIssuer(t *testing.T) { + rand.Seed(2) + + // Get random policy certificate and check it contains SPCTs, owner, and issuer fields. + pc := random.RandomPolicyCertificate(t) + require.NotEmpty(t, pc.SPCTs) + require.NotEmpty(t, pc.OwnerSignature) + require.NotEmpty(t, pc.OwnerHash) + require.NotEmpty(t, pc.IssuerSignature) + require.NotEmpty(t, pc.IssuerHash) + + gotHash, err := crypto.ComputeHashAsIssuer(pc) + require.NoError(t, err) + + // Remove SPCTs, and serialize. + pc.SPCTs = nil + serializedPC, err := common.ToJSON(pc) + require.NoError(t, err) + + // Compare with the expected value. + expected := common.SHA256Hash(serializedPC) + require.Equal(t, expected, gotHash) +} + +func TestSignAsOwner(t *testing.T) { + rand.Seed(11) + + // Load owner policy cert and key. + ownerCert, err := util.PolicyCertificateFromFile("../../../tests/testdata/owner_cert.json") + require.NoError(t, err) + ownerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/owner_key.pem") + require.NoError(t, err) + + // Create random request. request := random.RandomPolCertSignRequest(t) + require.NotEmpty(t, request.OwnerSignature) + require.NotEmpty(t, request.OwnerHash) request.IsIssuer = true // Sign as owner. 
- err = crypto.SignAsOwner(ownerPriv, request) + err = crypto.SignAsOwner(ownerCert, ownerKey, request) + require.Error(t, err) // owner signature and hash not nil + request.OwnerHash = nil + err = crypto.SignAsOwner(ownerCert, ownerKey, request) + require.Error(t, err) // owner hash not nil + request.OwnerSignature = []byte{} + err = crypto.SignAsOwner(ownerCert, ownerKey, request) + require.Error(t, err) // owner hash empty but not nil + request.OwnerSignature = nil + // It should not fail now: + err = crypto.SignAsOwner(ownerCert, ownerKey, request) require.NoError(t, err, "RCSR sign signature error") + require.NotEmpty(t, request.OwnerSignature) + require.NotEmpty(t, request.OwnerHash) + gotSignature := request.OwnerSignature - // Serialize the request (w/out signature) to bytes to later check its hash value. - sig := request.OwnerSignature + // Manually do the steps to sign, and compare results. 3 stesps. + // 1. Check the owner hash is correct. + ownerHash, err := crypto.ComputeHashAsOwner(ownerCert) + require.NoError(t, err) + require.Equal(t, ownerHash, request.OwnerHash) + // 2. Sign the child request without owner signature. request.OwnerSignature = nil - serializedRequest, err := common.ToJSON(request) + serializedRequestWoutOwnerSignature, err := common.ToJSON(request) + require.NoError(t, err) + expectedSignature, err := crypto.SignStructRSASHA256(request, ownerKey) require.NoError(t, err) - request.OwnerSignature = sig + // 3. Compare signatures. + require.Equal(t, expectedSignature, gotSignature) + request.OwnerSignature = gotSignature // Check that the signature corresponds to the owner's key. 
- err = rsa.VerifyPKCS1v15(&ownerPriv.PublicKey, libcrypto.SHA256, - common.SHA256Hash(serializedRequest), request.OwnerSignature) + err = rsa.VerifyPKCS1v15(&ownerKey.PublicKey, cryptolib.SHA256, + common.SHA256Hash(serializedRequestWoutOwnerSignature), gotSignature) require.NoError(t, err) - // Check that we have the hash of the public key of the owner's key. - // The bytes of the public key have to be obtained via a call to ctx509.MarshalPKIXPublicKey - pubKeyBytes, err := ctx509.MarshalPKIXPublicKey(&ownerPriv.PublicKey) + // Additionally check that our VerifyOwnerSignature works as expected. + err = crypto.VerifyOwnerSignature(ownerCert, request) require.NoError(t, err) - require.Equal(t, common.SHA256Hash(pubKeyBytes), request.OwnerPubKeyHash) - - // Also check that our VerifyOwnerSignature works as expected. - err = crypto.VerifyOwnerSignature(request, &ownerPriv.PublicKey) - require.NoError(t, err, "RCSR verify signature error") } -// TestIssuanceOfRPC: check if the CA signature is correct -func TestSignAsIssuer(t *testing.T) { - // Load crypto material for owner and issuer. - ownerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") +func TestSignPolicyCertificateAsIssuer(t *testing.T) { + rand.Seed(12) + + // Load issuer policy cert and key. + issuerCert, err := util.PolicyCertificateFromFile("../../../tests/testdata/issuer_cert.json") + require.NoError(t, err) + issuerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/issuer_key.pem") require.NoError(t, err) - issuerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/serverkey.pem") + + // Create random policy certificate. 
+ childPolCert := random.RandomPolicyCertificate(t) + require.NotEmpty(t, childPolCert.SPCTs) + require.NotEmpty(t, childPolCert.OwnerSignature) + require.NotEmpty(t, childPolCert.OwnerHash) + require.NotEmpty(t, childPolCert.IssuerSignature) + require.NotEmpty(t, childPolCert.IssuerHash) + + // Issuer-sign it: + err = crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, childPolCert) + require.Error(t, err) // issuer signature and hash not nil + childPolCert.IssuerSignature = nil + err = crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, childPolCert) + require.Error(t, err) // issuer hash not nil + childPolCert.IssuerHash = []byte{} + err = crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, childPolCert) + require.Error(t, err) // issuer hash empty, but still not nil + childPolCert.IssuerHash = nil + // It has to work now: + err = crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, childPolCert) require.NoError(t, err) - issuerCert, err := util.CertificateFromPEMFile("../../../tests/testdata/servercert.pem") - require.NoError(t, err, "X509 Cert From File error") + gotSignature := childPolCert.IssuerSignature - // Phase 1: domain owner generates a policy certificate signing request. - req := random.RandomPolCertSignRequest(t) - // generate signature for request - err = crypto.SignAsOwner(ownerKey, req) + // Manually do the steps to sign, and compare results. 3 stesps. + // 1. Check that the issuer hash is correct. + // Check that the issuer hash is correct. + issuerHash, err := crypto.ComputeHashAsIssuer(issuerCert) + require.NoError(t, err) + require.Equal(t, issuerHash, childPolCert.IssuerHash) + // 2. Sign the child policy certificate without issuer signature. + childPolCert.IssuerSignature = nil + expectedSignature, err := crypto.SignStructRSASHA256(childPolCert, issuerKey) + require.NoError(t, err) + serializedChildPolCertWoutOwnerSignature, err := common.ToJSON(childPolCert) require.NoError(t, err) + // 3. Compare signatures. 
+ require.Equal(t, expectedSignature, gotSignature) + childPolCert.IssuerSignature = gotSignature - // Phase 2: pca issues policy certificate. - // we can validate the signature in the request, but in this test we know it's correct. - err = crypto.VerifyOwnerSignature(req, &ownerKey.PublicKey) - require.NoError(t, err, "RCSR Verify Signature error") - // Sign as issuer. - polCert, err := crypto.SignRequestAsIssuer(req, issuerKey) - require.NoError(t, err, "RCSR Generate RPC error") - assert.Equal(t, len(polCert.SPCTs), 0, "SPTs must be empty right after first issuer signature") + // Check that the signature corresponds to the owner's key. + err = rsa.VerifyPKCS1v15(&issuerKey.PublicKey, cryptolib.SHA256, + common.SHA256Hash(serializedChildPolCertWoutOwnerSignature), gotSignature) + require.NoError(t, err) - // ------------------------------------- - // phase 3: domain owner check rpc - // ------------------------------------- - err = crypto.VerifyIssuerSignature(issuerCert, polCert) - require.NoError(t, err, "RPC Verify CA Signature error") + // Additionally check that our VerifyIssuerSignature works as expected. 
+ err = crypto.VerifyIssuerSignature(issuerCert, childPolCert) + require.NoError(t, err) } -// TestIssuanceOfPC: generate PC -> domain owner generate signature -> pca verify signature -> pca sign PC -> domain owner verifies PC -func TestIssuanceOfSP(t *testing.T) { - // ------------------------------------- - // phase 1: domain owner generate rcsr - // ------------------------------------- - privKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/clientkey.pem") - require.NoError(t, err, "Load RSA Key Pair From File error") +func TestSignRequestAsIssuer(t *testing.T) { + rand.Seed(13) - // pubKeyBytes, err := util.RSAPublicToPEM(&privKey.PublicKey) - // require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") - pubKeyBytes, err := util.RSAPublicToDERBytes(&privKey.PublicKey) - require.NoError(t, err, "Rsa PublicKey To Pem Bytes error") + // Load issuer policy cert and key. + issuerCert, err := util.PolicyCertificateFromFile("../../../tests/testdata/issuer_cert.json") + require.NoError(t, err) + issuerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/issuer_key.pem") + require.NoError(t, err) - req := random.RandomPolCertSignRequest(t) - req.PublicKey = pubKeyBytes + // Load owner policy cert and key. + ownerCert, err := util.PolicyCertificateFromFile("../../../tests/testdata/owner_cert.json") + require.NoError(t, err) + ownerKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/owner_key.pem") + require.NoError(t, err) - // generate signature for rcsr - err = crypto.SignAsOwner(privKey, req) - require.NoError(t, err, "RCSR Create Signature error") + // Create random request. 
+ request := random.RandomPolCertSignRequest(t) + request.OwnerHash = nil + request.OwnerSignature = nil - // ------------------------------------- - // phase 2: pca issue rpc - // ------------------------------------- - // validate the signature in rcsr - err = crypto.VerifyOwnerSignature(req, &privKey.PublicKey) - require.NoError(t, err, "RCSR Verify Signature error") + // Owner-sign it. + err = crypto.SignAsOwner(ownerCert, ownerKey, request) + require.NoError(t, err) + + // Issuer-sign the request. + childPolCert, err := crypto.SignRequestAsIssuer(issuerCert, issuerKey, request) + require.NoError(t, err) + + // Verify both owner and issuer. + err = crypto.VerifyOwnerSignature(ownerCert, request) + require.NoError(t, err) + err = crypto.VerifyOwnerSignatureInPolicyCertificate(ownerCert, childPolCert) + require.NoError(t, err) + err = crypto.VerifyIssuerSignature(issuerCert, childPolCert) + require.NoError(t, err) +} + +func randomPolCertAndKey(t tests.T) (*common.PolicyCertificate, *rsa.PrivateKey) { + cert := random.RandomPolicyCertificate(t) + key := random.RandomRSAPrivateKey(t) - pcaPrivKey, err := util.RSAKeyFromPEMFile("../../../tests/testdata/serverkey.pem") + // DER encoded public key. + derPubKey, err := util.RSAPublicToDERBytes(&key.PublicKey) require.NoError(t, err) - rpc, err := crypto.SignRequestAsIssuer(req, pcaPrivKey) - require.NoError(t, err, "RCSR Generate RPC error") - assert.Equal(t, len(rpc.SPCTs), 0, "spt in the rpc should be empty") + // Set the public key. + cert.PublicKey = derPubKey + + // Set validity times between unix time 1 and 10000 + cert.NotBefore = util.TimeFromSecs(1) + cert.NotAfter = util.TimeFromSecs(10000) + + // Remove signature and hash for owner and issuer. 
+ cert.OwnerSignature = nil + cert.OwnerHash = nil + cert.IssuerSignature = nil + cert.IssuerHash = nil + + return cert, key } diff --git a/pkg/common/crypto/export_test.go b/pkg/common/crypto/export_test.go new file mode 100644 index 00000000..65d9e8eb --- /dev/null +++ b/pkg/common/crypto/export_test.go @@ -0,0 +1,9 @@ +package crypto + +import ( + "crypto/rsa" +) + +func SignStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { + return signStructRSASHA256(s, key) +} diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index 786f60da..03d97eea 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -29,12 +29,10 @@ type SignedPolicyCertificateTimestamp struct { // SignedPolicyCertificateRevocationTimestamp is a signed policy certificate revocation timestamp. type SignedPolicyCertificateRevocationTimestamp struct { SignedEntryTimestamp - Reason int `json:",omitempty"` } func NewSignedEntryTimestamp( version int, - issuer string, logID []byte, addedTS time.Time, signature []byte, @@ -44,7 +42,6 @@ func NewSignedEntryTimestamp( EmbeddedPolicyBase: EmbeddedPolicyBase{ PolicyPartBase: PolicyPartBase{ Version: version, - Issuer: issuer, }, }, LogID: logID, @@ -62,7 +59,6 @@ func (s SignedEntryTimestamp) Equal(x SignedEntryTimestamp) bool { func NewSignedPolicyCertificateTimestamp( version int, - issuer string, logID []byte, addedTS time.Time, signature []byte, @@ -70,7 +66,6 @@ func NewSignedPolicyCertificateTimestamp( return &SignedPolicyCertificateTimestamp{ SignedEntryTimestamp: *NewSignedEntryTimestamp( version, - issuer, logID, addedTS, signature, @@ -84,7 +79,6 @@ func (t SignedPolicyCertificateTimestamp) Equal(x SignedPolicyCertificateTimesta func NewSignedPolicyCertificateRevocationTimestamp( version int, - issuer string, logID []byte, addedTS time.Time, signature []byte, @@ -93,16 +87,13 @@ func NewSignedPolicyCertificateRevocationTimestamp( return &SignedPolicyCertificateRevocationTimestamp{ 
 SignedEntryTimestamp: *NewSignedEntryTimestamp( version, - issuer, logID, addedTS, signature, ), - Reason: reason, } } func (t SignedPolicyCertificateRevocationTimestamp) Equal(x SignedPolicyCertificateRevocationTimestamp) bool { - return t.SignedEntryTimestamp.Equal(x.SignedEntryTimestamp) && - t.Reason == x.Reason + return t.SignedEntryTimestamp.Equal(x.SignedEntryTimestamp) } diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 94e4a92d..050f2fd7 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -9,45 +9,60 @@ import ( // and others. type PolicyDocument interface { PolicyPart - Subject() string SerialNumber() int + Domain() string } type PolicyCertificateBase struct { PolicyPartBase - RawSubject string `json:"Subject,omitempty"` RawSerialNumber int `json:"SerialNumber,omitempty"` + RawDomain string `json:"Domain,omitempty"` } -func (o PolicyCertificateBase) Subject() string { return o.RawSubject } func (o PolicyCertificateBase) SerialNumber() int { return o.RawSerialNumber } +func (o PolicyCertificateBase) Domain() string { return o.RawDomain } func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { return p.PolicyPartBase.Equal(x.PolicyPartBase) && - p.RawSubject == x.RawSubject && - p.RawSerialNumber == x.RawSerialNumber + p.RawSerialNumber == x.RawSerialNumber && + p.RawDomain == x.RawDomain } +// PolicyCertificateFields contains all the fields that a policy certificate or a signing request +// have in common. This excludes e.g. the issuer signature and hash. +// +// The `PublicKey` field is the DER-encoded SubjectPublicKeyInfo, as returned by the call +// `x509.MarshalPKIXPublicKey` in the `crypto/x509` package. +// From the `MarshalPKIXPublicKey` go docs: +// MarshalPKIXPublicKey converts a public key to PKIX, ASN.1 DER form. +// The encoded public key is a SubjectPublicKeyInfo structure +// (see RFC 5280, Section 4.1). 
+// +// The `OwnerHash` field is the SHA256 of the payload of the owner certificate that contained the +// owner signature. The hash is computed on the owner's policy certificate, but without any +// SPCTs or issuer signature, but preserving the owner's signature. type PolicyCertificateFields struct { PolicyCertificateBase - Domain string `json:",omitempty"` NotBefore time.Time `json:",omitempty"` NotAfter time.Time `json:",omitempty"` IsIssuer bool `json:",omitempty"` - PublicKey []byte `json:",omitempty"` // DER-encoded SubjectPublicKeyInfo + PublicKey []byte `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` TimeStamp time.Time `json:",omitempty"` PolicyAttributes PolicyAttributes `json:",omitempty"` OwnerSignature []byte `json:",omitempty"` - OwnerPubKeyHash []byte `json:",omitempty"` // SHA256 of owner's public key + OwnerHash []byte `json:",omitempty"` } -// PolicyCertificate is a Root Policy Certificate. +// PolicyCertificate can be a Root Policy Certificate, or a policy certificate that was issued by +// a previously existing policy certificate. +// The field `IssuerHash` has semantics analogous to `OwnerHash`: it is the SHA256 of the issuer +// policy certificate that was used to sign this policy certificate, without SPCTs. type PolicyCertificate struct { PolicyCertificateFields - IssuerSignature []byte `json:",omitempty"` - IssuerPubKeyHash []byte `json:",omitempty"` - SPCTs []SignedPolicyCertificateTimestamp `json:",omitempty"` + IssuerSignature []byte `json:",omitempty"` + IssuerHash []byte `json:",omitempty"` + SPCTs []SignedPolicyCertificateTimestamp `json:",omitempty"` } // PolicyAttributes is a domain policy that specifies what is or not acceptable for a domain. 
@@ -58,22 +73,21 @@ type PolicyAttributes struct { type PolicyCertificateRevocationFields struct { PolicyCertificateBase - TimeStamp time.Time `json:",omitempty"` - OwnerSignature []byte `json:",omitempty"` - OwnerPubKeyHash []byte `json:",omitempty"` // SHA256 of owner's public key + TimeStamp time.Time `json:",omitempty"` + OwnerSignature []byte `json:",omitempty"` + OwnerHash []byte `json:",omitempty"` } type PolicyCertificateRevocation struct { PolicyCertificateRevocationFields - IssuerSignature []byte `json:",omitempty"` - IssuerPubKeyHash []byte `json:",omitempty"` - SPCRTs []SignedPolicyCertificateRevocationTimestamp `json:",omitempty"` + IssuerSignature []byte `json:",omitempty"` + // Hash of the issuer's cert w/out SPCTs: + IssuerHash []byte `json:",omitempty"` + SPCRTs []SignedPolicyCertificateRevocationTimestamp `json:",omitempty"` } func NewPolicyCertificateFields( version int, - issuer string, - subject string, serialNumber int, domain string, notBefore time.Time, @@ -85,18 +99,16 @@ func NewPolicyCertificateFields( timeStamp time.Time, policyAttributes PolicyAttributes, ownerSignature []byte, - ownerPubKeyHash []byte, + ownerHash []byte, ) *PolicyCertificateFields { return &PolicyCertificateFields{ PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: PolicyPartBase{ Version: version, - Issuer: issuer, }, - RawSubject: subject, RawSerialNumber: serialNumber, + RawDomain: domain, }, - Domain: domain, NotBefore: notBefore, NotAfter: notAfter, IsIssuer: isIssuer, @@ -106,7 +118,7 @@ func NewPolicyCertificateFields( TimeStamp: timeStamp, PolicyAttributes: policyAttributes, OwnerSignature: ownerSignature, - OwnerPubKeyHash: ownerPubKeyHash, + OwnerHash: ownerHash, } } @@ -114,20 +126,17 @@ func (c PolicyCertificateFields) Equal(x PolicyCertificateFields) bool { return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && c.PublicKeyAlgorithm == x.PublicKeyAlgorithm && bytes.Equal(c.PublicKey, x.PublicKey) && - c.Domain == x.Domain && 
c.NotBefore.Equal(x.NotBefore) && c.NotAfter.Equal(x.NotAfter) && c.SignatureAlgorithm == x.SignatureAlgorithm && c.TimeStamp.Equal(x.TimeStamp) && bytes.Equal(c.OwnerSignature, x.OwnerSignature) && - bytes.Equal(c.OwnerPubKeyHash, x.OwnerPubKeyHash) && + bytes.Equal(c.OwnerHash, x.OwnerHash) && c.PolicyAttributes.Equal(x.PolicyAttributes) } func NewPolicyCertificate( version int, - issuer string, - subject string, serialNumber int, domain string, notBefore time.Time, @@ -139,17 +148,15 @@ func NewPolicyCertificate( timeStamp time.Time, policyAttributes PolicyAttributes, ownerSignature []byte, - ownerPubKeyHash []byte, - issuerSignature []byte, - issuerPubKeyHash []byte, + ownerHash []byte, SPTs []SignedPolicyCertificateTimestamp, + issuerSignature []byte, + issuerHash []byte, ) *PolicyCertificate { return &PolicyCertificate{ PolicyCertificateFields: *NewPolicyCertificateFields( version, - issuer, - subject, serialNumber, domain, notBefore, @@ -161,18 +168,18 @@ func NewPolicyCertificate( timeStamp, policyAttributes, ownerSignature, - ownerPubKeyHash, + ownerHash, ), - IssuerSignature: issuerSignature, - IssuerPubKeyHash: issuerPubKeyHash, - SPCTs: SPTs, + IssuerSignature: issuerSignature, + IssuerHash: issuerHash, + SPCTs: SPTs, } } func (c PolicyCertificate) Equal(x PolicyCertificate) bool { return c.PolicyCertificateFields.Equal(x.PolicyCertificateFields) && bytes.Equal(c.IssuerSignature, x.IssuerSignature) && - bytes.Equal(c.IssuerPubKeyHash, x.IssuerPubKeyHash) && + bytes.Equal(c.IssuerHash, x.IssuerHash) && equalSlices(c.SPCTs, x.SPCTs) } @@ -184,25 +191,21 @@ func (s PolicyAttributes) Equal(o PolicyAttributes) bool { func NewPolicyCertificateRevocationFields( version int, - issuer string, - subject string, serialNumber int, timeStamp time.Time, ownerSignature []byte, - ownerPubKeyHash []byte, + ownerHash []byte, ) *PolicyCertificateRevocationFields { return &PolicyCertificateRevocationFields{ PolicyCertificateBase: PolicyCertificateBase{ PolicyPartBase: 
PolicyPartBase{ Version: version, - Issuer: issuer, }, - RawSubject: subject, RawSerialNumber: serialNumber, }, - TimeStamp: timeStamp, - OwnerSignature: ownerSignature, - OwnerPubKeyHash: ownerPubKeyHash, + TimeStamp: timeStamp, + OwnerSignature: ownerSignature, + OwnerHash: ownerHash, } } @@ -210,41 +213,37 @@ func (c PolicyCertificateRevocationFields) Equal(x PolicyCertificateRevocationFi return c.PolicyCertificateBase.Equal(x.PolicyCertificateBase) && c.TimeStamp == x.TimeStamp && bytes.Equal(c.OwnerSignature, x.OwnerSignature) && - bytes.Equal(c.OwnerPubKeyHash, x.OwnerPubKeyHash) + bytes.Equal(c.OwnerHash, x.OwnerHash) } func NewPolicyCertificateRevocation( version int, - issuer string, - subject string, serialNumber int, timeStamp time.Time, ownerSignature []byte, - ownerPubKeyHash []byte, - issuerSignature []byte, - issuerPubKeyHash []byte, + ownerHash []byte, serverTimestamps []SignedPolicyCertificateRevocationTimestamp, + issuerSignature []byte, + issuerHash []byte, ) *PolicyCertificateRevocation { return &PolicyCertificateRevocation{ PolicyCertificateRevocationFields: *NewPolicyCertificateRevocationFields( version, - issuer, - subject, serialNumber, timeStamp, ownerSignature, - ownerPubKeyHash, + ownerHash, ), - IssuerSignature: issuerSignature, - IssuerPubKeyHash: issuerPubKeyHash, - SPCRTs: serverTimestamps, + IssuerSignature: issuerSignature, + IssuerHash: issuerHash, + SPCRTs: serverTimestamps, } } func (r PolicyCertificateRevocation) Equal(x PolicyCertificateRevocation) bool { return r.PolicyCertificateRevocationFields.Equal(x.PolicyCertificateRevocationFields) && bytes.Equal(r.IssuerSignature, x.IssuerSignature) && - bytes.Equal(r.IssuerPubKeyHash, x.IssuerPubKeyHash) && + bytes.Equal(r.IssuerHash, x.IssuerHash) && equalSlices(r.SPCRTs, x.SPCRTs) } diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index d1df5406..c24473ca 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -21,12 +21,10 @@ type 
PolicyPart interface { // PolicyPartBase is the common type to all policy documents. type PolicyPartBase struct { MarshallableDocumentBase - Version int `json:",omitempty"` - Issuer string `json:",omitempty"` + Version int `json:",omitempty"` } func (o PolicyPartBase) Equal(x PolicyPartBase) bool { // Ignore the RawJSON component, use just the regular fields. - return o.Version == x.Version && - o.Issuer == x.Issuer + return o.Version == x.Version } diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 97e06473..0a6ecd9a 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -10,13 +10,11 @@ type PolicyCertificateSigningRequest struct { } type PolicyCertificateRevocationSigningRequest struct { - Subject string `json:",omitempty"` + PolicyCertificateHash []byte `json:",omitempty"` // Hash of the pol. cert. to revoke } func NewPolicyCertificateSigningRequest( version int, - issuer string, - subject string, serialNumber int, domain string, notBefore time.Time, @@ -28,14 +26,12 @@ func NewPolicyCertificateSigningRequest( timeStamp time.Time, policyAttributes PolicyAttributes, ownerSignature []byte, - ownerPubKeyHash []byte, + ownerHash []byte, ) *PolicyCertificateSigningRequest { return &PolicyCertificateSigningRequest{ PolicyCertificateFields: *NewPolicyCertificateFields( version, - issuer, - subject, serialNumber, domain, notBefore, @@ -47,7 +43,7 @@ func NewPolicyCertificateSigningRequest( timeStamp, policyAttributes, ownerSignature, - ownerPubKeyHash, + ownerHash, ), } } diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 7c5f6b26..36555791 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -14,21 +14,32 @@ import ( // Assume one domain owner only have one domain; Logic can be changed later // TODO(yongzhe): Cool-off period is not fully implemented. -// DomainOwner: struct which represents one domain owner. +// DomainOwner represents a domain owner. 
It contains a map of a domain to the latest domain root +// and its private key. type DomainOwner struct { - privKeyByDomainName map[string]*rsa.PrivateKey + DomainRoots map[string]CertKey // My latest domain root pol. cert. per domain +} + +type CertKey struct { + Cert *common.PolicyCertificate + Key *rsa.PrivateKey } // NewDomainOwner: returns a new domain owner func NewDomainOwner() *DomainOwner { return &DomainOwner{ - privKeyByDomainName: make(map[string]*rsa.PrivateKey), + DomainRoots: make(map[string]CertKey), } } -// GeneratePolCertSignRequest: Generate a Root Certificate Signing Request for one domain +// GeneratePolCertSignRequest generates a Policy Certificate Signing Request for one domain. +// It will try to sign this request as an owner with an existing policy certificate. // subject is the name of the domain: eg. fpki.com -func (do *DomainOwner) GeneratePolCertSignRequest(issuer, domainName string, version int) (*common.PolicyCertificateSigningRequest, error) { +func (do *DomainOwner) GeneratePolCertSignRequest( + domainName string, + version int, +) (*common.PolicyCertificateSigningRequest, error) { + // Generate a fresh RSA key pair; new RSA key for every RCSR, thus every RPC newPrivKeyPair, err := do.generateRSAPrivKeyPair() if err != nil { @@ -42,14 +53,12 @@ func (do *DomainOwner) GeneratePolCertSignRequest(issuer, domainName string, ver req := common.NewPolicyCertificateSigningRequest( version, - issuer, // issuer - domainName, - 0, // serial number - domainName, // domain - time.Now(), + 0, // serial number + domainName, // domain + time.Now(), // not before time.Now().Add(time.Microsecond), // not after false, // is issuer - pubKeyBytes, + pubKeyBytes, // public key common.RSA, common.SHA256, time.Now(), // timestamp @@ -59,54 +68,16 @@ func (do *DomainOwner) GeneratePolCertSignRequest(issuer, domainName string, ver ) // if domain owner still have the private key of the previous RPC -> can avoid cool-off period - if prevKey, ok := 
do.privKeyByDomainName[domainName]; ok { - err = crypto.SignAsOwner(prevKey, req) + if prevRoot, ok := do.DomainRoots[domainName]; ok { + err = crypto.SignAsOwner(prevRoot.Cert, prevRoot.Key, req) if err != nil { return nil, fmt.Errorf("GeneratePolCertSignRequest | RCSRGenerateRPCSignature | %w", err) } } - // Store the new keys for this domain as the latest owner keys. - do.privKeyByDomainName[domainName] = newPrivKeyPair - return req, nil } -// RandomPolicyCertificate: generate one psr for one specific domain. -func (do *DomainOwner) RandomPolicyCertificate(domainName string, policy common.PolicyAttributes, -) (*common.PolicyCertificateSigningRequest, error) { - - rpcKeyPair, ok := do.privKeyByDomainName[domainName] - if !ok { - return nil, fmt.Errorf("RandomPolicyCertificate | No valid RPC for domain %s", domainName) - } - - polCertSignReq := common.NewPolicyCertificateSigningRequest( - 0, // version - "", // issuer - domainName, // subject - 0, // serial number - domainName, // domain - time.Now(), - time.Now().Add(time.Microsecond), // not after - false, // is issuer - nil, // public key - common.RSA, - common.SHA256, - time.Now(), // timestamp - policy, // policy attributes - nil, // owner's signature - nil, // owner pub key hash - ) - - err := crypto.SignAsOwner(rpcKeyPair, polCertSignReq) - if err != nil { - return nil, fmt.Errorf("RandomPolicyCertificate | DomainOwnerSignPSR | %w", err) - } - - return polCertSignReq, nil -} - // generate new rsa key pair func (do *DomainOwner) generateRSAPrivKeyPair() (*rsa.PrivateKey, error) { privateKeyPair, err := rsa.GenerateKey(rand.Reader, 2048) diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 1a40b8d4..06256acb 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -16,18 +16,24 @@ import ( ) func TestVerifySPT(t *testing.T) { - ownwerPriv, err := util.RSAKeyFromPEMFile("../../tests/testdata/clientkey.pem") - require.NoError(t, err, 
"load RSA key error") - issuerPriv, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") - require.NoError(t, err, "load RSA key error") + ownerCert, err := util.PolicyCertificateFromFile("../../tests/testdata/owner_cert.json") + require.NoError(t, err) + ownwerPriv, err := util.RSAKeyFromPEMFile("../../tests/testdata/owner_key.pem") + require.NoError(t, err) + + issuerCert, err := util.PolicyCertificateFromFile("../../tests/testdata/issuer_cert.json") + require.NoError(t, err) + issuerPriv, err := util.RSAKeyFromPEMFile("../../tests/testdata/issuer_key.pem") + require.NoError(t, err) req := random.RandomPolCertSignRequest(t) - err = crypto.SignAsOwner(ownwerPriv, req) + req.OwnerHash = nil + req.OwnerSignature = nil + err = crypto.SignAsOwner(ownerCert, ownwerPriv, req) require.NoError(t, err) - cert, err := crypto.SignRequestAsIssuer(req, issuerPriv) + _, err = crypto.SignRequestAsIssuer(issuerCert, issuerPriv, req) require.NoError(t, err) - _ = cert } func TestVerifyInclusionByHash(t *testing.T) { @@ -44,7 +50,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "rHCFIFTtQjLK5dgSl/DS/wi8qctNmB6nrAI8gEq9AIM="), + RootHash: tests.MustDecodeBase64(t, "7+1ODWJbmPz206K4n/kabPoCxAiyJ2e+jSe9rH5uYFk="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 10b390b8..57baad60 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -333,8 +333,6 @@ func GetPCAndRPCs( resultPolCerts = append(resultPolCerts, common.NewPolicyCertificate( 0, - "", // CA name - domainName, 0, // serial number domainName, time.Now(), // not before @@ -346,10 +344,10 @@ func GetPCAndRPCs( time.Now(), // timestamp common.PolicyAttributes{}, // policy attributes nil, // owner signature - nil, // owner pub key hash - nil, // issuer signature - nil, // issuer pub key hash + nil, // owner hash nil, // server timestamps + nil, // issuer signature + nil, // issuer hash )) } if err := scanner.Err(); err != nil { diff --git a/pkg/mapserver/updater/updater.go b/pkg/mapserver/updater/updater.go index 2189bc42..5b845ce2 100644 --- a/pkg/mapserver/updater/updater.go +++ b/pkg/mapserver/updater/updater.go @@ -149,7 +149,7 @@ func UpdateWithOverwrite(ctx context.Context, conn db.Conn, domainNames [][]stri payloads[i] = pol.Raw() id := common.SHA256Hash32Bytes(pol.Raw()) policyIDs[i] = &id - policySubjects[i] = pol.Subject() + policySubjects[i] = pol.Domain() } err = insertPolicies(ctx, conn, policySubjects, policyIDs, payloads) @@ -196,7 +196,7 @@ func UpdateWithKeepExisting(ctx context.Context, conn db.Conn, domainNames [][]s payloads[i] = pol.Raw() id := common.SHA256Hash32Bytes(pol.Raw()) policyIDs[i] = &id - policySubjects[i] = pol.Subject() + policySubjects[i] = pol.Domain() } // Check which policies are already present in the DB. 
maskPols, err := conn.CheckPoliciesExist(ctx, policyIDs) diff --git a/pkg/mapserver/updater/updater_test.go b/pkg/mapserver/updater/updater_test.go index 25f5a80c..fa4099b7 100644 --- a/pkg/mapserver/updater/updater_test.go +++ b/pkg/mapserver/updater/updater_test.go @@ -93,7 +93,7 @@ func TestUpdateWithKeepExisting(t *testing.T) { // Check policy coalescing. policiesPerName := make(map[string][]common.PolicyDocument, len(pols)) for _, pol := range pols { - policiesPerName[pol.Subject()] = append(policiesPerName[pol.Subject()], pol) + policiesPerName[pol.Domain()] = append(policiesPerName[pol.Domain()], pol) } for name, policies := range policiesPerName { id := common.SHA256Hash32Bytes([]byte(name)) diff --git a/pkg/pca/config.go b/pkg/pca/config.go index e28f52b3..3fc6d501 100644 --- a/pkg/pca/config.go +++ b/pkg/pca/config.go @@ -8,17 +8,10 @@ import ( // PCAConfig: configuration of the pca type PCAConfig struct { - CAName string `json:",omitempty"` - CTLogServers []CTLogServerEntryConfig `json:",omitempty"` - KeyPath string `json:",omitempty"` - RootPolicyCertPath string `json:",omitempty"` - - // deleteme remove all this below - - // PCA's output path; sends RPC - PolicyLogExgPath string `json:",omitempty"` - - OutputPath string `json:",omitempty"` + CAName string `json:",omitempty"` + CTLogServers []CTLogServerEntryConfig `json:",omitempty"` + KeyPEM []byte `json:",omitempty"` + CertJSON []byte `json:",omitempty"` } type CTLogServerEntryConfig struct { diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index ad1a5297..b95a4052 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -51,29 +51,24 @@ func NewPCA(configPath string) (*PCA, error) { return nil, fmt.Errorf("NewPCA | ReadConfigFromFile | %w", err) } - // Load rsa key pair - keyPair, err := util.RSAKeyFromPEMFile(config.KeyPath) + // Load cert and rsa key pair. 
+ cert, err := util.PolicyCertificateFromBytes(config.CertJSON) if err != nil { - return nil, fmt.Errorf("NewPCA | LoadRSAKeyPairFromFile | %w", err) + return nil, fmt.Errorf("loading policy certificate from config: %w", err) } - - // Load Root Policy Certificate. - a, err := common.FromJSONFile(config.RootPolicyCertPath) - if err != nil { - return nil, err - } - rpc, err := util.ToType[*common.PolicyCertificate](a) + keyPair, err := util.RSAKeyFromPEM(config.KeyPEM) if err != nil { - return nil, err + return nil, fmt.Errorf("loading RSA key from config: %w", err) } + // Check the private key and RPC match. derBytes, err := util.RSAPublicToDERBytes(&keyPair.PublicKey) if err != nil { return nil, err } - if !bytes.Equal(rpc.PublicKey, derBytes) { - return nil, fmt.Errorf("RPC and key do not match") + if !bytes.Equal(cert.PublicKey, derBytes) { + return nil, fmt.Errorf("key and root policy certificate do not match") } // Load the CT log server entries. @@ -87,7 +82,7 @@ func NewPCA(configPath string) (*PCA, error) { return &PCA{ CAName: config.CAName, RsaKeyPair: keyPair, - RootPolicyCert: rpc, + RootPolicyCert: cert, CtLogServers: logServers, DB: make(map[[32]byte]*common.PolicyCertificate), SerialNumber: 0, @@ -96,8 +91,6 @@ func NewPCA(configPath string) (*PCA, error) { func (pca *PCA) NewPolicyCertificateSigningRequest( version int, - subject string, - serialNumber int, domain string, notBefore time.Time, notAfter time.Time, @@ -107,7 +100,7 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( signatureAlgorithm common.SignatureAlgorithm, policyAttributes common.PolicyAttributes, ownerSigningFunction func(serialized []byte) []byte, - ownerPubKeyHash []byte, + ownerHash []byte, ) (*common.PolicyCertificateSigningRequest, error) { // Check validity range falls inside PCAs. @@ -120,12 +113,12 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( notAfter, pca.RootPolicyCert.NotAfter) } + pca.increaseSerialNumber() + // Create request with appropriate values. 
req := common.NewPolicyCertificateSigningRequest( version, - pca.CAName, - subject, - serialNumber, + pca.SerialNumber, domain, notBefore, notAfter, @@ -136,9 +129,9 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( time.Now(), policyAttributes, nil, - ownerPubKeyHash, + ownerHash, ) - // Serialize it. + // Serialize it including the owner hash. serializedReq, err := common.ToJSON(req) if err != nil { return nil, err @@ -196,26 +189,20 @@ func (pca *PCA) canSkipCoolOffPeriod(req *common.PolicyCertificateSigningRequest return false, nil } // If there is a owner's signature, the id of the key used must be 32 bytes. - if len(req.OwnerPubKeyHash) != 32 { - return false, fmt.Errorf("field OwnerPubKeyHash should be 32 bytes long but is %d", - len(req.OwnerPubKeyHash)) + if len(req.OwnerHash) != 32 { + return false, fmt.Errorf("field OwnerHash should be 32 bytes long but is %d", + len(req.OwnerHash)) } // Cast it to array and check the DB. - key := (*[32]byte)(req.OwnerPubKeyHash) + key := (*[32]byte)(req.OwnerHash) stored, ok := pca.DB[*key] if !ok { // No such certificate, cannot skip cool off period. return false, nil } - // We found the policy certificate used to sign this request. Get the public key. - pubKey, err := util.DERBytesToRSAPublic(stored.PublicKey) - if err != nil { - return false, err - } - // Verify the signature matches. - err = crypto.VerifyOwnerSignature(req, pubKey) + err := crypto.VerifyOwnerSignature(stored, req) // Return true if no error, or false and the error. return err == nil, err @@ -225,11 +212,7 @@ func (pca *PCA) signRequest( req *common.PolicyCertificateSigningRequest, ) (*common.PolicyCertificate, error) { - // Set the issuer values from this CA. 
- pca.increaseSerialNumber() - req.Issuer = pca.RootPolicyCert.Subject() - req.RawSerialNumber = pca.SerialNumber - return crypto.SignRequestAsIssuer(req, pca.RsaKeyPair) + return crypto.SignRequestAsIssuer(pca.RootPolicyCert, pca.RsaKeyPair, req) } func (pca *PCA) sendRequestToAllLogServers(pc *common.PolicyCertificate) error { @@ -255,8 +238,9 @@ func (pca *PCA) sendRequestToLogServer( } func (pca *PCA) signFinalPolicyCertificate(pc *common.PolicyCertificate) error { - _, err := crypto.SignPolicyCertificateAsIssuer(pc, pca.RsaKeyPair) - return err + pc.IssuerSignature = nil + pc.IssuerHash = nil + return crypto.SignPolicyCertificateAsIssuer(pca.RootPolicyCert, pca.RsaKeyPair, pc) } // sendFinalPolCertToAllLogServers sends the final policy certificate with all the SPTs included, diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index b5af1a45..d8c42819 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -6,10 +6,10 @@ package pca import ( "crypto/rsa" "fmt" + "os" "testing" "time" - ctx509 "github.com/google/certificate-transparency-go/x509" "github.com/stretchr/testify/require" "github.com/netsec-ethz/fpki/pkg/common" @@ -18,112 +18,122 @@ import ( "github.com/netsec-ethz/fpki/pkg/util" ) +var updateGolden = tests.UpdateGoldenFiles() + // Test_Config: do nothing func TestNewPCA(t *testing.T) { _, err := NewPCA("testdata/pca_config.json") require.NoError(t, err, "New PCA error") } -func TestCreateConfig(t *testing.T) { - t.Skip("Not creating config") - issuerKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") +func TestUpdateGoldenFiles(t *testing.T) { + if !*updateGolden { + t.Skip("Not creating config") + } + + // Read the files containing the cert and key. 
+ certJSON, err := os.ReadFile("../../tests/testdata/issuer_cert.json") require.NoError(t, err) - derKey, err := util.RSAPublicToDERBytes(&issuerKey.PublicKey) + keyPEM, err := os.ReadFile("../../tests/testdata/issuer_key.pem") require.NoError(t, err) - req := common.NewPolicyCertificateSigningRequest( - 0, - "pca root policy certificate", - "pca root policy certificate", - 13, - "fpki.com", - util.TimeFromSecs(10), - util.TimeFromSecs(10000), - true, - derKey, - common.RSA, - common.SHA256, - util.TimeFromSecs(1), - common.PolicyAttributes{ - TrustedCA: []string{"pca"}, - AllowedSubdomains: []string{""}, - }, - nil, // no owner signature - nil, // hash of owner's public key - ) - // Self sign this pol cert. - rootPolCert, err := crypto.SignRequestAsIssuer(req, issuerKey) - require.NoError(t, err) - // And serialize it to file to include it in the configuration of the PCA. - err = common.ToJSONFile(rootPolCert, "testdata/rpc.json") + // Instantiate the certificate. + cert, err := util.PolicyCertificateFromBytes(certJSON) require.NoError(t, err) c := &PCAConfig{ - CAName: "pca", - KeyPath: "../../tests/testdata/serverkey.pem", - RootPolicyCertPath: "testdata/rpc.json", + CAName: "pca", + CertJSON: certJSON, + KeyPEM: keyPEM, CTLogServers: []CTLogServerEntryConfig{ { Name: "CT log server 1", URL: "URL1.com/foo/bar1", - PublicKeyDER: derKey, + PublicKeyDER: cert.PublicKey, }, { Name: "CT log server 2", URL: "URL2.com/foo/bar2", - PublicKeyDER: derKey, + PublicKeyDER: cert.PublicKey, }, }, } err = SaveConfigToFile(c, "testdata/pca_config.json") require.NoError(t, err) } + +// TestPCAWorkflow checks that the PCA workflow works as intended. func TestPCAWorkflow(t *testing.T) { pca, err := NewPCA("testdata/pca_config.json") require.NoError(t, err, "New PCA error") - // pca is configured using pca_config.json, which itself specifies the PCA to use serverkey.pem - // as the key to use to issue policy certificates. 
- notBefore := util.TimeFromSecs(10 + 1) - notAfter := util.TimeFromSecs(10000 - 10) + // The requester needs a key (which will be identified in the request itself). - ownerKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/clientkey.pem") + ownerKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/owner_key.pem") + require.NoError(t, err) + ownerCert, err := util.PolicyCertificateFromFile("../../tests/testdata/owner_cert.json") require.NoError(t, err) - ownerDerKey, err := util.RSAPublicToDERBytes(&ownerKey.PublicKey) + ownerHash, err := crypto.ComputeHashAsOwner(ownerCert) require.NoError(t, err) + + signingFunctionCallTimes := 0 // incremented when the owner is requested to sign // The workflow from the PCA's perspective is as follows: - // 1. Create request - req, err := pca.NewPolicyCertificateSigningRequest( - 1, - "fpki.com", - 1, - "fpki.com", - notBefore, - notAfter, - true, - ownerDerKey, // public key - common.RSA, - common.SHA256, - common.PolicyAttributes{}, // policy attributes - func(serialized []byte) []byte { - return nil - }, - common.SHA256Hash(ownerDerKey), // owner pub key hash - ) + // 1. Create request. 
+ notBefore := pca.RootPolicyCert.NotBefore.Add(-1) // this will be invalid at first + notAfter := pca.RootPolicyCert.NotAfter.Add(1) // this will be invalid at first + create := func() (*common.PolicyCertificateSigningRequest, error) { + return pca.NewPolicyCertificateSigningRequest( + 1, + "fpki.com", + notBefore, + notAfter, + true, + ownerCert.PublicKey, // public key + common.RSA, + common.SHA256, + common.PolicyAttributes{}, // policy attributes + func(serialized []byte) []byte { + signingFunctionCallTimes++ + data, err := crypto.SignBytes(serialized, ownerKey) + require.NoError(t, err) + return data + }, + ownerHash, // owner hash + ) + } + _, err = create() + require.Error(t, err) // not before is too early + notBefore = pca.RootPolicyCert.NotBefore.Add(1) + _, err = create() + require.Error(t, err) // not after is too late + notAfter = pca.RootPolicyCert.NotAfter.Add(-1) + // It shouldn't fail now. + req, err := create() require.NoError(t, err) - // 2. Owner signs request - pca.increaseSerialNumber() - err = crypto.SignAsOwner(ownerKey, req) + // 2. Owner has signed the request. We can verify this. + require.Equal(t, 1, signingFunctionCallTimes) + // Check the signature. + err = crypto.VerifyOwnerSignature(ownerCert, req) require.NoError(t, err) // 3. PCA verifies owner's signature skip, err := pca.canSkipCoolOffPeriod(req) require.NoError(t, err) require.False(t, skip) // because the PCA doesn't contain the pol cert used to sign it. + // Let's add the root policy certificate that owner-signed the child pol cert. + pca.DB[*(*[32]byte)(ownerHash)] = ownerCert + skip, err = pca.canSkipCoolOffPeriod(req) + require.NoError(t, err) + require.True(t, skip) + // For the test, remove the root pol cert from the DB. + delete(pca.DB, *(*[32]byte)(ownerHash)) // 4. PCA signs as issuer pc, err := pca.signRequest(req) require.NoError(t, err) + // Verify PCA's signature. + err = crypto.VerifyIssuerSignature(pca.RootPolicyCert, pc) + require.NoError(t, err) // 5. 
PCA sends to log servers. Per log server: mockRequester := newmockLogServerRequester(t, pca.CtLogServers) @@ -133,7 +143,7 @@ func TestPCAWorkflow(t *testing.T) { // 8. PCA adds SPT to list in policy certificate err = pca.sendRequestToAllLogServers(pc) require.NoError(t, err) - require.Len(t, pc.SPCTs, len(pca.CtLogServers)) // as many SPTs as CT log servers + require.Equal(t, len(pca.CtLogServers), len(pc.SPCTs)) // as many SPTs as CT log servers checkSPTs(t, pca, pc) // 9. PCA signs again the policy certificate @@ -151,7 +161,7 @@ func TestPCAWorkflow(t *testing.T) { // 11. PCA stores the final policy certificate in its DB. pca.storeInDb(pc) - require.Len(t, pca.DB, 1) + require.Equal(t, 1, len(pca.DB)) for certID, cert := range pca.DB { // The ID is correct: require.Equal(t, certID, [32]byte(common.SHA256Hash32Bytes(pc.PublicKey))) @@ -161,36 +171,127 @@ func TestPCAWorkflow(t *testing.T) { } } +func TestSignAndLogRequest(t *testing.T) { + pca, err := NewPCA("testdata/pca_config.json") + require.NoError(t, err, "New PCA error") + + // The requester needs a key (which will be identified in the request itself). + ownerKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/owner_key.pem") + require.NoError(t, err) + ownerCert, err := util.PolicyCertificateFromFile("../../tests/testdata/owner_cert.json") + require.NoError(t, err) + ownerHash, err := crypto.ComputeHashAsOwner(ownerCert) + require.NoError(t, err) + + // Let's add the root policy certificate from the owner. 
+ pca.DB[*(*[32]byte)(ownerHash)] = ownerCert + + req, err := pca.NewPolicyCertificateSigningRequest( + 1, + "fpki.com", + pca.RootPolicyCert.NotBefore, + pca.RootPolicyCert.NotAfter, + true, + ownerCert.PublicKey, // public key + common.RSA, + common.SHA256, + common.PolicyAttributes{}, // policy attributes + func(serialized []byte) []byte { + data, err := crypto.SignBytes(serialized, ownerKey) + require.NoError(t, err) + return data + }, + ownerHash, // owner hash + ) + require.NoError(t, err) + + // Before we call the regular function, we must have a LogServerRequester + mockRequester := newmockLogServerRequester(t, pca.CtLogServers) + pca.LogServerRequester = mockRequester + + // Call the regular function. + pc, err := pca.SignAndLogRequest(req) + require.NoError(t, err) + + // Check we have as many SPTs as CT log servers. + require.Equal(t, len(pca.CtLogServers), len(pc.SPCTs)) + // And check the SPTs themselves. + checkSPTs(t, pca, pc) + + // Check we made the correct calls to the CT log servers. + expectedURLs := make([]string, 0) + for _, e := range pca.CtLogServers { + expectedURLs = append(expectedURLs, e.URL) + } + require.ElementsMatch(t, expectedURLs, mockRequester.finalPolCertSentTo) + + // Check that the PCA stored the policy certificate. + require.Equal(t, 2, len(pca.DB)) // owner root pol cert plus the new one. + + // For the test, remove the root pol cert from the DB. + delete(pca.DB, *(*[32]byte)(ownerHash)) + + // Verify that the remainig one is the child certificate. + for certID, cert := range pca.DB { + // The ID is correct: + require.Equal(t, certID, [32]byte(common.SHA256Hash32Bytes(pc.PublicKey))) + // And the DB contains the correct pol cert. + require.Equal(t, pc, cert) + break + } + + // Verify owner's signature still valid. + err = crypto.VerifyOwnerSignatureInPolicyCertificate(ownerCert, pc) + require.NoError(t, err) + + // Verify PCA's signature. 
+ err = crypto.VerifyIssuerSignature(pca.RootPolicyCert, pc) + require.NoError(t, err) +} + // mockLogServerRequester mocks a CT log server requester. type mockLogServerRequester struct { - servers map[string]*CTLogServerEntryConfig - pcaCert *ctx509.Certificate - keys map[string]*rsa.PrivateKey + ctLogServers map[string]*CTLogServerEntryConfig + pcaCert *common.PolicyCertificate + + // Cert-Key pair per domain: + certs map[string]*common.PolicyCertificate + keys map[string]*rsa.PrivateKey // URLs of the called CT log servers when sending the final policy cert. finalPolCertSentTo []string } -func newmockLogServerRequester(t tests.T, servers map[[32]byte]*CTLogServerEntryConfig) *mockLogServerRequester { +func newmockLogServerRequester( + t tests.T, + servers map[[32]byte]*CTLogServerEntryConfig, +) *mockLogServerRequester { + // Load the certificate of the PCA. - pcaCert, err := util.CertificateFromPEMFile("../../tests/testdata/servercert.pem") + pcaCert, err := util.PolicyCertificateFromFile("../../tests/testdata/issuer_cert.json") require.NoError(t, err) - // Load the keys of the CT log servers. This mock requester uses one for all of them. - ctKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + // Load the policy certificates and keys of the CT log servers. This mock requester + // uses one pair for all of them. 
+ ctCert, err := util.PolicyCertificateFromFile("../../tests/testdata/issuer_cert.json") + require.NoError(t, err) + ctKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/issuer_key.pem") require.NoError(t, err) m := make(map[string]*CTLogServerEntryConfig) + certs := make(map[string]*common.PolicyCertificate) keys := make(map[string]*rsa.PrivateKey) for _, s := range servers { s := s m[s.URL] = s + certs[s.URL] = ctCert keys[s.URL] = ctKey } return &mockLogServerRequester{ - servers: m, + ctLogServers: m, pcaCert: pcaCert, + certs: certs, keys: keys, finalPolCertSentTo: make([]string, 0), } @@ -215,10 +316,9 @@ func (m *mockLogServerRequester) ObtainSptFromLogServer( if err != nil { return nil, fmt.Errorf("error signing: %w", err) } - logID := common.SHA256Hash(m.servers[url].PublicKeyDER) + logID := common.SHA256Hash(m.ctLogServers[url].PublicKeyDER) spt := common.NewSignedPolicyCertificateTimestamp( 0, - pc.Issuer, logID, time.Now(), signature, @@ -240,16 +340,19 @@ func (m *mockLogServerRequester) SendPolicyCertificateToLogServer( func checkSPTs(t tests.T, pca *PCA, pc *common.PolicyCertificate) { t.Helper() - ctKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + ctCert, err := util.PolicyCertificateFromFile("../../tests/testdata/issuer_cert.json") require.NoError(t, err) - derKey, err := util.RSAPublicToDERBytes(&ctKey.PublicKey) - require.NoError(t, err) - hashedDerKey := common.SHA256Hash(derKey) + + // ctKey, err := util.RSAKeyFromPEMFile("../../tests/testdata/serverkey.pem") + // require.NoError(t, err) + // derKey, err := util.RSAPublicToDERBytes(&ctKey.PublicKey) + // require.NoError(t, err) + hashedDerKey := common.SHA256Hash(ctCert.PublicKey) for _, spt := range pc.SPCTs { require.Equal(t, hashedDerKey, spt.LogID) - require.Equal(t, pca.RootPolicyCert.Subject(), spt.Issuer) require.Less(t, time.Since(spt.AddedTS), time.Minute) require.Greater(t, time.Since(spt.AddedTS).Seconds(), 0.0) + // TODO check spt.Signature } } diff 
--git a/pkg/pca/testdata/pca_config.json b/pkg/pca/testdata/pca_config.json index 775be53d..377469b1 100644 --- a/pkg/pca/testdata/pca_config.json +++ b/pkg/pca/testdata/pca_config.json @@ -4,14 +4,14 @@ { "Name": "CT log server 1", "URL": "URL1.com/foo/bar1", - "PublicKeyDER": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB" + "PublicKeyDER": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB" }, { "Name": "CT log server 2", "URL": "URL2.com/foo/bar2", - "PublicKeyDER": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB" + "PublicKeyDER": "MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB" } ], - "KeyPath": 
"../../tests/testdata/serverkey.pem", - "RootPolicyCertPath": "testdata/rpc.json" + "KeyPEM": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcGZtYTZZOVExWkJ1bjZjdWNlNEw0RkI0QXdnWWVuVzNzYm9oa2VxTU01Y0hxdmQzCjBsTk5QWm9ScURVd2dVTUlOVEhDU0FXNXpaRXZjVXpuUDRSNEMzbE1TWHVRMFYzNVpIazRHV3F1MnNtQUFkbUkKT3dBWUhyYlBoVjJaVEpxb2lqRlN5UTcySlN1SWtJcDE2ekdrRitBMzZMOGlyTzJjWVRRVTBOdmtYVVhJK3E2KwptSS9GTmlneWZjQ1huZy8zKy9VUHFZQS9UYnpMRDBGbGRBWi9QSmdMZ1NnS1BtRllraEljN2RaelBBcnJSUFRFCmNtMWFaYlltOTRvUkpPVjZRbkpuUGxWaGIrQ1NTYmZQQTVvNGtaNnkrUTlhcDJnTVNnUmd5amdVN1lHRExXRWoKTGtORlhOWVhORXlCWW9LSmZlTEt3d2ZGT0l4YjFoajVmbXluL3dJREFRQUJBb0lCQUZoaUVkYzhCU3lreS9QaQpuLzMxYUllYjhqeVREQTN5TDg3SnNtUzBoTkpZSTdJc003ZGh1cWtLS1Vyc2dCYmxiako5ZTlyRWljdm8rMTJYCk9URit4am85N3B6VzJ5aFNwYUtXVm9SYVppQ0YycytoV2tVbzZLODZaRW05clF1NDgvVWJETjJhUlFOSUttSG0KQ2FNai9TRGx1b0FMZ0ZpYjg0RmpyWTRHK3BXYjNzUEZKQjBiZElXNjBIb29DMEM5dVNpNmxpOE41MWJoSkdPLwo5VUxha0xpSmh2TmpHd2VqYjI0cFQ1YUx1dG9vVVZJSmdBQjlsVUY3VDdya3ZoWXhMMDNObWdXZGM4TGJZTTVLCkZuVTFKMytWbExOc3piNFA2eDRlNmViWldHSExJWTRkMWwwV0FlRHJibXViVFhtU3gvR0UrUjJ2UmY5VFdqNjUKbk1GNHMrRUNnWUVBM0ZMcUVycUNoYlh4aGczVnBWNmFMNU96RFc5M1B0dGREUTBFeDJMdkM4cVRPNE5MdzBQVgpiTnZoKytCVTNRbzZSSU42d0QvVElha0d3azg4bVEzVFRMNS9Xeit3WlMxenNlQkJkOWp1cmZJWGhTdmpzZGQ2Ci85RkpPckNDSkFpeUlTbnBkSVpSWUU2b01Yb1ZFZlBTVzJDcFR6Szl2OC9qNWhWV0dDN1E3REVDZ1lFQXdObkUKYzcvVk5UajJPdWdWVmJtK0JldEhPM0JEK2RUdjNDTGdnMjdnYkdyU0puWnM4YXRUM05wZVRPQTFHclhiK3kxSgpmOW42Z0ZhRFJOV3c0UnJlYUdsd2xHcjZjckFudzdJemozVHY0MWp1V1pnVWhhMG1yTHFlTEhLRTdIM1M2bjdFCktsNTRZL3MxSUt6Q0RZRkdQTnN3LzR1dCs5TUc0cFRKUjh0Q095OENnWUJ1VWxaQzZvRlEwcm9ObGYvVXZub1cKT0s3L20wRHFpSTBmYTlWb3dYRlJSaTVUTG50Uld6WVRQRWI4Q0doMkJoa0hGWTd4bFFQelp0K3JqR0diZ2dDNwovT2RvbVl1S0hpaEFlVHBPK0tFTEdKNE9ZTnV5d1dNbGxYWkZuUHlOYm8xRGJla29IOW0vajZOSW93ako1SDBBCnBIWWk0aFdCWm1lSFhrZXQ5RU5KQVFLQmdDNk81SHpGWFdYaUFLQXNFNnFFa3ptK21ZM2lCcWMwSDB4WUp6ZlQKOXQ1MTA1SnhtTStuZXpHZ2pvK2t1VzFmWm1KM2huMFZWUUxTNEJJb1BQRzdtT0pBUW11eWdCQmNNdDF1RWtDYQpES2dvZWpLcklwazdPbEVOSk02NlB4a1JMM0JwZGxaO
FJEaHZGMTV5RnM5SDNIc290K1dhQlVEOHEzYmNVTlBDCllKVUZBb0dCQU04enY0YUpYd2ZSZmp4azd1cDRqQk9rT2szaWY3WVZjOGUrWDhwdjRjVGpyRlJ0aktFOThmaGIKbkZIUzE4NFNJZkozSmltd0Q0ZmxhSDhFOHZ0dndPclpFSXk3azRoMWlIeE9pLzh5eUxuZGRiNEh5YmZqUHdDZwpZVXV4R29ERzRFanF5c2lKZE5IRy9EOWcySi9HU0xuMDhmTUMvYVBBV0NpdXpnZFdIUHovCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==", + "CertJSON": "eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo0LCJTZXJpYWxOdW1iZXIiOjUxNCwiRG9tYWluIjoiZnBraS5jb20iLCJOb3RCZWZvcmUiOiIxOTcwLTAxLTAxVDAxOjAwOjAxKzAxOjAwIiwiTm90QWZ0ZXIiOiIxOTcwLTAxLTAxVDAzOjQ2OjQwKzAxOjAwIiwiSXNJc3N1ZXIiOnRydWUsIlB1YmxpY0tleSI6Ik1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcGZtYTZZOVExWkJ1bjZjdWNlNEw0RkI0QXdnWWVuVzNzYm9oa2VxTU01Y0hxdmQzMGxOTlBab1JxRFV3Z1VNSU5USENTQVc1elpFdmNVem5QNFI0QzNsTVNYdVEwVjM1WkhrNEdXcXUyc21BQWRtSU93QVlIcmJQaFYyWlRKcW9pakZTeVE3MkpTdUlrSXAxNnpHa0YrQTM2TDhpck8yY1lUUVUwTnZrWFVYSStxNittSS9GTmlneWZjQ1huZy8zKy9VUHFZQS9UYnpMRDBGbGRBWi9QSmdMZ1NnS1BtRllraEljN2RaelBBcnJSUFRFY20xYVpiWW05NG9SSk9WNlFuSm5QbFZoYitDU1NiZlBBNW80a1o2eStROWFwMmdNU2dSZ3lqZ1U3WUdETFdFakxrTkZYTllYTkV5QllvS0pmZUxLd3dmRk9JeGIxaGo1Zm15bi93SURBUUFCIiwiVGltZVN0YW1wIjoiMTk5NC0xMi0wM1QwMjowMDo0MVoiLCJQb2xpY3lBdHRyaWJ1dGVzIjp7fSwiU1BDVHMiOlt7IlZlcnNpb24iOjgsIkxvZ0lEIjoibkp5aTc4M1MwUzBxZVE9PSIsIkFkZGVkVFMiOiIyMDEwLTA2LTIzVDA3OjI0OjM2WiIsIlNpZ25hdHVyZSI6IjBIU29LQXJwL2h3TXVXclRJdFlpZ2lsZnYrRWVKcVF6QjIyMXdVUk1PalE9In0seyJMb2dJRCI6IjB5cjM3VHVNL3BCUGt3PT0iLCJBZGRlZFRTIjoiMjA1MC0wNS0wN1QxNjoxOTowMFoiLCJTaWduYXR1cmUiOiIrUEJ0S2J6WmhrUWRKWmtHMXByTmlVdVdpdW53NjUyV1hPYWthVHhPdm9nPSJ9XX19" } \ No newline at end of file diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index c430efab..bd7b3917 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -61,9 +61,8 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic docs := make([]common.PolicyDocument, 2) for i := range docs { pc := RandomPolicyCertificate(t) - pc.RawSubject = domainName - pc.Domain = domainName - pc.Issuer = 
"c0.com" + pc.RawDomain = domainName + pc.IssuerHash = RandomBytesForTest(t, 32) data, err := common.ToJSON(pc) require.NoError(t, err) @@ -125,7 +124,6 @@ func RandomTimeWithoutMonotonic() time.Time { func RandomSignedPolicyCertificateTimestamp(t tests.T) *common.SignedPolicyCertificateTimestamp { return common.NewSignedPolicyCertificateTimestamp( rand.Intn(10), // version - "Issuer", // issuer RandomBytesForTest(t, 10), // log id RandomTimeWithoutMonotonic(), // timestamp RandomBytesForTest(t, 32), // signature @@ -135,46 +133,42 @@ func RandomSignedPolicyCertificateTimestamp(t tests.T) *common.SignedPolicyCerti func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest { return common.NewPolicyCertificateSigningRequest( rand.Intn(10), - "Issuer", - "RPC subject", rand.Intn(1000), // serial number - "domain.com", + "domain", // domain RandomTimeWithoutMonotonic(), RandomTimeWithoutMonotonic(), true, - RandomBytesForTest(t, 32), + RandomBytesForTest(t, 32), // public key common.RSA, common.SHA256, - RandomTimeWithoutMonotonic(), - common.PolicyAttributes{}, // policy attributes (empty for now) - RandomBytesForTest(t, 32), // ownwer signature - RandomBytesForTest(t, 32), // ownwer pub key hash + RandomTimeWithoutMonotonic(), // timestamp + common.PolicyAttributes{}, // policy attributes (empty for now) + RandomBytesForTest(t, 32), // owner signature + RandomBytesForTest(t, 32), // owner hash ) } func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { return common.NewPolicyCertificate( rand.Intn(10), - "Issuer", - "RPC subject", rand.Intn(1000), // serial number "fpki.com", RandomTimeWithoutMonotonic(), RandomTimeWithoutMonotonic(), true, - RandomBytesForTest(t, 32), + RandomBytesForTest(t, 32), // public key common.RSA, common.SHA256, - RandomTimeWithoutMonotonic(), - common.PolicyAttributes{}, // policy attributes (empty for now) - RandomBytesForTest(t, 32), // ownwer signature - RandomBytesForTest(t, 32), // ownwer pub key hash 
- RandomBytesForTest(t, 32), // issuer signature - RandomBytesForTest(t, 32), // issuer pub key hash + RandomTimeWithoutMonotonic(), // timestamp + common.PolicyAttributes{}, // policy attributes (empty for now) + RandomBytesForTest(t, 32), // owner signature + RandomBytesForTest(t, 32), // owner hash []common.SignedPolicyCertificateTimestamp{ *RandomSignedPolicyCertificateTimestamp(t), *RandomSignedPolicyCertificateTimestamp(t), }, + RandomBytesForTest(t, 32), // issuer signature + RandomBytesForTest(t, 32), // issuer hash ) } diff --git a/pkg/util/io.go b/pkg/util/io.go index f5303b53..5699721a 100644 --- a/pkg/util/io.go +++ b/pkg/util/io.go @@ -89,20 +89,57 @@ func RSAKeyFromPEMFile(keyPath string) (*rsa.PrivateKey, error) { return nil, err } + return RSAKeyFromPEM(bytes) +} + +func RSAKeyFromPEM(bytes []byte) (*rsa.PrivateKey, error) { block, _ := pem.Decode(bytes) - if block.Type != "RSA PRIVATE KEY" { + expectType := "RSA PRIVATE KEY" + if block.Type != expectType { // wrong type. return nil, fmt.Errorf("wrong type. 
Got '%s' expected '%s'", - block.Type, "RSA PRIVATE KEY") + block.Type, expectType) } keyPair, err := ctx509.ParsePKCS1PrivateKey(block.Bytes) if err != nil { return nil, err } + return keyPair, nil } +func RSAKeyToPEM(key *rsa.PrivateKey) []byte { + pemBlock := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: ctx509.MarshalPKCS1PrivateKey(key), + } + buff := bytes.NewBuffer(nil) + pem.Encode(buff, pemBlock) + return buff.Bytes() +} + +func PolicyCertificateToBytes(pc *common.PolicyCertificate) ([]byte, error) { + return common.ToJSON(pc) +} + +func PolicyCertificateFromFile(filepath string) (*common.PolicyCertificate, error) { + data, err := ioutil.ReadFile(filepath) + if err != nil { + return nil, err + } + + return PolicyCertificateFromBytes(data) +} + +func PolicyCertificateFromBytes(data []byte) (*common.PolicyCertificate, error) { + obj, err := common.FromJSON(data, common.WithSkipCopyJSONIntoPolicyObjects) + if err != nil { + return nil, err + } + return ToType[*common.PolicyCertificate](obj) +} + // LoadCertsAndChainsFromCSV returns a ready to insert-in-DB collection of the leaf certificate // payload, its ID, its parent ID, and its names, for each certificate and its ancestry chain. // The returned names contains nil unless the corresponding certificate is a leaf certificate. 
diff --git a/pkg/util/io_test.go b/pkg/util/io_test.go index 4dea1a69..4be06e37 100644 --- a/pkg/util/io_test.go +++ b/pkg/util/io_test.go @@ -1,6 +1,7 @@ package util import ( + "os" "testing" "github.com/stretchr/testify/require" @@ -28,3 +29,16 @@ func TestCertificateFromPEMFile(t *testing.T) { result := ExtractCertDomains(cert) require.ElementsMatch(t, result, []string{"*.adiq.com.br", "adiq.com.br"}) } + +func TestRSAKeyFromPEMFileAndBack(t *testing.T) { + filename := "../../tests/testdata/clientkey.pem" + expectedPEM, err := os.ReadFile(filename) + require.NoError(t, err) + + key, err := RSAKeyFromPEMFile(filename) + require.NoError(t, err, "load RSA key error") + + gotPEM := RSAKeyToPEM(key) + // Compare against bytes in file. + require.Equal(t, expectedPEM, gotPEM) +} diff --git a/pkg/util/pem.go b/pkg/util/pem.go index 700adb49..698d2509 100644 --- a/pkg/util/pem.go +++ b/pkg/util/pem.go @@ -2,45 +2,11 @@ package util import ( "crypto/rsa" - "encoding/pem" - "errors" "fmt" ctx509 "github.com/google/certificate-transparency-go/x509" ) -func RSAPublicToPEM(pubkey *rsa.PublicKey) ([]byte, error) { - pubkey_bytes, err := ctx509.MarshalPKIXPublicKey(pubkey) - if err != nil { - return nil, err - } - - return pem.EncodeToMemory( - &pem.Block{ - Type: "RSA PUBLIC KEY", - Bytes: pubkey_bytes, - }, - ), nil -} - -func PEMToRSAPublic(pubkey []byte) (*rsa.PublicKey, error) { - block, _ := pem.Decode(pubkey) - if block == nil { - return nil, fmt.Errorf("PemBytesToRsaPublicKey | Decode | block empty") - } - - pub, err := ctx509.ParsePKIXPublicKey(block.Bytes) - if err != nil { - return nil, fmt.Errorf("PemBytesToRsaPublicKey | ParsePKIXPublicKey | %w", err) - } - - pubKeyResult, ok := pub.(*rsa.PublicKey) - if ok { - return pubKeyResult, nil - } - return nil, errors.New("PemBytesToRsaPublicKey | ParsePKIXPublicKey | Key type is not RSA") -} - func RSAPublicToDERBytes(pubKey *rsa.PublicKey) ([]byte, error) { return ctx509.MarshalPKIXPublicKey(pubKey) } 
diff --git a/pkg/util/pem_test.go b/pkg/util/pem_test.go index 9a21aa1a..fa4f06fd 100644 --- a/pkg/util/pem_test.go +++ b/pkg/util/pem_test.go @@ -1,22 +1,19 @@ -package util +package util_test import ( - "crypto/rsa" - "math/rand" "testing" + "github.com/netsec-ethz/fpki/pkg/tests/random" + "github.com/netsec-ethz/fpki/pkg/util" "github.com/stretchr/testify/require" ) -func TestRSAPublicToPEMAndBack(t *testing.T) { - privateKeyPair, err := rsa.GenerateKey(rand.New(rand.NewSource(0)), 2048) - require.NoError(t, err) +func TestRSAPublicToDERBytesAndBack(t *testing.T) { + privateKeyPair := random.RandomRSAPrivateKey(t) - bytes, err := RSAPublicToPEM(&privateKeyPair.PublicKey) + pubKeyDER, err := util.RSAPublicToDERBytes(&privateKeyPair.PublicKey) require.NoError(t, err) - - pubKey, err := PEMToRSAPublic(bytes) + got, err := util.DERBytesToRSAPublic(pubKeyDER) require.NoError(t, err) - - require.Equal(t, privateKeyPair.PublicKey, *pubKey) + require.Equal(t, &privateKeyPair.PublicKey, got) } diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index d3872c3b..1dcaad79 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -38,7 +38,7 @@ func TestToType(t *testing.T) { // *common.RPC { orig := &common.PolicyCertificate{} - orig.RawSubject = "a.com" + orig.RawDomain = "a.com" orig.Version = 1 e := any(orig) r, err := ToType[*common.PolicyCertificate](e) diff --git a/tests/testdata/issuer_cert.json b/tests/testdata/issuer_cert.json index 92b5b54a..6ec65160 100644 --- a/tests/testdata/issuer_cert.json +++ b/tests/testdata/issuer_cert.json @@ -1 +1 @@ 
-{"T":"*pc","O":{"Version":4,"SerialNumber":514,"Domain":"fpki.com","NotBefore":"2053-11-13T16:07:17Z","NotAfter":"1988-01-03T11:59:48Z","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB","TimeStamp":"1994-12-03T02:00:41Z","PolicyAttributes":{},"OwnerSignature":"4ta+IO/NbOqEtpJeYHvgY3Fvlt3N0B11BFw/AA+KeWs=","OwnerHash":"zmxRLDgBqsru361bUGZk6MDkp3Hs4Li3wZZdkYElG3w=","IssuerSignature":"FQG32YRrZusCtX5c2ntsumiR1ha9aGw3uDRhOsi6oiw=","IssuerHash":"AI/+aINSc0rk4/Ehes1fgycIFDAYZ7XQZxGyOAAceVc=","SPCTs":[{"Version":8,"LogID":"nJyi783S0S0qeQ==","AddedTS":"2010-06-23T07:24:36Z","Signature":"0HSoKArp/hwMuWrTItYigilfv+EeJqQzB221wURMOjQ="},{"LogID":"0yr37TuM/pBPkw==","AddedTS":"2050-05-07T16:19:00Z","Signature":"+PBtKbzZhkQdJZkG1prNiUuWiunw652WXOakaTxOvog="}]}} \ No newline at end of file 
+{"T":"*pc","O":{"Version":4,"SerialNumber":514,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:01+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB","TimeStamp":"1994-12-03T02:00:41Z","PolicyAttributes":{},"SPCTs":[{"Version":8,"LogID":"nJyi783S0S0qeQ==","AddedTS":"2010-06-23T07:24:36Z","Signature":"0HSoKArp/hwMuWrTItYigilfv+EeJqQzB221wURMOjQ="},{"LogID":"0yr37TuM/pBPkw==","AddedTS":"2050-05-07T16:19:00Z","Signature":"+PBtKbzZhkQdJZkG1prNiUuWiunw652WXOakaTxOvog="}]}} \ No newline at end of file diff --git a/tests/testdata/owner_cert.json b/tests/testdata/owner_cert.json index d5bbbf78..d923bcbc 100644 --- a/tests/testdata/owner_cert.json +++ b/tests/testdata/owner_cert.json @@ -1 +1 @@ 
-{"T":"*pc","O":{"Version":9,"SerialNumber":40,"Domain":"fpki.com","NotBefore":"2013-01-22T17:32:12Z","NotAfter":"1905-03-15T19:55:16Z","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5mtGaefk71aGYEzcI8vjbE2puxssY4Lf1V7t0H0Ji4CSmTnAkKEse/n2vo/klQma1GxORG6DpVm6ggdhoNBZR5Od+i67RB9qFj35SBHlh9uwUnSNZsIl/iOCM8ubD/sk9cMrEHUgTB4bgLNl+imypClt4zjaa48UWwvozOYaVUV/r4FgIVV6i+GHiSUG6yDkMzsh0X8TJPI6Zyjcr14YPvOy0y+Tnqv77u1n0MRySLqFmTrI0BTo5HK7H6pP5pFp9ETTgOgnu2rWDm4nMheeyepfoQbmFjS7USFm4GI4J6uE6MViluBSM1w2h6ItSLQS3ERsGe0PUATDWETpHfBRHQIDAQAB","TimeStamp":"1926-07-30T02:03:38Z","PolicyAttributes":{},"OwnerSignature":"h4yI3s03PPowAmZsmgXrGLlL7VqQCYccV2HUsWmenyo=","OwnerHash":"i0oqTsfv92Z0VaJ6TdP70CGajs2WrSZlXCoMVvm1WEY=","IssuerSignature":"lGeJQIhk9U0Cdbd2hEyoSGIevedlxP1ID6jDKEIsSzLbOw9nx/6KgwffbKyqAjJt09NABNU84Q5FAJTXUro8LxPRF1zw2Z+MpXUH0VoYWJ+aAuo8gGss4s/M+PZhAU5noqM03UeVRtSFDakUEQLHIbpxaoABLjb0AvwxobEZJkzaEGv1S+jYhDF6V4YUYGAqEYPGnIoxXFH3BvFxNe6/wrFz+AXqZCdVwkBootTyph+BJsqjNVaxp2UHHzc3Qo5xWA0GMSa6UTjgdKW+wggFrYc77sVtUMAPuVbPGoY69DJcPPGMghTWiAw+3E77/Yjt86kFiPZ2kUq72HDwAdFDSQ==","IssuerHash":"AcjFdktnguYcKZ196GxfwiYwY2TdUYE2WoL3SNk4viU=","SPCTs":[{"Version":5,"LogID":"am09KiLqSxA07w==","AddedTS":"2077-01-09T19:27:28Z","Signature":"gOpvrqDfBQiaUWwlu2kw60dVdKW/LNFulHLZLnq6uGk="},{"Version":2,"LogID":"t6oQ1KNjhiBtow==","AddedTS":"2068-05-20T17:04:57Z","Signature":"VaDJqqnu7JAE1h96qQY7n/j61f6qUQczBt57EHjq6HM="}]}} \ No newline at end of file 
+{"T":"*pc","O":{"Version":9,"SerialNumber":40,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:01+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5mtGaefk71aGYEzcI8vjbE2puxssY4Lf1V7t0H0Ji4CSmTnAkKEse/n2vo/klQma1GxORG6DpVm6ggdhoNBZR5Od+i67RB9qFj35SBHlh9uwUnSNZsIl/iOCM8ubD/sk9cMrEHUgTB4bgLNl+imypClt4zjaa48UWwvozOYaVUV/r4FgIVV6i+GHiSUG6yDkMzsh0X8TJPI6Zyjcr14YPvOy0y+Tnqv77u1n0MRySLqFmTrI0BTo5HK7H6pP5pFp9ETTgOgnu2rWDm4nMheeyepfoQbmFjS7USFm4GI4J6uE6MViluBSM1w2h6ItSLQS3ERsGe0PUATDWETpHfBRHQIDAQAB","TimeStamp":"1926-07-30T02:03:38Z","PolicyAttributes":{},"IssuerSignature":"QQt1BhHqDY7iyDRa4f65jJyPk70fDYFHW29t5DClhyNtvPHHTeNlpGghGixdvXR6iVfw+sn5ev5zf7w+pl6BprlZ7gTJj6G/L3/c/Ksk3kR2e/XfXelAGZ/fNWRFs7/AmORMUgZUFsngJIiholtB4NPsyuNWEj01Kbc48kUHcJFzVeWkjwc5mo9tTGh3v8Mrn+xzzX5ONr/JA8XuJlfLBT4WFel3wn8zTfeUdNaW0k+yFVx6Ajg/E1HrXvSQvbr5mQlLZ7uAziSbddrXcUKdZrbkM/aj/OfyGnKao4SGFPGnO1pKbgGW2AS9haq2zhVwSe+ecoSfltdbSAPK4Y2eVA==","IssuerHash":"jdUREDKTlHexuImHcB5NdzLubV1OglH/VdfbczHIiK8=","SPCTs":[{"Version":5,"LogID":"am09KiLqSxA07w==","AddedTS":"2077-01-09T19:27:28Z","Signature":"gOpvrqDfBQiaUWwlu2kw60dVdKW/LNFulHLZLnq6uGk="},{"Version":2,"LogID":"t6oQ1KNjhiBtow==","AddedTS":"2068-05-20T17:04:57Z","Signature":"VaDJqqnu7JAE1h96qQY7n/j61f6qUQczBt57EHjq6HM="}]}} \ No newline at end of file From a3754a7b3c236c13b38b60f42ed431bd383b47c5 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Tue, 11 Jul 2023 17:55:42 +0200 Subject: [PATCH 179/187] Renamed Raw* fields to *Field. Modified json.go to introspect for "JSONField" instead of "RawJSON". 
--- pkg/common/crypto/crypto.go | 6 +++--- pkg/common/json.go | 4 ++-- pkg/common/json_test.go | 10 +++++----- pkg/common/policies.go | 18 +++++++++--------- pkg/common/policy_common.go | 6 ++++-- pkg/tests/random/random.go | 4 ++-- pkg/util/types_test.go | 2 +- 7 files changed, 26 insertions(+), 24 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 04fb1355..4bb46375 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -104,8 +104,8 @@ func VerifyOwnerSignatureInPolicyCertificate( req := common.NewPolicyCertificateSigningRequest( c.Version, - c.RawSerialNumber, - c.RawDomain, + c.SerialNumberField, + c.DomainField, c.NotBefore, c.NotAfter, c.IsIssuer, @@ -132,7 +132,7 @@ func SignRequestAsIssuer( cert := common.NewPolicyCertificate( req.Version, req.SerialNumber(), - req.RawDomain, + req.DomainField, req.NotBefore, req.NotAfter, req.IsIssuer, diff --git a/pkg/common/json.go b/pkg/common/json.go index 64ca7a44..58fa2903 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -138,12 +138,12 @@ func (o *serializableObjectBase) UnmarshalJSON(data []byte) error { base := reflect.Indirect(reflect.ValueOf(obj)).FieldByName("PolicyPartBase") if base != (reflect.Value{}) { // It is a PolicyPartBase like object. Check the Raw field (should always be true). - if raw := base.FieldByName("RawJSON"); raw != (reflect.Value{}) { + if raw := base.FieldByName("JSONField"); raw != (reflect.Value{}) { // Set its value to the JSON data. raw.Set(reflect.ValueOf(data)) } else { // This should never happen, and the next line should ensure it: - _ = PolicyPartBase{}.RawJSON + _ = PolicyPartBase{}.JSONField // But terminate the control flow anyways with a panic. 
panic("logic error: structure PolicyPartBase has lost its Raw member") } diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index f4d3e267..e3e087f8 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -97,7 +97,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { rpc := obj.(*common.PolicyCertificate) - return [][]byte{rpc.RawJSON} + return [][]byte{rpc.JSONField} }, }, "spPtr": { @@ -105,7 +105,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { sp := obj.(*common.PolicyCertificate) - return [][]byte{sp.RawJSON} + return [][]byte{sp.JSONField} }, }, "spValue": { @@ -113,7 +113,7 @@ func TestPolicyObjectBaseRaw(t *testing.T) { rawElemsCount: 1, getRawElemsFcn: func(obj any) [][]byte { sp := obj.(common.PolicyCertificate) - return [][]byte{sp.RawJSON} + return [][]byte{sp.JSONField} }, }, "list": { @@ -125,8 +125,8 @@ func TestPolicyObjectBaseRaw(t *testing.T) { getRawElemsFcn: func(obj any) [][]byte { l := obj.([]any) return [][]byte{ - l[0].(*common.PolicyCertificate).RawJSON, - l[1].(*common.PolicyCertificateSigningRequest).RawJSON, + l[0].(*common.PolicyCertificate).JSONField, + l[1].(*common.PolicyCertificateSigningRequest).JSONField, } }, }, diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 050f2fd7..c445d569 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -15,16 +15,16 @@ type PolicyDocument interface { type PolicyCertificateBase struct { PolicyPartBase - RawSerialNumber int `json:"SerialNumber,omitempty"` - RawDomain string `json:"Domain,omitempty"` + SerialNumberField int `json:"SerialNumber,omitempty"` + DomainField string `json:"Domain,omitempty"` } -func (o PolicyCertificateBase) SerialNumber() int { return o.RawSerialNumber } -func (o PolicyCertificateBase) Domain() string { return o.RawDomain } +func (o PolicyCertificateBase) SerialNumber() int { return o.SerialNumberField } 
+func (o PolicyCertificateBase) Domain() string { return o.DomainField } func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { return p.PolicyPartBase.Equal(x.PolicyPartBase) && - p.RawSerialNumber == x.RawSerialNumber && - p.RawDomain == x.RawDomain + p.SerialNumberField == x.SerialNumberField && + p.DomainField == x.DomainField } // PolicyCertificateFields contains all the fields that a policy certificate or a signing request @@ -106,8 +106,8 @@ func NewPolicyCertificateFields( PolicyPartBase: PolicyPartBase{ Version: version, }, - RawSerialNumber: serialNumber, - RawDomain: domain, + SerialNumberField: serialNumber, + DomainField: domain, }, NotBefore: notBefore, NotAfter: notAfter, @@ -201,7 +201,7 @@ func NewPolicyCertificateRevocationFields( PolicyPartBase: PolicyPartBase{ Version: version, }, - RawSerialNumber: serialNumber, + SerialNumberField: serialNumber, }, TimeStamp: timeStamp, OwnerSignature: ownerSignature, diff --git a/pkg/common/policy_common.go b/pkg/common/policy_common.go index c24473ca..c411a533 100644 --- a/pkg/common/policy_common.go +++ b/pkg/common/policy_common.go @@ -5,11 +5,13 @@ type MarshallableDocument interface { Raw() []byte // Returns the Raw JSON this object was unmarshaled from (nil if none). } +// MarshallableDocumentBase is used to read and write document from and to json files. +// If changing the name of the field, check the file json.go where we introspect for it. type MarshallableDocumentBase struct { - RawJSON []byte `json:"-"` // omit from JSON (un)marshaling + JSONField []byte `json:"-"` // omit from JSON (un)marshaling } -func (o MarshallableDocumentBase) Raw() []byte { return o.RawJSON } +func (o MarshallableDocumentBase) Raw() []byte { return o.JSONField } // PolicyPart is an interface that is implemented by all objects that are part of the set // of "policy objects". 
A policy object is that one that represents functionality of policies diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index bd7b3917..7f81a366 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -61,12 +61,12 @@ func BuildTestRandomPolicyHierarchy(t tests.T, domainName string) []common.Polic docs := make([]common.PolicyDocument, 2) for i := range docs { pc := RandomPolicyCertificate(t) - pc.RawDomain = domainName + pc.DomainField = domainName pc.IssuerHash = RandomBytesForTest(t, 32) data, err := common.ToJSON(pc) require.NoError(t, err) - pc.RawJSON = data + pc.JSONField = data docs[i] = pc } return docs diff --git a/pkg/util/types_test.go b/pkg/util/types_test.go index 1dcaad79..f277ccc2 100644 --- a/pkg/util/types_test.go +++ b/pkg/util/types_test.go @@ -38,7 +38,7 @@ func TestToType(t *testing.T) { // *common.RPC { orig := &common.PolicyCertificate{} - orig.RawDomain = "a.com" + orig.DomainField = "a.com" orig.Version = 1 e := any(orig) r, err := ToType[*common.PolicyCertificate](e) From 0fd670be7c7dc98af475fc806bcf61de47a35d94 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 21 Jul 2023 11:51:50 +0200 Subject: [PATCH 180/187] Add a signing request for the revocation of pol certs. 
--- pkg/common/embedded_policies.go | 1 - pkg/common/json.go | 4 ++++ pkg/common/json_test.go | 8 +++++++- pkg/common/policy_issuance.go | 17 +++++++++++++++++ pkg/tests/random/random.go | 34 +++++++++++++++++++++++++++++++++ 5 files changed, 62 insertions(+), 2 deletions(-) diff --git a/pkg/common/embedded_policies.go b/pkg/common/embedded_policies.go index 03d97eea..f02f5655 100644 --- a/pkg/common/embedded_policies.go +++ b/pkg/common/embedded_policies.go @@ -82,7 +82,6 @@ func NewSignedPolicyCertificateRevocationTimestamp( logID []byte, addedTS time.Time, signature []byte, - reason int, ) *SignedPolicyCertificateRevocationTimestamp { return &SignedPolicyCertificateRevocationTimestamp{ SignedEntryTimestamp: *NewSignedEntryTimestamp( diff --git a/pkg/common/json.go b/pkg/common/json.go index 58fa2903..c3613085 100644 --- a/pkg/common/json.go +++ b/pkg/common/json.go @@ -67,6 +67,8 @@ func (*serializableObjectBase) marshalJSON(obj any) (string, []byte, error) { T = "pc" case SignedPolicyCertificateTimestamp: T = "spct" + case PolicyCertificateRevocationSigningRequest: + T = "pcrevsr" case PolicyCertificateRevocation: T = "pcrev" case SignedPolicyCertificateRevocationTimestamp: @@ -193,6 +195,8 @@ func (o *serializableObjectBase) unmarshalTypeObject(T string, data []byte) (boo obj, err = inflateObj[PolicyCertificateSigningRequest](data) case "pc": obj, err = inflateObj[PolicyCertificate](data) + case "pcrevsr": + obj, err = inflateObj[PolicyCertificateRevocationSigningRequest](data) case "pcrev": obj, err = inflateObj[PolicyCertificateRevocation](data) case "spct": diff --git a/pkg/common/json_test.go b/pkg/common/json_test.go index e3e087f8..a05cff58 100644 --- a/pkg/common/json_test.go +++ b/pkg/common/json_test.go @@ -24,9 +24,15 @@ func TestPolicyObjects(t *testing.T) { "rpcValue": { data: *random.RandomPolicyCertificate(t), }, - "rcsr": { + "pcsr": { data: random.RandomPolCertSignRequest(t), }, + "pcrev": { + data: random.RandomPolicyCertificateRevocation(t), 
+ }, + "pcrevsr": { + data: random.RandomPolicyCertificateRevocationSigningRequest(t), + }, "spt": { data: *random.RandomSignedPolicyCertificateTimestamp(t), }, diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 0a6ecd9a..5f7c2971 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -1,6 +1,7 @@ package common import ( + "bytes" "time" ) @@ -51,3 +52,19 @@ func NewPolicyCertificateSigningRequest( func (req *PolicyCertificateSigningRequest) Equal(x *PolicyCertificateSigningRequest) bool { return req.PolicyCertificateFields.Equal(x.PolicyCertificateFields) } + +func NewPolicyCertificateRevocationSigningRequest( + polCertHash []byte, +) *PolicyCertificateRevocationSigningRequest { + return &PolicyCertificateRevocationSigningRequest{ + PolicyCertificateHash: polCertHash, + } +} + +func (req *PolicyCertificateRevocationSigningRequest) Equal( + x *PolicyCertificateRevocationSigningRequest, +) bool { + + return bytes.Equal(req.PolicyCertificateHash, x.PolicyCertificateHash) + +} diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index 7f81a366..e4c59842 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -130,6 +130,18 @@ func RandomSignedPolicyCertificateTimestamp(t tests.T) *common.SignedPolicyCerti ) } +func RandomSignedPolicyCertificateRevocationTimestamp( + t tests.T, +) *common.SignedPolicyCertificateRevocationTimestamp { + + return common.NewSignedPolicyCertificateRevocationTimestamp( + rand.Intn(10), // version + RandomBytesForTest(t, 10), // log id + RandomTimeWithoutMonotonic(), // timestamp + RandomBytesForTest(t, 32), // signature + ) +} + func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest { return common.NewPolicyCertificateSigningRequest( rand.Intn(10), @@ -172,6 +184,28 @@ func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { ) } +func RandomPolicyCertificateRevocationSigningRequest(t tests.T) 
*common.PolicyCertificateRevocationSigningRequest { + return common.NewPolicyCertificateRevocationSigningRequest( + RandomBytesForTest(t, 32), // hash of the pol cert to revoke + ) +} + +func RandomPolicyCertificateRevocation(t tests.T) *common.PolicyCertificateRevocation { + return common.NewPolicyCertificateRevocation( + rand.Intn(10), // version + rand.Intn(1000), // serial number + RandomTimeWithoutMonotonic(), // timestamp + RandomBytesForTest(t, 32), // owner signature + RandomBytesForTest(t, 32), // owner hash + []common.SignedPolicyCertificateRevocationTimestamp{ + *RandomSignedPolicyCertificateRevocationTimestamp(t), + *RandomSignedPolicyCertificateRevocationTimestamp(t), + }, + RandomBytesForTest(t, 32), // issuer signature + RandomBytesForTest(t, 32), // issuer hash + ) +} + // RandomRSAPrivateKey generates a NON-cryptographycally secure RSA private key. func RandomRSAPrivateKey(t tests.T) *rsa.PrivateKey { privateKeyPair, err := rsa.GenerateKey(NewRandReader(), 2048) From bf426672025682fbe38896a30ff76e9260a7a083 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 21 Jul 2023 13:16:30 +0200 Subject: [PATCH 181/187] Rename field. --- pkg/common/policies.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/common/policies.go b/pkg/common/policies.go index c445d569..b6200399 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -222,7 +222,7 @@ func NewPolicyCertificateRevocation( timeStamp time.Time, ownerSignature []byte, ownerHash []byte, - serverTimestamps []SignedPolicyCertificateRevocationTimestamp, + SPCRTs []SignedPolicyCertificateRevocationTimestamp, issuerSignature []byte, issuerHash []byte, ) *PolicyCertificateRevocation { @@ -236,7 +236,7 @@ func NewPolicyCertificateRevocation( ), IssuerSignature: issuerSignature, IssuerHash: issuerHash, - SPCRTs: serverTimestamps, + SPCRTs: SPCRTs, } } From e45deb49f97dc7c73ef2b26b24d16698a3ce3249 Mon Sep 17 00:00:00 2001 From: "Juan A. 
Garcia Pardo" Date: Thu, 27 Jul 2023 12:36:01 +0200 Subject: [PATCH 182/187] Owner and Issuer hash are computed the same way. They are SPCT independent, thus always computed over a PolicyCertificate without SPCTs and IssuerSignature. The OwnerSignature is computed over the serialized owned PolicyCertificate, always without SPCTs, IssuerSignature, IssuerHash, OwnerSignature or OwnerHash. Since this step is done at the time of the request creation, no semantic changes are needed. --- pkg/common/crypto/crypto.go | 28 ++++++++----------------- pkg/common/crypto/crypto_test.go | 34 +++++-------------------------- pkg/common/policies.go | 35 +++++++++++++++++++++++++++++++- pkg/pca/pca.go | 2 +- pkg/pca/pca_test.go | 4 ++-- 5 files changed, 50 insertions(+), 53 deletions(-) diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 4bb46375..99c0f582 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -36,14 +36,14 @@ func SignAsOwner( return fmt.Errorf("there exists a non nil owner signature and hash") } // Owner identifier: - ownerHash, err := ComputeHashAsOwner(ownerPolCert) + ownerHash, err := ComputeHashAsSigner(ownerPolCert) if err != nil { return err } req.OwnerHash = ownerHash // Sign using the owner's private key and including the hash of its public key. - ownerSignature, err := signStructRSASHA256(req, ownerKey) + ownerSignature, err := signStructRSASHA256(common.NewPolicyCertificateFromRequest(req), ownerKey) if err != nil { req.OwnerHash = nil return fmt.Errorf("RCSRCreateSignature | SignStructRSASHA256 | %w", err) @@ -62,7 +62,7 @@ func VerifyOwnerSignature( ) error { // Check owner identification. 
- ownerHash, err := ComputeHashAsOwner(ownerPolCert) + ownerHash, err := ComputeHashAsSigner(ownerPolCert) if err != nil { return err } @@ -81,7 +81,7 @@ func VerifyOwnerSignature( // Serialize request without signature: sig := req.OwnerSignature req.OwnerSignature = nil - serializedStruct, err := common.ToJSON(req) + serializedStruct, err := common.ToJSON(common.NewPolicyCertificateFromRequest(req)) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } @@ -169,7 +169,7 @@ func SignPolicyCertificateAsIssuer( } // Identify the issuer of the child policy certificate with the hash of the modified policy // certificate of the issuer. - issuerHash, err := ComputeHashAsIssuer(issuerPolCert) + issuerHash, err := ComputeHashAsSigner(issuerPolCert) if err != nil { return err } @@ -195,7 +195,7 @@ func VerifyIssuerSignature( ) error { // Check owner identification. - issuerHash, err := ComputeHashAsIssuer(issuerPolCert) + issuerHash, err := ComputeHashAsSigner(issuerPolCert) if err != nil { return err } @@ -242,9 +242,9 @@ func signStructRSASHA256(s any, key *rsa.PrivateKey) ([]byte, error) { return SignBytes(data, key) } -// ComputeHashAsOwner computes the bytes of the policy certificate as being an owner certificate. +// ComputeHashAsSigner computes the bytes of the policy certificate as being an owner certificate. // This means: it serializes it but without SPCTs or issuer signature, and computes its sha256. -func ComputeHashAsOwner(p *common.PolicyCertificate) ([]byte, error) { +func ComputeHashAsSigner(p *common.PolicyCertificate) ([]byte, error) { // Remove SPCTs and issuer signature. SPCTs, issuerSignature := p.SPCTs, p.IssuerSignature p.SPCTs, p.IssuerSignature = nil, nil @@ -255,15 +255,3 @@ func ComputeHashAsOwner(p *common.PolicyCertificate) ([]byte, error) { return common.SHA256Hash(serializedPC), err } - -func ComputeHashAsIssuer(p *common.PolicyCertificate) ([]byte, error) { - // Remove SPCTs. 
- SPCTs := p.SPCTs - p.SPCTs = nil - - // Serialize and restore previously removed fields. - serializedPC, err := common.ToJSON(p) - p.SPCTs = SPCTs - - return common.SHA256Hash(serializedPC), err -} diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index c73c97de..95fb15c8 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -96,7 +96,7 @@ func TestComputeHashAsOwner(t *testing.T) { require.NotEmpty(t, pc.IssuerSignature) require.NotEmpty(t, pc.IssuerHash) - gotHash, err := crypto.ComputeHashAsOwner(pc) + gotHash, err := crypto.ComputeHashAsSigner(pc) require.NoError(t, err) // Remove SPCTs and issuer signature, and serialize. @@ -110,30 +110,6 @@ func TestComputeHashAsOwner(t *testing.T) { require.Equal(t, expected, gotHash) } -func TestComputeHashAsIssuer(t *testing.T) { - rand.Seed(2) - - // Get random policy certificate and check it contains SPCTs, owner, and issuer fields. - pc := random.RandomPolicyCertificate(t) - require.NotEmpty(t, pc.SPCTs) - require.NotEmpty(t, pc.OwnerSignature) - require.NotEmpty(t, pc.OwnerHash) - require.NotEmpty(t, pc.IssuerSignature) - require.NotEmpty(t, pc.IssuerHash) - - gotHash, err := crypto.ComputeHashAsIssuer(pc) - require.NoError(t, err) - - // Remove SPCTs, and serialize. - pc.SPCTs = nil - serializedPC, err := common.ToJSON(pc) - require.NoError(t, err) - - // Compare with the expected value. - expected := common.SHA256Hash(serializedPC) - require.Equal(t, expected, gotHash) -} - func TestSignAsOwner(t *testing.T) { rand.Seed(11) @@ -168,14 +144,14 @@ func TestSignAsOwner(t *testing.T) { // Manually do the steps to sign, and compare results. 3 stesps. // 1. Check the owner hash is correct. - ownerHash, err := crypto.ComputeHashAsOwner(ownerCert) + ownerHash, err := crypto.ComputeHashAsSigner(ownerCert) require.NoError(t, err) require.Equal(t, ownerHash, request.OwnerHash) // 2. Sign the child request without owner signature. 
request.OwnerSignature = nil - serializedRequestWoutOwnerSignature, err := common.ToJSON(request) + serializedRequestWoutOwnerSignature, err := common.ToJSON(common.NewPolicyCertificateFromRequest(request)) require.NoError(t, err) - expectedSignature, err := crypto.SignStructRSASHA256(request, ownerKey) + expectedSignature, err := crypto.SignStructRSASHA256(common.NewPolicyCertificateFromRequest(request), ownerKey) require.NoError(t, err) // 3. Compare signatures. require.Equal(t, expectedSignature, gotSignature) @@ -226,7 +202,7 @@ func TestSignPolicyCertificateAsIssuer(t *testing.T) { // Manually do the steps to sign, and compare results. 3 stesps. // 1. Check that the issuer hash is correct. // Check that the issuer hash is correct. - issuerHash, err := crypto.ComputeHashAsIssuer(issuerCert) + issuerHash, err := crypto.ComputeHashAsSigner(issuerCert) require.NoError(t, err) require.Equal(t, issuerHash, childPolCert.IssuerHash) // 2. Sign the child policy certificate without issuer signature. diff --git a/pkg/common/policies.go b/pkg/common/policies.go index b6200399..4f71161d 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -40,6 +40,9 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { // The `OwnerHash` field is the SHA256 of the payload of the owner certificate that contained the // owner signature. The hash is computed on the owner's policy certificate, but without any // SPCTs or issuer signature, but preserving the owner's signature. +// The issuer signature is removed because the party verifying the validity of the signature cannot +// reconstruct the signature that would depend on SPCTs that were in the issuer certificate at the +// time of issuance. We want the hash to be SPCT independent, thus the signature has to be removed. 
type PolicyCertificateFields struct { PolicyCertificateBase NotBefore time.Time `json:",omitempty"` @@ -57,7 +60,16 @@ type PolicyCertificateFields struct { // PolicyCertificate can be a Root Policy Certificate, or a policy certificate that was issued by // a previously existing policy certificate. // The field `IssuerHash` has semantics analogouys to `OwnerHash`: it is the SHA256 of the issuer -// policy certificate that was used to sign this policy certificate, without SCPTs. +// policy certificate that was used to sign this policy certificate, without SCPTs or issuer +// signature. +// +// We want the hash identifying a certificate as owner or issuer to be SPCT independent because we +// want the signer certificate to "evolve" towards being more trusted, without changing its hash, +// by being logged into CT log servers. By logging a certificate into a CT log server the +// certificate can only remain or improve its trustworthiness, thus any certificate used to sign as +// owner or issuer will always remain at least as trusted as before, and thus there is no need to +// reissue existing issued certificates by that certificate (we want the `OwnerHash` and +// `IssuerHash` to be still valid). 
type PolicyCertificate struct { PolicyCertificateFields IssuerSignature []byte `json:",omitempty"` @@ -176,6 +188,27 @@ func NewPolicyCertificate( } } +func NewPolicyCertificateFromRequest(req *PolicyCertificateSigningRequest) *PolicyCertificate { + return NewPolicyCertificate( + req.Version, + req.SerialNumberField, + req.DomainField, + req.NotBefore, + req.NotAfter, + req.IsIssuer, + req.PublicKey, + req.PublicKeyAlgorithm, + req.SignatureAlgorithm, + req.TimeStamp, + req.PolicyAttributes, + req.OwnerSignature, + req.OwnerHash, + nil, // SPCTs + nil, // issuer signature + nil, // issuer hash + ) +} + func (c PolicyCertificate) Equal(x PolicyCertificate) bool { return c.PolicyCertificateFields.Equal(x.PolicyCertificateFields) && bytes.Equal(c.IssuerSignature, x.IssuerSignature) && diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index b95a4052..0539d113 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -132,7 +132,7 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( ownerHash, ) // Serialize it including the owner hash. 
- serializedReq, err := common.ToJSON(req) + serializedReq, err := common.ToJSON(common.NewPolicyCertificateFromRequest(req)) if err != nil { return nil, err } diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index d8c42819..344202d6 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -72,7 +72,7 @@ func TestPCAWorkflow(t *testing.T) { require.NoError(t, err) ownerCert, err := util.PolicyCertificateFromFile("../../tests/testdata/owner_cert.json") require.NoError(t, err) - ownerHash, err := crypto.ComputeHashAsOwner(ownerCert) + ownerHash, err := crypto.ComputeHashAsSigner(ownerCert) require.NoError(t, err) signingFunctionCallTimes := 0 // incremented when the owner is requested to sign @@ -180,7 +180,7 @@ func TestSignAndLogRequest(t *testing.T) { require.NoError(t, err) ownerCert, err := util.PolicyCertificateFromFile("../../tests/testdata/owner_cert.json") require.NoError(t, err) - ownerHash, err := crypto.ComputeHashAsOwner(ownerCert) + ownerHash, err := crypto.ComputeHashAsSigner(ownerCert) require.NoError(t, err) // Let's add the root policy certificate from the owner. From de17b6863ac256bd3e6f90d3af1eac674c7125d6 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 27 Jul 2023 13:33:36 +0200 Subject: [PATCH 183/187] Fix bug in unit test. --- pkg/domain/domain.go | 3 +-- pkg/domain/domain_test.go | 9 ++++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/pkg/domain/domain.go b/pkg/domain/domain.go index 553f7472..a25f0432 100644 --- a/pkg/domain/domain.go +++ b/pkg/domain/domain.go @@ -171,10 +171,9 @@ func removeWildCardAndWWW(domainName string) string { // uniqueValidDomainName: extract valid domain names func uniqueValidDomainName(domainNames []string) []string { - uniqueDomainName := make(map[string]struct{}) + uniqueDomainName := make(map[string]struct{}, len(domainNames)) for _, domainName := range domainNames { if !IsValidDomain(domainName) { - //fmt.Printf(" !!! 
invalid domain name: \"%s\"\n", domainName) continue } name := removeWildCardAndWWW(domainName) diff --git a/pkg/domain/domain_test.go b/pkg/domain/domain_test.go index bdb3b40a..62668d1c 100644 --- a/pkg/domain/domain_test.go +++ b/pkg/domain/domain_test.go @@ -54,7 +54,7 @@ func TestUniqueValidDomainName(t *testing.T) { }, "3": { input: []string{"com", "*.*.baidu.com", "12378.com"}, - length: 0, + length: 1, }, "4": { input: []string{"video.google.com", "mail.google.com", "audio.google.com"}, @@ -62,10 +62,13 @@ func TestUniqueValidDomainName(t *testing.T) { }, } - for name, v := range test { + for name, tc := range test { + name, tc := name, tc t.Run(name, func(t *testing.T) { t.Parallel() - assert.Equal(t, v.length, len(uniqueValidDomainName(v.input))) + got := uniqueValidDomainName(tc.input) + t.Logf("got %v", got) + assert.Equal(t, tc.length, len(got)) }) } } From b5a6e51317209f5c8fbedc14f61fbb36b7231b87 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 27 Jul 2023 13:39:58 +0200 Subject: [PATCH 184/187] CanOwn and CanIssue. Add a CanOwn field. Only PCerts with CanOwn == 1 can sign as owner. Rename IsIssuer to CanIssue. 
--- pkg/common/crypto/crypto.go | 57 +++++++++++--------------- pkg/common/crypto/crypto_test.go | 8 +++- pkg/common/policies.go | 23 ++++++++--- pkg/common/policy_issuance.go | 6 ++- pkg/domainowner/domainowner.go | 3 +- pkg/logverifier/logverifier_test.go | 2 +- pkg/mapserver/logfetcher/logfetcher.go | 3 +- pkg/pca/pca.go | 6 ++- pkg/pca/pca_test.go | 6 ++- pkg/pca/testdata/pca_config.json | 2 +- pkg/pca/testdata/rpc.json | 1 - pkg/pca/testdata/server_key.pem | 27 ------------ pkg/tests/random/random.go | 6 ++- tests/testdata/issuer_cert.json | 2 +- tests/testdata/owner_cert.json | 2 +- 15 files changed, 72 insertions(+), 82 deletions(-) delete mode 100644 pkg/pca/testdata/rpc.json delete mode 100644 pkg/pca/testdata/server_key.pem diff --git a/pkg/common/crypto/crypto.go b/pkg/common/crypto/crypto.go index 99c0f582..6f53f80c 100644 --- a/pkg/common/crypto/crypto.go +++ b/pkg/common/crypto/crypto.go @@ -35,6 +35,9 @@ func SignAsOwner( if req.OwnerSignature != nil || req.OwnerHash != nil { return fmt.Errorf("there exists a non nil owner signature and hash") } + if !ownerPolCert.CanOwn { + return fmt.Errorf("the owner certificate cannot sign as owner") + } // Owner identifier: ownerHash, err := ComputeHashAsSigner(ownerPolCert) if err != nil { @@ -61,15 +64,25 @@ func VerifyOwnerSignature( req *common.PolicyCertificateSigningRequest, ) error { + return VerifyOwnerSignatureInPolicyCertificate( + ownerPolCert, + common.NewPolicyCertificateFromRequest(req)) +} + +func VerifyOwnerSignatureInPolicyCertificate( + ownerPolCert *common.PolicyCertificate, + c *common.PolicyCertificate, +) error { + // Check owner identification. ownerHash, err := ComputeHashAsSigner(ownerPolCert) if err != nil { return err } - if subtle.ConstantTimeCompare(req.OwnerHash, ownerHash) != 1 { + if subtle.ConstantTimeCompare(c.OwnerHash, ownerHash) != 1 { // Not equal. 
return fmt.Errorf("request's owner is identified by %s, but policy certificate is %s", - hex.EncodeToString(req.OwnerHash), hex.EncodeToString(ownerHash)) + hex.EncodeToString(c.OwnerHash), hex.EncodeToString(ownerHash)) } // Reconstruct owner's public key. @@ -78,18 +91,20 @@ func VerifyOwnerSignature( return err } - // Serialize request without signature: - sig := req.OwnerSignature - req.OwnerSignature = nil - serializedStruct, err := common.ToJSON(common.NewPolicyCertificateFromRequest(req)) + // Serialize owned pol cert without SPCTs, issuer signature, issuer hash, and owner signature: + SPCTs, issuerSignature, issuerHash, ownerSignature := + c.SPCTs, c.IssuerSignature, c.IssuerHash, c.OwnerSignature + c.SPCTs, c.IssuerSignature, c.IssuerHash, c.OwnerSignature = nil, nil, nil, nil + serializedStruct, err := common.ToJSON(c) if err != nil { return fmt.Errorf("RCSRVerifySignature | ToJSON | %w", err) } - req.OwnerSignature = sig // restore previous signature + c.SPCTs, c.IssuerSignature, c.IssuerHash, c.OwnerSignature = + SPCTs, issuerSignature, issuerHash, ownerSignature // restore previous values // Hash serialized request and check the signature with the owner's public key. 
hashOutput := sha256.Sum256(serializedStruct) - err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], req.OwnerSignature) + err = rsa.VerifyPKCS1v15(pubKey, crypto.SHA256, hashOutput[:], c.OwnerSignature) if err != nil { return fmt.Errorf("bad owner signature: %w", err) } @@ -97,29 +112,6 @@ func VerifyOwnerSignature( return nil } -func VerifyOwnerSignatureInPolicyCertificate( - ownerPolCert *common.PolicyCertificate, - c *common.PolicyCertificate, -) error { - - req := common.NewPolicyCertificateSigningRequest( - c.Version, - c.SerialNumberField, - c.DomainField, - c.NotBefore, - c.NotAfter, - c.IsIssuer, - c.PublicKey, - c.PublicKeyAlgorithm, - c.SignatureAlgorithm, - c.TimeStamp, - c.PolicyAttributes, - c.OwnerSignature, - c.OwnerHash, - ) - return VerifyOwnerSignature(ownerPolCert, req) -} - // SignRequestAsIssuer is called by the Policy CA. It signs the request and generates a // PolicyCertificate. The SPTs field is (should be) empty. func SignRequestAsIssuer( @@ -135,7 +127,8 @@ func SignRequestAsIssuer( req.DomainField, req.NotBefore, req.NotAfter, - req.IsIssuer, + req.CanIssue, + req.CanOwn, req.PublicKey, req.PublicKeyAlgorithm, req.SignatureAlgorithm, diff --git a/pkg/common/crypto/crypto_test.go b/pkg/common/crypto/crypto_test.go index 95fb15c8..05ef788c 100644 --- a/pkg/common/crypto/crypto_test.go +++ b/pkg/common/crypto/crypto_test.go @@ -26,9 +26,13 @@ func TestUpdateGoldenFiles(t *testing.T) { t.Log("Updating policy certificate files for tests/testdata") // Obtain a new pair for the root issuer. issuerCert, issuerKey := randomPolCertAndKey(t) + issuerCert.CanIssue = true + issuerCert.CanOwn = true // Objain a new pair for the owner. ownerCert, ownerKey := randomPolCertAndKey(t) + ownerCert.CanIssue = false + ownerCert.CanOwn = true // The owner will be issued by the root issuer. 
err := crypto.SignPolicyCertificateAsIssuer(issuerCert, issuerKey, ownerCert) require.NoError(t, err) @@ -123,7 +127,7 @@ func TestSignAsOwner(t *testing.T) { request := random.RandomPolCertSignRequest(t) require.NotEmpty(t, request.OwnerSignature) require.NotEmpty(t, request.OwnerHash) - request.IsIssuer = true + request.CanIssue = true // Sign as owner. err = crypto.SignAsOwner(ownerCert, ownerKey, request) @@ -137,7 +141,7 @@ func TestSignAsOwner(t *testing.T) { request.OwnerSignature = nil // It should not fail now: err = crypto.SignAsOwner(ownerCert, ownerKey, request) - require.NoError(t, err, "RCSR sign signature error") + require.NoError(t, err) require.NotEmpty(t, request.OwnerSignature) require.NotEmpty(t, request.OwnerHash) gotSignature := request.OwnerSignature diff --git a/pkg/common/policies.go b/pkg/common/policies.go index 4f71161d..027c6e4c 100644 --- a/pkg/common/policies.go +++ b/pkg/common/policies.go @@ -43,11 +43,15 @@ func (p PolicyCertificateBase) Equal(x PolicyCertificateBase) bool { // The issuer signature is removed because the party verifying the validity of the signature cannot // reconstruct the signature that would depend on SPCTs that were in the issuer certificate at the // time of issuance. We want the hash to be SPCT independent, thus the signature has to be removed. +// +// Any domain with CanOwn can sign for its domain or subdomains. E.g. a.fpki.com can be the owner of +// a.fpki.com and b.a.fpki.com, as long as it has CanOwn == 1. 
type PolicyCertificateFields struct { PolicyCertificateBase NotBefore time.Time `json:",omitempty"` NotAfter time.Time `json:",omitempty"` - IsIssuer bool `json:",omitempty"` + CanIssue bool `json:",omitempty"` + CanOwn bool `json:",omitempty"` PublicKey []byte `json:",omitempty"` PublicKeyAlgorithm PublicKeyAlgorithm `json:",omitempty"` SignatureAlgorithm SignatureAlgorithm `json:",omitempty"` @@ -104,7 +108,8 @@ func NewPolicyCertificateFields( domain string, notBefore time.Time, notAfter time.Time, - isIssuer bool, + canIssue bool, + canOwn bool, publicKey []byte, publicKeyAlgorithm PublicKeyAlgorithm, signatureAlgorithm SignatureAlgorithm, @@ -123,7 +128,8 @@ func NewPolicyCertificateFields( }, NotBefore: notBefore, NotAfter: notAfter, - IsIssuer: isIssuer, + CanIssue: canIssue, + CanOwn: canOwn, PublicKey: publicKey, PublicKeyAlgorithm: publicKeyAlgorithm, SignatureAlgorithm: signatureAlgorithm, @@ -140,6 +146,8 @@ func (c PolicyCertificateFields) Equal(x PolicyCertificateFields) bool { bytes.Equal(c.PublicKey, x.PublicKey) && c.NotBefore.Equal(x.NotBefore) && c.NotAfter.Equal(x.NotAfter) && + c.CanIssue == x.CanIssue && + c.CanOwn == x.CanOwn && c.SignatureAlgorithm == x.SignatureAlgorithm && c.TimeStamp.Equal(x.TimeStamp) && bytes.Equal(c.OwnerSignature, x.OwnerSignature) && @@ -153,7 +161,8 @@ func NewPolicyCertificate( domain string, notBefore time.Time, notAfter time.Time, - isIssuer bool, + canIssue bool, + canOwn bool, publicKey []byte, publicKeyAlgorithm PublicKeyAlgorithm, signatureAlgorithm SignatureAlgorithm, @@ -173,7 +182,8 @@ func NewPolicyCertificate( domain, notBefore, notAfter, - isIssuer, + canIssue, + canOwn, publicKey, publicKeyAlgorithm, signatureAlgorithm, @@ -195,7 +205,8 @@ func NewPolicyCertificateFromRequest(req *PolicyCertificateSigningRequest) *Poli req.DomainField, req.NotBefore, req.NotAfter, - req.IsIssuer, + req.CanIssue, + req.CanOwn, req.PublicKey, req.PublicKeyAlgorithm, req.SignatureAlgorithm, diff --git 
a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 5f7c2971..9431f8f3 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -20,7 +20,8 @@ func NewPolicyCertificateSigningRequest( domain string, notBefore time.Time, notAfter time.Time, - isIssuer bool, + canIssue bool, + canOwn bool, publicKey []byte, publicKeyAlgorithm PublicKeyAlgorithm, signatureAlgorithm SignatureAlgorithm, @@ -37,7 +38,8 @@ func NewPolicyCertificateSigningRequest( domain, notBefore, notAfter, - isIssuer, + canIssue, + canOwn, publicKey, publicKeyAlgorithm, signatureAlgorithm, diff --git a/pkg/domainowner/domainowner.go b/pkg/domainowner/domainowner.go index 36555791..bcbf1ffc 100644 --- a/pkg/domainowner/domainowner.go +++ b/pkg/domainowner/domainowner.go @@ -57,7 +57,8 @@ func (do *DomainOwner) GeneratePolCertSignRequest( domainName, // domain time.Now(), // not before time.Now().Add(time.Microsecond), // not after - false, // is issuer + false, // can issue + false, // can own pubKeyBytes, // public key common.RSA, common.SHA256, diff --git a/pkg/logverifier/logverifier_test.go b/pkg/logverifier/logverifier_test.go index 06256acb..e90bc2f0 100644 --- a/pkg/logverifier/logverifier_test.go +++ b/pkg/logverifier/logverifier_test.go @@ -50,7 +50,7 @@ func TestVerifyInclusionByHash(t *testing.T) { // Create a mock STH with the correct root hash to pass the test. 
sth := &types.LogRootV1{ TreeSize: 2, - RootHash: tests.MustDecodeBase64(t, "7+1ODWJbmPz206K4n/kabPoCxAiyJ2e+jSe9rH5uYFk="), + RootHash: tests.MustDecodeBase64(t, "H6O9iWPoLSfuWISEZMKDsFf7j55JwHJw68Z7lNPSwGw="), TimestampNanos: 1661986742112252000, Revision: 0, Metadata: []byte{}, diff --git a/pkg/mapserver/logfetcher/logfetcher.go b/pkg/mapserver/logfetcher/logfetcher.go index 57baad60..f887d131 100644 --- a/pkg/mapserver/logfetcher/logfetcher.go +++ b/pkg/mapserver/logfetcher/logfetcher.go @@ -337,7 +337,8 @@ func GetPCAndRPCs( domainName, time.Now(), // not before time.Now().Add(time.Microsecond), // not after - false, // is issuer + false, // can issue + false, // can own generateRandomBytes(), // public key common.RSA, common.SHA256, diff --git a/pkg/pca/pca.go b/pkg/pca/pca.go index 0539d113..5fbf8c9a 100644 --- a/pkg/pca/pca.go +++ b/pkg/pca/pca.go @@ -94,7 +94,8 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( domain string, notBefore time.Time, notAfter time.Time, - isIssuer bool, + canIssue bool, + canOwn bool, publicKey []byte, publicKeyAlgorithm common.PublicKeyAlgorithm, signatureAlgorithm common.SignatureAlgorithm, @@ -122,7 +123,8 @@ func (pca *PCA) NewPolicyCertificateSigningRequest( domain, notBefore, notAfter, - isIssuer, + canIssue, + canOwn, publicKey, publicKeyAlgorithm, signatureAlgorithm, diff --git a/pkg/pca/pca_test.go b/pkg/pca/pca_test.go index 344202d6..770a908c 100644 --- a/pkg/pca/pca_test.go +++ b/pkg/pca/pca_test.go @@ -86,7 +86,8 @@ func TestPCAWorkflow(t *testing.T) { "fpki.com", notBefore, notAfter, - true, + true, // can issue + true, // can own ownerCert.PublicKey, // public key common.RSA, common.SHA256, @@ -191,7 +192,8 @@ func TestSignAndLogRequest(t *testing.T) { "fpki.com", pca.RootPolicyCert.NotBefore, pca.RootPolicyCert.NotAfter, - true, + true, // can issue + true, // can own ownerCert.PublicKey, // public key common.RSA, common.SHA256, diff --git a/pkg/pca/testdata/pca_config.json 
b/pkg/pca/testdata/pca_config.json index 377469b1..c3ec5def 100644 --- a/pkg/pca/testdata/pca_config.json +++ b/pkg/pca/testdata/pca_config.json @@ -13,5 +13,5 @@ } ], "KeyPEM": "LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBcGZtYTZZOVExWkJ1bjZjdWNlNEw0RkI0QXdnWWVuVzNzYm9oa2VxTU01Y0hxdmQzCjBsTk5QWm9ScURVd2dVTUlOVEhDU0FXNXpaRXZjVXpuUDRSNEMzbE1TWHVRMFYzNVpIazRHV3F1MnNtQUFkbUkKT3dBWUhyYlBoVjJaVEpxb2lqRlN5UTcySlN1SWtJcDE2ekdrRitBMzZMOGlyTzJjWVRRVTBOdmtYVVhJK3E2KwptSS9GTmlneWZjQ1huZy8zKy9VUHFZQS9UYnpMRDBGbGRBWi9QSmdMZ1NnS1BtRllraEljN2RaelBBcnJSUFRFCmNtMWFaYlltOTRvUkpPVjZRbkpuUGxWaGIrQ1NTYmZQQTVvNGtaNnkrUTlhcDJnTVNnUmd5amdVN1lHRExXRWoKTGtORlhOWVhORXlCWW9LSmZlTEt3d2ZGT0l4YjFoajVmbXluL3dJREFRQUJBb0lCQUZoaUVkYzhCU3lreS9QaQpuLzMxYUllYjhqeVREQTN5TDg3SnNtUzBoTkpZSTdJc003ZGh1cWtLS1Vyc2dCYmxiako5ZTlyRWljdm8rMTJYCk9URit4am85N3B6VzJ5aFNwYUtXVm9SYVppQ0YycytoV2tVbzZLODZaRW05clF1NDgvVWJETjJhUlFOSUttSG0KQ2FNai9TRGx1b0FMZ0ZpYjg0RmpyWTRHK3BXYjNzUEZKQjBiZElXNjBIb29DMEM5dVNpNmxpOE41MWJoSkdPLwo5VUxha0xpSmh2TmpHd2VqYjI0cFQ1YUx1dG9vVVZJSmdBQjlsVUY3VDdya3ZoWXhMMDNObWdXZGM4TGJZTTVLCkZuVTFKMytWbExOc3piNFA2eDRlNmViWldHSExJWTRkMWwwV0FlRHJibXViVFhtU3gvR0UrUjJ2UmY5VFdqNjUKbk1GNHMrRUNnWUVBM0ZMcUVycUNoYlh4aGczVnBWNmFMNU96RFc5M1B0dGREUTBFeDJMdkM4cVRPNE5MdzBQVgpiTnZoKytCVTNRbzZSSU42d0QvVElha0d3azg4bVEzVFRMNS9Xeit3WlMxenNlQkJkOWp1cmZJWGhTdmpzZGQ2Ci85RkpPckNDSkFpeUlTbnBkSVpSWUU2b01Yb1ZFZlBTVzJDcFR6Szl2OC9qNWhWV0dDN1E3REVDZ1lFQXdObkUKYzcvVk5UajJPdWdWVmJtK0JldEhPM0JEK2RUdjNDTGdnMjdnYkdyU0puWnM4YXRUM05wZVRPQTFHclhiK3kxSgpmOW42Z0ZhRFJOV3c0UnJlYUdsd2xHcjZjckFudzdJemozVHY0MWp1V1pnVWhhMG1yTHFlTEhLRTdIM1M2bjdFCktsNTRZL3MxSUt6Q0RZRkdQTnN3LzR1dCs5TUc0cFRKUjh0Q095OENnWUJ1VWxaQzZvRlEwcm9ObGYvVXZub1cKT0s3L20wRHFpSTBmYTlWb3dYRlJSaTVUTG50Uld6WVRQRWI4Q0doMkJoa0hGWTd4bFFQelp0K3JqR0diZ2dDNwovT2RvbVl1S0hpaEFlVHBPK0tFTEdKNE9ZTnV5d1dNbGxYWkZuUHlOYm8xRGJla29IOW0vajZOSW93ako1SDBBCnBIWWk0aFdCWm1lSFhrZXQ5RU5KQVFLQmdDNk81SHpGWFdYaUFLQXNFNnFFa3ptK21ZM2lCcWMwSDB4WUp6ZlQKOXQ1MTA1SnhtTStuZXpHZ2pvK2t1VzFmWm1KM2huMFZWU
UxTNEJJb1BQRzdtT0pBUW11eWdCQmNNdDF1RWtDYQpES2dvZWpLcklwazdPbEVOSk02NlB4a1JMM0JwZGxaOFJEaHZGMTV5RnM5SDNIc290K1dhQlVEOHEzYmNVTlBDCllKVUZBb0dCQU04enY0YUpYd2ZSZmp4azd1cDRqQk9rT2szaWY3WVZjOGUrWDhwdjRjVGpyRlJ0aktFOThmaGIKbkZIUzE4NFNJZkozSmltd0Q0ZmxhSDhFOHZ0dndPclpFSXk3azRoMWlIeE9pLzh5eUxuZGRiNEh5YmZqUHdDZwpZVXV4R29ERzRFanF5c2lKZE5IRy9EOWcySi9HU0xuMDhmTUMvYVBBV0NpdXpnZFdIUHovCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==", - "CertJSON": "eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo0LCJTZXJpYWxOdW1iZXIiOjUxNCwiRG9tYWluIjoiZnBraS5jb20iLCJOb3RCZWZvcmUiOiIxOTcwLTAxLTAxVDAxOjAwOjAxKzAxOjAwIiwiTm90QWZ0ZXIiOiIxOTcwLTAxLTAxVDAzOjQ2OjQwKzAxOjAwIiwiSXNJc3N1ZXIiOnRydWUsIlB1YmxpY0tleSI6Ik1JSUJJakFOQmdrcWhraUc5dzBCQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBcGZtYTZZOVExWkJ1bjZjdWNlNEw0RkI0QXdnWWVuVzNzYm9oa2VxTU01Y0hxdmQzMGxOTlBab1JxRFV3Z1VNSU5USENTQVc1elpFdmNVem5QNFI0QzNsTVNYdVEwVjM1WkhrNEdXcXUyc21BQWRtSU93QVlIcmJQaFYyWlRKcW9pakZTeVE3MkpTdUlrSXAxNnpHa0YrQTM2TDhpck8yY1lUUVUwTnZrWFVYSStxNittSS9GTmlneWZjQ1huZy8zKy9VUHFZQS9UYnpMRDBGbGRBWi9QSmdMZ1NnS1BtRllraEljN2RaelBBcnJSUFRFY20xYVpiWW05NG9SSk9WNlFuSm5QbFZoYitDU1NiZlBBNW80a1o2eStROWFwMmdNU2dSZ3lqZ1U3WUdETFdFakxrTkZYTllYTkV5QllvS0pmZUxLd3dmRk9JeGIxaGo1Zm15bi93SURBUUFCIiwiVGltZVN0YW1wIjoiMTk5NC0xMi0wM1QwMjowMDo0MVoiLCJQb2xpY3lBdHRyaWJ1dGVzIjp7fSwiU1BDVHMiOlt7IlZlcnNpb24iOjgsIkxvZ0lEIjoibkp5aTc4M1MwUzBxZVE9PSIsIkFkZGVkVFMiOiIyMDEwLTA2LTIzVDA3OjI0OjM2WiIsIlNpZ25hdHVyZSI6IjBIU29LQXJwL2h3TXVXclRJdFlpZ2lsZnYrRWVKcVF6QjIyMXdVUk1PalE9In0seyJMb2dJRCI6IjB5cjM3VHVNL3BCUGt3PT0iLCJBZGRlZFRTIjoiMjA1MC0wNS0wN1QxNjoxOTowMFoiLCJTaWduYXR1cmUiOiIrUEJ0S2J6WmhrUWRKWmtHMXByTmlVdVdpdW53NjUyV1hPYWthVHhPdm9nPSJ9XX19" + "CertJSON": 
"eyJUIjoiKnBjIiwiTyI6eyJWZXJzaW9uIjo0LCJTZXJpYWxOdW1iZXIiOjUxNCwiRG9tYWluIjoiZnBraS5jb20iLCJOb3RCZWZvcmUiOiIxOTcwLTAxLTAxVDAxOjAwOjAxKzAxOjAwIiwiTm90QWZ0ZXIiOiIxOTcwLTAxLTAxVDAzOjQ2OjQwKzAxOjAwIiwiQ2FuSXNzdWUiOnRydWUsIkNhbk93biI6dHJ1ZSwiUHVibGljS2V5IjoiTUlJQklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUFwZm1hNlk5UTFaQnVuNmN1Y2U0TDRGQjRBd2dZZW5XM3Nib2hrZXFNTTVjSHF2ZDMwbE5OUFpvUnFEVXdnVU1JTlRIQ1NBVzV6WkV2Y1V6blA0UjRDM2xNU1h1UTBWMzVaSGs0R1dxdTJzbUFBZG1JT3dBWUhyYlBoVjJaVEpxb2lqRlN5UTcySlN1SWtJcDE2ekdrRitBMzZMOGlyTzJjWVRRVTBOdmtYVVhJK3E2K21JL0ZOaWd5ZmNDWG5nLzMrL1VQcVlBL1RiekxEMEZsZEFaL1BKZ0xnU2dLUG1GWWtoSWM3ZFp6UEFyclJQVEVjbTFhWmJZbTk0b1JKT1Y2UW5KblBsVmhiK0NTU2JmUEE1bzRrWjZ5K1E5YXAyZ01TZ1JneWpnVTdZR0RMV0VqTGtORlhOWVhORXlCWW9LSmZlTEt3d2ZGT0l4YjFoajVmbXluL3dJREFRQUIiLCJUaW1lU3RhbXAiOiIxOTk0LTEyLTAzVDAyOjAwOjQxWiIsIlBvbGljeUF0dHJpYnV0ZXMiOnt9LCJTUENUcyI6W3siVmVyc2lvbiI6OCwiTG9nSUQiOiJuSnlpNzgzUzBTMHFlUT09IiwiQWRkZWRUUyI6IjIwMTAtMDYtMjNUMDc6MjQ6MzZaIiwiU2lnbmF0dXJlIjoiMEhTb0tBcnAvaHdNdVdyVEl0WWlnaWxmditFZUpxUXpCMjIxd1VSTU9qUT0ifSx7IkxvZ0lEIjoiMHlyMzdUdU0vcEJQa3c9PSIsIkFkZGVkVFMiOiIyMDUwLTA1LTA3VDE2OjE5OjAwWiIsIlNpZ25hdHVyZSI6IitQQnRLYnpaaGtRZEpaa0cxcHJOaVV1V2l1bnc2NTJXWE9ha2FUeE92b2c9In1dfX0=" } \ No newline at end of file diff --git a/pkg/pca/testdata/rpc.json b/pkg/pca/testdata/rpc.json deleted file mode 100644 index dc4b1b32..00000000 --- a/pkg/pca/testdata/rpc.json +++ /dev/null @@ -1 +0,0 @@ -{"T":"*pc","O":{"Issuer":"pca root policy certificate","Subject":"pca root policy 
certificate","SerialNumber":13,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:10+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFASKM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlpffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiKlcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQAB","TimeStamp":"1970-01-01T01:00:01+01:00","PolicyAttributes":{"TrustedCA":["pca"],"AllowedSubdomains":[""]},"IssuerSignature":"p4dASSEofyVNzuW/X7Qnwf/7D9LNjWDAZgGTDBQX646S2X9zvAFMbbEYGBtdxncpSk2TN/p0ZuvjS+Zn2B9X5hOgwVAFNys1vGyKI4zHVWmBEirc3EQKiipXmPSjRjY1yMwOOYXvmy4XPiLSMAW+TRtkGZxY1vK9+9yUifMzu4gsWOq93+E1tZHF9AHEVDvVGgUaoFITL75FhIHaPY04NXqYeIfCri3DdvTsAXTQbQTNO9nd06jgEhvzDHhz+d93DiHpFyXwHFnk6W37V0KveewIvZ3i16+umUKz+G2uYNdjNz3wq9DRRr8M8NKB0z1JR9TBHLXFf+nxbinheAAAQQ=="}} \ No newline at end of file diff --git a/pkg/pca/testdata/server_key.pem b/pkg/pca/testdata/server_key.pem deleted file mode 100644 index 87562ab8..00000000 --- a/pkg/pca/testdata/server_key.pem +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEArrrQ5MN4mdcp5XouqmcmPG489eRtbkIn9elKOCDLgpA9OFAS -KM26Vskm0jwR9unrVE8NXXdRbotQfVpL7iAPGOPfoSglBXKmiAdmRG0idw6+xRlp -ffgHE3CDhNnz1tpVXBTE+U84f48v+sVd1gnK4oA/uT7X7D6vO5cHK1M9rmpo+SiK -lcYSHvF19/qgiwF9cc1z3ug6M4SciqEbUNdW1R3BSW+9ulTZluT4Hbml4C8hkktN -9zlHUpWdHzH1NlcRqzObBp7ZvB/OrKh8iA0WBXLXNzlBdB9EXSHjqJcI/sKn0Zf/ -5RO9QYT8wjDDbj8H+4+/wRd2q8Y10yQomIy6WQIDAQABAoIBAHOQ2C0WJCQMLYe7 -ojXOd3msp+EyrjYox2lcXVx2RGQcpoEiZIBcYYyZCnYuiOs4mA9xIbPaASAOQLRI -DsR2q8WYogZjGOlbhU2fJ0LGs+7u77pJLn7El1mCJ1qtFRvb2G1Ix2blwMaNm/xO -odu5KllY3czWBdYZN6lAINvE+JzsZ9/OLSEO7zvbEThfe2FQO8u4oz0/7EilPU+F -pBH4gXSYi2He8Ngasyg5K5CpZp6WcQgL/iP4F+xNkDxo6UNQ0KJyfNelOnZvX35M 
-JJyQa6ewnhfBKrmNIw0/RzRIdOm3NoNlpkwlepRxYyeNoZfBKE96yF6+IIVkhZkv -lIRFzKkCgYEA5EOmHHwK4nXRCau2KfE0JGE68Fo+x6m5UImpuuO9Vik/Ca4icJc4 -3kIJcVm8emzwXmKgjp3ZU6OJ0c+H0vNMsIN8BI9DCmMf2A9dAsET7H5na1fvloRK -hu11Rps+jVpAIPG4SHlJAqbCJXl5uuwHPMciOHlkvFvsg/hLE7g8CbcCgYEAw/Xu -dNyIf9vnhMOEPMPOwkAQxxJBqKIwRwC5df8/SNLumrlZf7KmjUk7jwXq3Y/3LC34 -yaxICIqS6F5jJWQE3RcdYBha1zprc8yakCIPkXLABu/jTX0IG0SYHfthtn0Xu9v2 -kwqg1usPDc1n09TBWuqz/9LaAb3TOk7bmWgvnG8CgYEAhe9DqncGE/a5XwlH67hB -tg7u/FSXfaQSvjToSGaUk9fDyzRHtPSmQ5NP4xiaRONi7p7EQqpP+Il+toMk2Rw1 -JWUhAbPlzFR5PNdW5eTQgfVxTUTdqd2ZtS3joDg7LcmE00/Bp5SyyEF8rWxnhswz -OvBFHgnsl7EASDcKJln1sGUCgYBus50tOXSHPpSQ5N7scPV5uTWBMGgVOuKpCdi7 -tyFqI3cVw0WzJRp/0HIr/vzKg8zvkVztmTzsTlRlnwTFl7ZDrA41Piz7T45qUZiz -efpvgXLHuHchTFPrC5ofveZacFqbDs1CDgwEva+4bWSNP7CnitV63QO5XiJY2zml -dT0IUwKBgHKA1JAcslRM7/2ctyIC7ocifVdOZHUDBpYNFzDAAIWFTMOGEf1CPXQl -RWBtTicGNC2Rs+UQfTcS9S72qcElJQk+P11L8KoFoqrYZYD8Ff0Gmo456GXTCZf6 -JGyRL6+owmzb2d4jJVBytNrxG2/yGwcYFIKR/fB5p0O3fzPLzG4b ------END RSA PRIVATE KEY----- diff --git a/pkg/tests/random/random.go b/pkg/tests/random/random.go index e4c59842..c1e2dd4c 100644 --- a/pkg/tests/random/random.go +++ b/pkg/tests/random/random.go @@ -149,7 +149,8 @@ func RandomPolCertSignRequest(t tests.T) *common.PolicyCertificateSigningRequest "domain", // domain RandomTimeWithoutMonotonic(), RandomTimeWithoutMonotonic(), - true, + true, // can issue + true, // can own RandomBytesForTest(t, 32), // public key common.RSA, common.SHA256, @@ -167,7 +168,8 @@ func RandomPolicyCertificate(t tests.T) *common.PolicyCertificate { "fpki.com", RandomTimeWithoutMonotonic(), RandomTimeWithoutMonotonic(), - true, + true, // can issue + true, // can own RandomBytesForTest(t, 32), // public key common.RSA, common.SHA256, diff --git a/tests/testdata/issuer_cert.json b/tests/testdata/issuer_cert.json index 6ec65160..3d6ba344 100644 --- a/tests/testdata/issuer_cert.json +++ b/tests/testdata/issuer_cert.json @@ -1 +1 @@ 
-{"T":"*pc","O":{"Version":4,"SerialNumber":514,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:01+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB","TimeStamp":"1994-12-03T02:00:41Z","PolicyAttributes":{},"SPCTs":[{"Version":8,"LogID":"nJyi783S0S0qeQ==","AddedTS":"2010-06-23T07:24:36Z","Signature":"0HSoKArp/hwMuWrTItYigilfv+EeJqQzB221wURMOjQ="},{"LogID":"0yr37TuM/pBPkw==","AddedTS":"2050-05-07T16:19:00Z","Signature":"+PBtKbzZhkQdJZkG1prNiUuWiunw652WXOakaTxOvog="}]}} \ No newline at end of file +{"T":"*pc","O":{"Version":4,"SerialNumber":514,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:01+01:00","NotAfter":"1970-01-01T03:46:40+01:00","CanIssue":true,"CanOwn":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApfma6Y9Q1ZBun6cuce4L4FB4AwgYenW3sbohkeqMM5cHqvd30lNNPZoRqDUwgUMINTHCSAW5zZEvcUznP4R4C3lMSXuQ0V35ZHk4GWqu2smAAdmIOwAYHrbPhV2ZTJqoijFSyQ72JSuIkIp16zGkF+A36L8irO2cYTQU0NvkXUXI+q6+mI/FNigyfcCXng/3+/UPqYA/TbzLD0FldAZ/PJgLgSgKPmFYkhIc7dZzPArrRPTEcm1aZbYm94oRJOV6QnJnPlVhb+CSSbfPA5o4kZ6y+Q9ap2gMSgRgyjgU7YGDLWEjLkNFXNYXNEyBYoKJfeLKwwfFOIxb1hj5fmyn/wIDAQAB","TimeStamp":"1994-12-03T02:00:41Z","PolicyAttributes":{},"SPCTs":[{"Version":8,"LogID":"nJyi783S0S0qeQ==","AddedTS":"2010-06-23T07:24:36Z","Signature":"0HSoKArp/hwMuWrTItYigilfv+EeJqQzB221wURMOjQ="},{"LogID":"0yr37TuM/pBPkw==","AddedTS":"2050-05-07T16:19:00Z","Signature":"+PBtKbzZhkQdJZkG1prNiUuWiunw652WXOakaTxOvog="}]}} \ No newline at end of file diff --git a/tests/testdata/owner_cert.json b/tests/testdata/owner_cert.json index d923bcbc..6f6c2877 100644 --- 
a/tests/testdata/owner_cert.json +++ b/tests/testdata/owner_cert.json @@ -1 +1 @@ -{"T":"*pc","O":{"Version":9,"SerialNumber":40,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:01+01:00","NotAfter":"1970-01-01T03:46:40+01:00","IsIssuer":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5mtGaefk71aGYEzcI8vjbE2puxssY4Lf1V7t0H0Ji4CSmTnAkKEse/n2vo/klQma1GxORG6DpVm6ggdhoNBZR5Od+i67RB9qFj35SBHlh9uwUnSNZsIl/iOCM8ubD/sk9cMrEHUgTB4bgLNl+imypClt4zjaa48UWwvozOYaVUV/r4FgIVV6i+GHiSUG6yDkMzsh0X8TJPI6Zyjcr14YPvOy0y+Tnqv77u1n0MRySLqFmTrI0BTo5HK7H6pP5pFp9ETTgOgnu2rWDm4nMheeyepfoQbmFjS7USFm4GI4J6uE6MViluBSM1w2h6ItSLQS3ERsGe0PUATDWETpHfBRHQIDAQAB","TimeStamp":"1926-07-30T02:03:38Z","PolicyAttributes":{},"IssuerSignature":"QQt1BhHqDY7iyDRa4f65jJyPk70fDYFHW29t5DClhyNtvPHHTeNlpGghGixdvXR6iVfw+sn5ev5zf7w+pl6BprlZ7gTJj6G/L3/c/Ksk3kR2e/XfXelAGZ/fNWRFs7/AmORMUgZUFsngJIiholtB4NPsyuNWEj01Kbc48kUHcJFzVeWkjwc5mo9tTGh3v8Mrn+xzzX5ONr/JA8XuJlfLBT4WFel3wn8zTfeUdNaW0k+yFVx6Ajg/E1HrXvSQvbr5mQlLZ7uAziSbddrXcUKdZrbkM/aj/OfyGnKao4SGFPGnO1pKbgGW2AS9haq2zhVwSe+ecoSfltdbSAPK4Y2eVA==","IssuerHash":"jdUREDKTlHexuImHcB5NdzLubV1OglH/VdfbczHIiK8=","SPCTs":[{"Version":5,"LogID":"am09KiLqSxA07w==","AddedTS":"2077-01-09T19:27:28Z","Signature":"gOpvrqDfBQiaUWwlu2kw60dVdKW/LNFulHLZLnq6uGk="},{"Version":2,"LogID":"t6oQ1KNjhiBtow==","AddedTS":"2068-05-20T17:04:57Z","Signature":"VaDJqqnu7JAE1h96qQY7n/j61f6qUQczBt57EHjq6HM="}]}} \ No newline at end of file 
+{"T":"*pc","O":{"Version":9,"SerialNumber":40,"Domain":"fpki.com","NotBefore":"1970-01-01T01:00:01+01:00","NotAfter":"1970-01-01T03:46:40+01:00","CanOwn":true,"PublicKey":"MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5mtGaefk71aGYEzcI8vjbE2puxssY4Lf1V7t0H0Ji4CSmTnAkKEse/n2vo/klQma1GxORG6DpVm6ggdhoNBZR5Od+i67RB9qFj35SBHlh9uwUnSNZsIl/iOCM8ubD/sk9cMrEHUgTB4bgLNl+imypClt4zjaa48UWwvozOYaVUV/r4FgIVV6i+GHiSUG6yDkMzsh0X8TJPI6Zyjcr14YPvOy0y+Tnqv77u1n0MRySLqFmTrI0BTo5HK7H6pP5pFp9ETTgOgnu2rWDm4nMheeyepfoQbmFjS7USFm4GI4J6uE6MViluBSM1w2h6ItSLQS3ERsGe0PUATDWETpHfBRHQIDAQAB","TimeStamp":"1926-07-30T02:03:38Z","PolicyAttributes":{},"IssuerSignature":"evJb81LPTEwZBMAh0YmbPP2w/PwXk9O9A7LObpwhyiOpu/4NH//ZTSX0jpWP+Yq/VKK1jtxI1HZTzlsn0S94kCkSwDxpZW/DAkrCfpllhBUuO8uUdKWuMZQvfAfj82OSG58fiUw5uL6OaxW03x14/UAEZGwJ9Vqwejlhg3CxrluCMvDbB7Lm+6bIKNEyTC2Kxwair0XizD77GwSXERC53A8MTvZyFY5Y4AgZxcoFmKatoYc+aukv388kSx6Smz/kyqWiu9CoYkYFD4R/tugfU04rtQkCajSwBZYpN2jDxnwVSMHOnLz9UCU03zGRikOx7DS9PQFCGOkL+Pt2eabQUQ==","IssuerHash":"BJO0H8JUg+CHVReXhfJMU8Gc4q3MWJlwITS759t/UtE=","SPCTs":[{"Version":5,"LogID":"am09KiLqSxA07w==","AddedTS":"2077-01-09T19:27:28Z","Signature":"gOpvrqDfBQiaUWwlu2kw60dVdKW/LNFulHLZLnq6uGk="},{"Version":2,"LogID":"t6oQ1KNjhiBtow==","AddedTS":"2068-05-20T17:04:57Z","Signature":"VaDJqqnu7JAE1h96qQY7n/j61f6qUQczBt57EHjq6HM="}]}} \ No newline at end of file From 73a757e955be9eef12ea4961ae26968d800faa59 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Thu, 27 Jul 2023 13:44:27 +0200 Subject: [PATCH 185/187] Document how the hash of the revoked pol cert is computed. 
--- pkg/common/policy_issuance.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/pkg/common/policy_issuance.go b/pkg/common/policy_issuance.go index 9431f8f3..bb980086 100644 --- a/pkg/common/policy_issuance.go +++ b/pkg/common/policy_issuance.go @@ -10,6 +10,9 @@ type PolicyCertificateSigningRequest struct { PolicyCertificateFields } +// PolicyCertificateRevocationSigningRequest is a request to prepare a revocation. +// The hash of the certificate intended to be revoked must be computed without any SPCT and +// issuer signature (i.e. SPCT independent). type PolicyCertificateRevocationSigningRequest struct { PolicyCertificateHash []byte `json:",omitempty"` // Hash of the pol. cert. to revoke } From 7e3bfe34ae78ee59e5b05a7232e349fe2567a255 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Fri, 4 Aug 2023 17:59:12 +0200 Subject: [PATCH 186/187] Reconcile go modules after rebase. --- go.mod | 5 ++--- go.sum | 8 +------- 2 files changed, 3 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 9205012c..33120345 100644 --- a/go.mod +++ b/go.mod @@ -8,13 +8,12 @@ require ( github.com/google/certificate-transparency-go v1.1.3 github.com/google/trillian v1.4.1 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 - github.com/hashicorp/golang-lru v0.5.1 github.com/minio/sha256-simd v1.0.0 github.com/stretchr/testify v1.7.4 github.com/transparency-dev/merkle v0.0.1 go.uber.org/atomic v1.9.0 - golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 - golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/net v0.0.0-20220722155237-a158d28d115b + golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 google.golang.org/grpc v1.47.0 google.golang.org/protobuf v1.28.0 ) diff --git a/go.sum b/go.sum index 1a34f551..b8ec6928 100644 --- a/go.sum +++ b/go.sum @@ -458,7 +458,6 @@ github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b 
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= @@ -806,7 +805,6 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= @@ -1026,9 +1024,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220421235706-1d1ef9303861/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0= golang.org/x/net 
v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= -golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1158,13 +1155,10 @@ golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= From 
d49b6b5847e74b32c7195b2c678866dce7972359 Mon Sep 17 00:00:00 2001 From: "Juan A. Garcia Pardo" Date: Mon, 9 Oct 2023 17:20:46 +0200 Subject: [PATCH 187/187] Cleanup create_schema.sh, remove redundant definition. --- tools/create_schema.sh | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/tools/create_schema.sh b/tools/create_schema.sh index c53dbea9..36f83374 100755 --- a/tools/create_schema.sh +++ b/tools/create_schema.sh @@ -2,14 +2,13 @@ create_new_db() { - set -e - - DBNAME=$1 - MYSQLCMD="mysql -u root" +set -e +DBNAME=$1 MYSQLCMD="mysql -u root" + CMD=$(cat <