diff --git a/api/identityfile/identityfile.go b/api/identityfile/identityfile.go index d69d045830684..00c921af849fb 100644 --- a/api/identityfile/identityfile.go +++ b/api/identityfile/identityfile.go @@ -36,6 +36,14 @@ import ( const ( // FilePermissions defines file permissions for identity files. + // + // Specifically, for postgres, this must be 0600 or 0640 (choosing 0600 as it's more restrictive) + // https://www.postgresql.org/docs/current/libpq-ssl.html + // On Unix systems, the permissions on the private key file must disallow any access to world or group; + // achieve this by a command such as chmod 0600 ~/.postgresql/postgresql.key. + // Alternatively, the file can be owned by root and have group read access (that is, 0640 permissions). + // + // Other services should accept 0600 as well, if not, we must change the Write function (in `lib/client/identityfile/identity.go`) FilePermissions = 0600 ) diff --git a/api/types/constants.go b/api/types/constants.go index 871e35d3affc1..72f811d4c787e 100644 --- a/api/types/constants.go +++ b/api/types/constants.go @@ -256,6 +256,9 @@ const ( // KindSessionTracker is a resource that tracks a live session. KindSessionTracker = "session_tracker" + // KindDatabaseCertificate is a resource to control Database Certificates generation + KindDatabaseCertificate = "database_certificate" + // V5 is the fifth version of resources. V5 = "v5" diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index b0a57dda8ecf8..fe3182b44ea46 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -18,6 +18,7 @@ package auth import ( "context" + "fmt" "net/url" "time" @@ -3687,14 +3688,25 @@ func (a *ServerWithRoles) SignDatabaseCSR(ctx context.Context, req *proto.Databa // role Db. // - Database service when initiating connection to a database instance to // produce a client certificate. 
+// - Proxy service when generating mTLS files to a database func (a *ServerWithRoles) GenerateDatabaseCert(ctx context.Context, req *proto.DatabaseCertRequest) (*proto.DatabaseCertResponse, error) { - // Check if this is a local cluster admin, or a database service, or a - // user that is allowed to impersonate database service. - if !a.hasBuiltinRole(types.RoleDatabase, types.RoleAdmin) { - if err := a.canImpersonateBuiltinRole(types.RoleDatabase); err != nil { - log.WithError(err).Warnf("User %v tried to generate database certificate but is not allowed to impersonate %q system role.", - a.context.User.GetName(), types.RoleDatabase) - return nil, trace.AccessDenied(`access denied. The user must be able to impersonate the builtin role and user "Db" in order to generate database certificates, for more info see https://goteleport.com/docs/database-access/reference/cli/#tctl-auth-sign.`) + // Check if the User can `create` DatabaseCertificates + err := a.action(apidefaults.Namespace, types.KindDatabaseCertificate, types.VerbCreate) + if err != nil { + if !trace.IsAccessDenied(err) { + return nil, trace.Wrap(err) + } + + // Err is access denied, trying the old way + + // Check if this is a local cluster admin, or a database service, or a + // user that is allowed to impersonate database service. + if !a.hasBuiltinRole(types.RoleDatabase, types.RoleAdmin) { + if err := a.canImpersonateBuiltinRole(types.RoleDatabase); err != nil { + log.WithError(err).Warnf("User %v tried to generate database certificate but does not have '%s' permission for '%s' kind, nor is allowed to impersonate %q system role", + a.context.User.GetName(), types.VerbCreate, types.KindDatabaseCertificate, types.RoleDatabase) + return nil, trace.AccessDenied(fmt.Sprintf("access denied. 
User must have '%s' permission for '%s' kind to generate the certificate ", types.VerbCreate, types.KindDatabaseCertificate)) + } } } return a.authServer.GenerateDatabaseCert(ctx, req) diff --git a/lib/auth/permissions.go b/lib/auth/permissions.go index 89e4cbe99813e..e15ac3b492557 100644 --- a/lib/auth/permissions.go +++ b/lib/auth/permissions.go @@ -358,6 +358,80 @@ func (a *authorizer) authorizeRemoteBuiltinRole(r RemoteBuiltinRole) (*Context, }, nil } +func roleSpecForProxyWithRecordAtProxy(clusterName string) types.RoleSpecV5 { + base := roleSpecForProxy(clusterName) + base.Allow.Rules = append(base.Allow.Rules, types.NewRule(types.KindHostCert, services.RW())) + return base +} + +func roleSpecForProxy(clusterName string) types.RoleSpecV5 { + return types.RoleSpecV5{ + Allow: types.RoleConditions{ + Namespaces: []string{types.Wildcard}, + ClusterLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + AppLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + DatabaseLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + KubernetesLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + Rules: []types.Rule{ + types.NewRule(types.KindProxy, services.RW()), + types.NewRule(types.KindOIDCRequest, services.RW()), + types.NewRule(types.KindSSHSession, services.RW()), + types.NewRule(types.KindSession, services.RO()), + types.NewRule(types.KindEvent, services.RW()), + types.NewRule(types.KindSAMLRequest, services.RW()), + types.NewRule(types.KindOIDC, services.ReadNoSecrets()), + types.NewRule(types.KindSAML, services.ReadNoSecrets()), + types.NewRule(types.KindGithub, services.ReadNoSecrets()), + types.NewRule(types.KindGithubRequest, services.RW()), + types.NewRule(types.KindNamespace, services.RO()), + types.NewRule(types.KindNode, services.RO()), + types.NewRule(types.KindAuthServer, services.RO()), + types.NewRule(types.KindReverseTunnel, services.RO()), 
+ types.NewRule(types.KindCertAuthority, services.ReadNoSecrets()), + types.NewRule(types.KindUser, services.RO()), + types.NewRule(types.KindRole, services.RO()), + types.NewRule(types.KindClusterAuthPreference, services.RO()), + types.NewRule(types.KindClusterName, services.RO()), + types.NewRule(types.KindClusterAuditConfig, services.RO()), + types.NewRule(types.KindClusterNetworkingConfig, services.RO()), + types.NewRule(types.KindSessionRecordingConfig, services.RO()), + types.NewRule(types.KindStaticTokens, services.RO()), + types.NewRule(types.KindTunnelConnection, services.RW()), + types.NewRule(types.KindRemoteCluster, services.RO()), + types.NewRule(types.KindSemaphore, services.RW()), + types.NewRule(types.KindAppServer, services.RO()), + types.NewRule(types.KindWebSession, services.RW()), + types.NewRule(types.KindWebToken, services.RW()), + types.NewRule(types.KindKubeService, services.RW()), + types.NewRule(types.KindDatabaseServer, services.RO()), + types.NewRule(types.KindLock, services.RO()), + types.NewRule(types.KindToken, []string{types.VerbRead, types.VerbDelete}), + types.NewRule(types.KindWindowsDesktopService, services.RO()), + types.NewRule(types.KindDatabaseCertificate, []string{types.VerbCreate}), + types.NewRule(types.KindWindowsDesktop, services.RO()), + // this rule allows local proxy to update the remote cluster's host certificate authorities + // during certificates renewal + { + Resources: []string{types.KindCertAuthority}, + Verbs: []string{types.VerbCreate, types.VerbRead, types.VerbUpdate}, + // allow administrative access to the host certificate authorities + // matching any cluster name except local + Where: builder.And( + builder.Equals(services.CertAuthorityTypeExpr, builder.String(string(types.HostCA))), + builder.Not( + builder.Equals( + services.ResourceNameExpr, + builder.String(clusterName), + ), + ), + ).String(), + }, + }, + }, + } +} + // RoleSetForBuiltinRole returns RoleSet for embedded builtin role func 
RoleSetForBuiltinRoles(clusterName string, recConfig types.SessionRecordingConfig, roles ...types.SystemRole) (services.RoleSet, error) { var definitions []types.Role @@ -481,128 +555,13 @@ func definitionForBuiltinRole(clusterName string, recConfig types.SessionRecordi if services.IsRecordAtProxy(recConfig.GetMode()) { return services.RoleFromSpec( role.String(), - types.RoleSpecV5{ - Allow: types.RoleConditions{ - Namespaces: []string{types.Wildcard}, - ClusterLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - Rules: []types.Rule{ - types.NewRule(types.KindProxy, services.RW()), - types.NewRule(types.KindOIDCRequest, services.RW()), - types.NewRule(types.KindSSHSession, services.RW()), - types.NewRule(types.KindSession, services.RO()), - types.NewRule(types.KindEvent, services.RW()), - types.NewRule(types.KindSAMLRequest, services.RW()), - types.NewRule(types.KindOIDC, services.ReadNoSecrets()), - types.NewRule(types.KindSAML, services.ReadNoSecrets()), - types.NewRule(types.KindGithub, services.ReadNoSecrets()), - types.NewRule(types.KindGithubRequest, services.RW()), - types.NewRule(types.KindNamespace, services.RO()), - types.NewRule(types.KindNode, services.RO()), - types.NewRule(types.KindAuthServer, services.RO()), - types.NewRule(types.KindReverseTunnel, services.RO()), - types.NewRule(types.KindCertAuthority, services.ReadNoSecrets()), - types.NewRule(types.KindUser, services.RO()), - types.NewRule(types.KindRole, services.RO()), - types.NewRule(types.KindClusterAuthPreference, services.RO()), - types.NewRule(types.KindClusterName, services.RO()), - types.NewRule(types.KindClusterAuditConfig, services.RO()), - types.NewRule(types.KindClusterNetworkingConfig, services.RO()), - types.NewRule(types.KindSessionRecordingConfig, services.RO()), - types.NewRule(types.KindStaticTokens, services.RO()), - types.NewRule(types.KindTunnelConnection, services.RW()), - types.NewRule(types.KindHostCert, services.RW()), - 
types.NewRule(types.KindRemoteCluster, services.RO()), - types.NewRule(types.KindSemaphore, services.RW()), - types.NewRule(types.KindAppServer, services.RO()), - types.NewRule(types.KindWebSession, services.RW()), - types.NewRule(types.KindWebToken, services.RW()), - types.NewRule(types.KindKubeService, services.RW()), - types.NewRule(types.KindDatabaseServer, services.RO()), - types.NewRule(types.KindLock, services.RO()), - types.NewRule(types.KindWindowsDesktopService, services.RO()), - types.NewRule(types.KindWindowsDesktop, services.RO()), - // this rule allows local proxy to update the remote cluster's host certificate authorities - // during certificates renewal - { - Resources: []string{types.KindCertAuthority}, - Verbs: []string{types.VerbCreate, types.VerbRead, types.VerbUpdate}, - // allow administrative access to the host certificate authorities - // matching any cluster name except local - Where: builder.And( - builder.Equals(services.CertAuthorityTypeExpr, builder.String(string(types.HostCA))), - builder.Not( - builder.Equals( - services.ResourceNameExpr, - builder.String(clusterName), - ), - ), - ).String(), - }, - }, - }, - }) + roleSpecForProxyWithRecordAtProxy(clusterName), + ) } return services.RoleFromSpec( role.String(), - types.RoleSpecV5{ - Allow: types.RoleConditions{ - Namespaces: []string{types.Wildcard}, - ClusterLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - Rules: []types.Rule{ - types.NewRule(types.KindProxy, services.RW()), - types.NewRule(types.KindOIDCRequest, services.RW()), - types.NewRule(types.KindSSHSession, services.RW()), - types.NewRule(types.KindSession, services.RO()), - types.NewRule(types.KindEvent, services.RW()), - types.NewRule(types.KindSAMLRequest, services.RW()), - types.NewRule(types.KindOIDC, services.ReadNoSecrets()), - types.NewRule(types.KindSAML, services.ReadNoSecrets()), - types.NewRule(types.KindGithub, services.ReadNoSecrets()), - types.NewRule(types.KindGithubRequest, services.RW()), - 
types.NewRule(types.KindNamespace, services.RO()), - types.NewRule(types.KindNode, services.RO()), - types.NewRule(types.KindAuthServer, services.RO()), - types.NewRule(types.KindReverseTunnel, services.RO()), - types.NewRule(types.KindCertAuthority, services.ReadNoSecrets()), - types.NewRule(types.KindUser, services.RO()), - types.NewRule(types.KindRole, services.RO()), - types.NewRule(types.KindClusterAuthPreference, services.RO()), - types.NewRule(types.KindClusterName, services.RO()), - types.NewRule(types.KindClusterAuditConfig, services.RO()), - types.NewRule(types.KindClusterNetworkingConfig, services.RO()), - types.NewRule(types.KindSessionRecordingConfig, services.RO()), - types.NewRule(types.KindStaticTokens, services.RO()), - types.NewRule(types.KindTunnelConnection, services.RW()), - types.NewRule(types.KindRemoteCluster, services.RO()), - types.NewRule(types.KindSemaphore, services.RW()), - types.NewRule(types.KindAppServer, services.RO()), - types.NewRule(types.KindWebSession, services.RW()), - types.NewRule(types.KindWebToken, services.RW()), - types.NewRule(types.KindKubeService, services.RW()), - types.NewRule(types.KindDatabaseServer, services.RO()), - types.NewRule(types.KindLock, services.RO()), - types.NewRule(types.KindWindowsDesktopService, services.RO()), - types.NewRule(types.KindWindowsDesktop, services.RO()), - // this rule allows local proxy to update the remote cluster's host certificate authorities - // during certificates renewal - { - Resources: []string{types.KindCertAuthority}, - Verbs: []string{types.VerbCreate, types.VerbRead, types.VerbUpdate}, - // allow administrative access to the certificate authority names - // matching any cluster name except local - Where: builder.And( - builder.Equals(services.CertAuthorityTypeExpr, builder.String(string(types.HostCA))), - builder.Not( - builder.Equals( - services.ResourceNameExpr, - builder.String(clusterName), - ), - ), - ).String(), - }, - }, - }, - }) + roleSpecForProxy(clusterName), 
+ ) case types.RoleSignup: return services.RoleFromSpec( role.String(), diff --git a/lib/client/db/database_certificates.go b/lib/client/db/database_certificates.go new file mode 100644 index 0000000000000..b477db874f60c --- /dev/null +++ b/lib/client/db/database_certificates.go @@ -0,0 +1,126 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package db + +import ( + "context" + "crypto/x509/pkix" + "time" + + "github.com/gravitational/teleport/api/client/proto" + "github.com/gravitational/teleport/lib/auth" + "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/identityfile" + "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/trace" +) + +// GenerateDatabaseCertificatesRequest contains the required fields used to generate database certificates +// Those certificates will be used by databases to set up mTLS authentication against Teleport +type GenerateDatabaseCertificatesRequest struct { + ClusterAPI auth.ClientI + Principals []string + OutputFormat identityfile.Format + OutputCanOverwrite bool + OutputLocation string + IdentityFileWriter identityfile.ConfigWriter + TTL time.Duration + Key *client.Key +} + +// GenerateDatabaseCertificates to be used by databases to set up mTLS authentication +func GenerateDatabaseCertificates(ctx context.Context, req GenerateDatabaseCertificatesRequest) ([]string, error) { + + if 
len(req.Principals) == 0 || + (len(req.Principals) == 1 && req.Principals[0] == "" && req.OutputFormat != identityfile.FormatSnowflake) { + + return nil, trace.BadParameter("at least one hostname must be specified") + } + + // For CockroachDB node certificates, CommonName must be "node": + // + // https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates + if req.OutputFormat == identityfile.FormatCockroach { + req.Principals = append([]string{"node"}, req.Principals...) + } + + subject := pkix.Name{CommonName: req.Principals[0]} + + if req.OutputFormat == identityfile.FormatMongo { + // Include Organization attribute in MongoDB certificates as well. + // + // When using X.509 member authentication, MongoDB requires O or OU to + // be non-empty so this will make the certs we generate compatible: + // + // https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509 + // + // The actual O value doesn't matter as long as it matches on all + // MongoDB cluster members so set it to the Teleport cluster name + // to avoid hardcoding anything. + + clusterNameType, err := req.ClusterAPI.GetClusterName() + if err != nil { + return nil, trace.Wrap(err) + } + + subject.Organization = []string{clusterNameType.GetClusterName()} + } + + if req.Key == nil { + key, err := client.NewKey() + if err != nil { + return nil, trace.Wrap(err) + } + req.Key = key + } + + csr, err := tlsca.GenerateCertificateRequestPEM(subject, req.Key.Priv) + if err != nil { + return nil, trace.Wrap(err) + } + + resp, err := req.ClusterAPI.GenerateDatabaseCert(ctx, + &proto.DatabaseCertRequest{ + CSR: csr, + // Important to include SANs since CommonName has been deprecated + // since Go 1.15: + // https://golang.org/doc/go1.15#commonname + ServerNames: req.Principals, + // Include legacy ServerName for compatibility. 
+ ServerName: req.Principals[0], + TTL: proto.Duration(req.TTL), + RequesterName: proto.DatabaseCertRequest_TCTL, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + req.Key.TLSCert = resp.Cert + req.Key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} + filesWritten, err := identityfile.Write(identityfile.WriteConfig{ + OutputPath: req.OutputLocation, + Key: req.Key, + Format: req.OutputFormat, + OverwriteDestination: req.OutputCanOverwrite, + Writer: req.IdentityFileWriter, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + return filesWritten, nil +} diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go new file mode 100644 index 0000000000000..0f6c2faa04c58 --- /dev/null +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -0,0 +1,96 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package identityfile + +import ( + "io/fs" + "os" + "sync" + "time" + + "github.com/gravitational/teleport/lib/utils" + "github.com/gravitational/trace" +) + +// NewInMemoryConfigWriter creates a new virtual file system +// It stores the files contents and their properties in memory +func NewInMemoryConfigWriter() *InMemoryConfigWriter { + return &InMemoryConfigWriter{ + mux: &sync.RWMutex{}, + files: make(map[string]*utils.InMemoryFile), + } +} + +// InMemoryConfigWriter is a basic virtual file system abstraction that writes into memory +// instead of writing to a more persistent storage. +type InMemoryConfigWriter struct { + mux *sync.RWMutex + files map[string]*utils.InMemoryFile +} + +// WriteFile writes the given data to path `name` +// It replaces the file if it already exists +func (m *InMemoryConfigWriter) WriteFile(name string, data []byte, perm os.FileMode) error { + m.mux.Lock() + defer m.mux.Unlock() + m.files[name] = utils.NewInMemoryFile(name, perm, time.Now(), data) + + return nil +} + +// Remove the file. +// If the file does not exist, Remove is a no-op +func (m *InMemoryConfigWriter) Remove(name string) error { + m.mux.Lock() + defer m.mux.Unlock() + + delete(m.files, name) + return nil +} + +// Stat returns the FileInfo of the given file. +// Returns fs.ErrNotExists if the file is not present +func (m *InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { + m.mux.RLock() + defer m.mux.RUnlock() + + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f, nil +} + +// ReadFile returns the file contents. +// Returns fs.ErrNotExists if the file is not present +func (m *InMemoryConfigWriter) ReadFile(name string) ([]byte, error) { + m.mux.RLock() + defer m.mux.RUnlock() + + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f.Content(), nil +} + +// Open is not implemented but exists here to satisfy the io/fs.ReadFileFS interface. 
+func (m *InMemoryConfigWriter) Open(name string) (fs.File, error) { + return nil, trace.NotImplemented("Open is not implemented for InMemoryConfigWriter") +} diff --git a/lib/client/identityfile/inmemory_config_writer_test.go b/lib/client/identityfile/inmemory_config_writer_test.go new file mode 100644 index 0000000000000..44ed0786f668b --- /dev/null +++ b/lib/client/identityfile/inmemory_config_writer_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package identityfile + +import ( + "bytes" + "io/fs" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestInMemory(t *testing.T) { + virtualFS := NewInMemoryConfigWriter() + + content := bytes.Repeat([]byte("A"), 4000) + filename := "test1" + fileMode := os.FileMode(0644) + fileSize := int64(len(content)) + + err := virtualFS.WriteFile(filename, content, fileMode) + require.NoError(t, err) + + bs, err := virtualFS.ReadFile(filename) + require.NoError(t, err) + require.Equal(t, bs, content) + + fileStat, err := virtualFS.Stat(filename) + require.NoError(t, err) + require.Equal(t, fileStat.Name(), filename) + require.Equal(t, fileStat.Mode(), fileMode) + require.Equal(t, fileStat.Size(), fileSize) + require.False(t, fileStat.IsDir()) + require.WithinDuration(t, fileStat.ModTime(), time.Now(), time.Second) + + err = virtualFS.Remove(filename) + require.NoError(t, err) + + _, err = virtualFS.ReadFile(filename) + require.ErrorIs(t, err, fs.ErrNotExist) + + _, err = virtualFS.Stat(filename) + require.ErrorIs(t, err, fs.ErrNotExist) + +} diff --git a/lib/services/presets.go b/lib/services/presets.go index c154696993710..6b4ede8ee7959 100644 --- a/lib/services/presets.go +++ b/lib/services/presets.go @@ -68,6 +68,7 @@ func NewPresetEditorRole() types.Role { types.NewRule(types.KindTrustedCluster, RW()), types.NewRule(types.KindRemoteCluster, RW()), types.NewRule(types.KindToken, RW()), + types.NewRule(types.KindDatabaseCertificate, RW()), }, }, }, diff --git a/lib/services/provisioning.go b/lib/services/provisioning.go index 6b9b84c5fd00d..f6bb1938bac4d 100644 --- a/lib/services/provisioning.go +++ b/lib/services/provisioning.go @@ -38,6 +38,7 @@ type Provisioner interface { GetToken(ctx context.Context, token string) (types.ProvisionToken, error) // DeleteToken deletes provisioning token + // Implementations must guarantee that this returns trace.NotFound error if the token doesn't exist DeleteToken(ctx, token
string) error // DeleteAllTokens deletes all provisioning tokens diff --git a/lib/utils/archive.go b/lib/utils/archive.go new file mode 100644 index 0000000000000..001dcca479c82 --- /dev/null +++ b/lib/utils/archive.go @@ -0,0 +1,72 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "io/fs" + + "github.com/gravitational/trace" +) + +// ReadStatFS combines two interfaces: fs.ReadFileFS and fs.StatFS +// We need both when creating the archive to be able to: +// - read file contents - `ReadFile` provided by fs.ReadFileFS +// - set the correct file permissions - `Stat() ... 
Mode()` provided by fs.StatFS +type ReadStatFS interface { + fs.ReadFileFS + fs.StatFS +} + +// CompressTarGzArchive creates a Tar Gzip archive in memory, reading the files using the provided file reader +func CompressTarGzArchive(files []string, fileReader ReadStatFS) (*bytes.Buffer, error) { + archiveBytes := &bytes.Buffer{} + + gzipWriter := gzip.NewWriter(archiveBytes) + defer gzipWriter.Close() + + tarWriter := tar.NewWriter(gzipWriter) + defer tarWriter.Close() + + for _, filename := range files { + bs, err := fileReader.ReadFile(filename) + if err != nil { + return nil, trace.Wrap(err) + } + + fileStat, err := fileReader.Stat(filename) + if err != nil { + return nil, trace.Wrap(err) + } + + if err := tarWriter.WriteHeader(&tar.Header{ + Name: filename, + Size: int64(len(bs)), + Mode: int64(fileStat.Mode()), + }); err != nil { + return nil, trace.Wrap(err) + } + + if _, err := tarWriter.Write(bs); err != nil { + return nil, trace.Wrap(err) + } + } + + return archiveBytes, nil +} diff --git a/lib/utils/archive_test.go b/lib/utils/archive_test.go new file mode 100644 index 0000000000000..f9b6babc736e8 --- /dev/null +++ b/lib/utils/archive_test.go @@ -0,0 +1,122 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "archive/tar" + "compress/gzip" + "io" + "io/fs" + "testing" + "time" + + "github.com/gravitational/teleport" + "github.com/gravitational/trace" + "github.com/stretchr/testify/require" +) + +type mockFileReader struct { + files map[string]*InMemoryFile +} + +func (m mockFileReader) ReadFile(name string) ([]byte, error) { + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f.Content(), nil +} + +func (m mockFileReader) Open(name string) (fs.File, error) { + return nil, trace.NotImplemented("Open is not implemented") +} + +func (m mockFileReader) Stat(name string) (fs.FileInfo, error) { + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f, nil +} + +// CompressAsTarGzArchive creates a Tar Gzip archive in memory, reading the files using the provided file reader +func TestCompressAsTarGzArchive(t *testing.T) { + tests := []struct { + name string + fileNames []string + fsContents map[string]*InMemoryFile + assert require.ErrorAssertionFunc + }{ + { + name: "File Not Exists bubbles up", + fileNames: []string{"not", "found"}, + fsContents: map[string]*InMemoryFile{}, + assert: func(t require.TestingT, err error, i ...interface{}) { + require.Error(t, err) + require.ErrorIs(t, err, fs.ErrNotExist) + }, + }, + { + name: "Archive is created", + fileNames: []string{"file1", "file2"}, + fsContents: map[string]*InMemoryFile{ + "file1": NewInMemoryFile("file1", teleport.FileMaskOwnerOnly, time.Now(), []byte("contentsfile1")), + "file2": NewInMemoryFile("file2", teleport.FileMaskOwnerOnly, time.Now(), []byte("contentsfile2")), + }, + assert: require.NoError, + }, + } + + for _, tt := range tests { + fileReader := mockFileReader{ + files: tt.fsContents, + } + bs, err := CompressTarGzArchive(tt.fileNames, fileReader) + tt.assert(t, err) + if err != nil { + continue + } + + gzipReader, err := gzip.NewReader(bs) + require.NoError(t, err) + + 
tarContentFileNames := []string{} + + tarReader := tar.NewReader(gzipReader) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + require.Equal(t, byte(tar.TypeReg), header.Typeflag) + + tarContentFileNames = append(tarContentFileNames, header.Name) + require.Contains(t, tt.fsContents, header.Name) + + gotBytes, err := io.ReadAll(tarReader) + require.NoError(t, err) + t.Log(string(gotBytes)) + + require.Equal(t, tt.fsContents[header.Name].content, gotBytes) + require.Equal(t, tt.fsContents[header.Name].mode, fs.FileMode(header.Mode)) + } + require.ElementsMatch(t, tarContentFileNames, tt.fileNames) + } +} diff --git a/lib/utils/inmemory_fs.go b/lib/utils/inmemory_fs.go new file mode 100644 index 0000000000000..fdaf45b9c887d --- /dev/null +++ b/lib/utils/inmemory_fs.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "io/fs" + "time" +) + +// InMemoryFile stores the required properties to emulate a File in memory +// It contains the File properties like name, size, mode +// It also contains the File contents +// It does not support folders +type InMemoryFile struct { + name string + mode fs.FileMode + modTime time.Time + content []byte +} + +func NewInMemoryFile(name string, mode fs.FileMode, modTime time.Time, content []byte) *InMemoryFile { + return &InMemoryFile{ + name: name, + mode: mode, + modTime: modTime, + content: content, + } +} + +// Name returns the file's name +func (fi *InMemoryFile) Name() string { + return fi.name +} + +// Size returns the file size (calculated when writing the file) +func (fi *InMemoryFile) Size() int64 { + return int64(len(fi.content)) +} + +// Mode returns the fs.FileMode +func (fi *InMemoryFile) Mode() fs.FileMode { + return fi.mode +} + +// ModTime returns the last modification time +func (fi *InMemoryFile) ModTime() time.Time { + return fi.modTime +} + +// IsDir checks whether the file is a directory +func (fi *InMemoryFile) IsDir() bool { + return false +} + +// Sys is platform independent +// InMemoryFile's implementation is no-op +func (fi *InMemoryFile) Sys() interface{} { + return nil +} + +// Content returns the file bytes +func (fi *InMemoryFile) Content() []byte { + return fi.content +} diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index afb8508c7be9b..31f539ce369e2 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -365,6 +365,9 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { h.GET("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile)) h.POST("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile)) + // Sign required files to set up mTLS using the db format. 
+ h.POST("/webapi/sites/:site/sign/db", h.WithProvisionTokenAuth(h.signDatabaseCertificate)) + // token generation h.POST("/webapi/token", h.WithAuth(h.createTokenHandle)) @@ -2687,6 +2690,59 @@ func (h *Handler) WithClusterAuth(fn ClusterHandler) httprouter.Handle { }) } +// ProvisionTokenHandler is an authenticated handler that is called for some existing Token +type ProvisionTokenHandler func(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, token types.ProvisionToken) (interface{}, error) + +// WithProvisionTokenAuth ensures that request is authenticated with a provision token. +// Provision tokens, when used like this are invalidated as soon as used. +// Doesn't matter if the underlying response was a success or an error. +func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenHandler) httprouter.Handle { + return httplib.MakeHandler(func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) { + ctx := r.Context() + logger := h.log.WithField("request", fmt.Sprintf("%v %v", r.Method, r.URL.Path)) + + creds, err := roundtrip.ParseAuthHeaders(r) + if err != nil { + logger.WithError(err).Warn("No auth headers.") + return nil, trace.AccessDenied("need auth") + } + + token, err := h.consumeTokenForAPICall(ctx, creds.Password) + if err != nil { + h.log.WithError(err).Warn("Failed to authenticate.") + return nil, trace.AccessDenied("need auth") + } + + site, err := h.cfg.Proxy.GetSite(h.auth.clusterName) + if err != nil { + h.log.WithError(err).WithField("cluster-name", h.auth.clusterName).Warn("Failed to query cluster.") + return nil, trace.Wrap(err) + } + + return fn(w, r, p, site, token) + }) +} + +// consumeTokenForAPICall will fetch a token, check if the requireRole is present and then delete the token +// If any of those calls returns an error, this method also returns an error +// +// If multiple clients reach here at the same time, only one of them will be able to actually make the
request. +// This is possible because the latest call - DeleteToken - returns an error if the resource doesn't exist +// This is currently true for all the backends as explained here +// https://github.com/gravitational/teleport/commit/24fcadc375d8359e80790b3ebeaa36bd8dd2822f +func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string) (types.ProvisionToken, error) { + token, err := h.GetProxyClient().GetToken(ctx, tokenName) + if err != nil { + return nil, trace.Wrap(err) + } + + if err := h.GetProxyClient().DeleteToken(ctx, token.GetName()); err != nil { + return nil, trace.Wrap(err) + } + + return token, nil +} + type redirectHandlerFunc func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (redirectURL string) func isValidRedirectURL(redirectURL string) bool { diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 7ec74ae075a40..c7d163d04a8ac 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -17,9 +17,11 @@ limitations under the License. 
package web import ( + "archive/tar" "bufio" "bytes" "compress/flate" + "compress/gzip" "context" "crypto/tls" "encoding/base32" @@ -2185,6 +2187,166 @@ func TestTokenGeneration(t *testing.T) { } } +func TestSignMTLS(t *testing.T) { + env := newWebPack(t, 1) + clusterName := env.server.ClusterName() + + proxy := env.proxies[0] + pack := proxy.authPack(t, "test-user@example.com", nil) + + endpoint := pack.clt.Endpoint("webapi", "token") + re, err := pack.clt.PostJSON(context.Background(), endpoint, types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleDatabase}, + }) + require.NoError(t, err) + + var responseToken nodeJoinToken + err = json.Unmarshal(re.Bytes(), &responseToken) + require.NoError(t, err) + + // download mTLS files from /webapi/sites/:site/sign/db + endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign", "db") + + bs, err := json.Marshal(struct { + Hostname string `json:"hostname"` + TTL string `json:"ttl"` + }{ + Hostname: "mypg.example.com", + TTL: "2h", + }) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) + require.NoError(t, err) + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Authorization", "Bearer "+responseToken.ID) + + anonHTTPClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + resp, err := anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode) + + gzipReader, err := gzip.NewReader(resp.Body) + require.NoError(t, err) + + tarReader := tar.NewReader(gzipReader) + + tarContentFileNames := []string{} + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + require.Equal(t, byte(tar.TypeReg), header.Typeflag) + require.Equal(t, int64(0600), header.Mode) + tarContentFileNames = append(tarContentFileNames, header.Name) + } + + 
expectedFileNames := []string{"server.cas", "server.key", "server.crt"} + require.ElementsMatch(t, tarContentFileNames, expectedFileNames) + + // the token is no longer valid, so trying again should return an error + req, err = http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) + require.NoError(t, err) + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Authorization", "Bearer "+responseToken.ID) + + respSecondCall, err := anonHTTPClient.Do(req) + require.NoError(t, err) + defer respSecondCall.Body.Close() + require.Equal(t, http.StatusForbidden, respSecondCall.StatusCode) +} + +func TestSignMTLS_failsAccessDenied(t *testing.T) { + env := newWebPack(t, 1) + clusterName := env.server.ClusterName() + username := "test-user@example.com" + + roleUserUpdate, err := types.NewRole(services.RoleNameForUser(username), types.RoleSpecV5{ + Allow: types.RoleConditions{ + Rules: []types.Rule{ + types.NewRule(types.KindUser, []string{types.VerbUpdate}), + types.NewRule(types.KindToken, []string{types.VerbCreate}), + }, + }, + }) + require.NoError(t, err) + + proxy := env.proxies[0] + pack := proxy.authPack(t, username, []types.Role{roleUserUpdate}) + + endpoint := pack.clt.Endpoint("webapi", "token") + re, err := pack.clt.PostJSON(context.Background(), endpoint, types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleProxy}, + }) + require.NoError(t, err) + + var responseToken nodeJoinToken + err = json.Unmarshal(re.Bytes(), &responseToken) + require.NoError(t, err) + + // download mTLS files from /webapi/sites/:site/sign/db + endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign", "db") + + bs, err := json.Marshal(struct { + Hostname string `json:"hostname"` + TTL string `json:"ttl"` + Format string `json:"format"` + }{ + Hostname: "mypg.example.com", + TTL: "2h", + Format: "db", + }) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) + require.NoError(t, 
err) + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Authorization", "Bearer "+responseToken.ID) + + anonHTTPClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + resp, err := anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + // It fails because we passed a Provision Token with the wrong Role: Proxy + require.Equal(t, http.StatusForbidden, resp.StatusCode) + + // using a user token also returns Forbidden + endpointResetToken := pack.clt.Endpoint("webapi", "users", "password", "token") + _, err = pack.clt.PostJSON(context.Background(), endpointResetToken, auth.CreateUserTokenRequest{ + Name: username, + TTL: time.Minute, + Type: auth.UserTokenTypeResetPassword, + }) + require.NoError(t, err) + + req, err = http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) + require.NoError(t, err) + + resp, err = anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusForbidden, resp.StatusCode) +} + func TestClusterDatabasesGet(t *testing.T) { env := newWebPack(t, 1) diff --git a/lib/web/sign.go b/lib/web/sign.go new file mode 100644 index 0000000000000..3b1ec393f3e1d --- /dev/null +++ b/lib/web/sign.go @@ -0,0 +1,123 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package web + +import ( + "bytes" + "fmt" + "net/http" + "time" + + "github.com/gravitational/trace" + "github.com/julienschmidt/httprouter" + + apidefaults "github.com/gravitational/teleport/api/defaults" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/client/db" + "github.com/gravitational/teleport/lib/client/identityfile" + "github.com/gravitational/teleport/lib/httplib" + "github.com/gravitational/teleport/lib/reversetunnel" + "github.com/gravitational/teleport/lib/utils" +) + +/* +signDatabaseCertificate returns the necessary files to set up mTLS using the `db` format +This is the equivalent of running the tctl command +As an example, requesting: +POST /webapi/sites/mycluster/sign/db +{ + "hostname": "pg.example.com", + "ttl": "2190h" +} + +Should be equivalent to running: + tctl auth sign --host=pg.example.com --ttl=2190h --format=db + +This endpoint returns a tar.gz compressed archive containing the required files to setup mTLS for the database. 
+*/ +func (h *Handler) signDatabaseCertificate(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, token types.ProvisionToken) (interface{}, error) { + if !token.GetRoles().Include(types.RoleDatabase) { + return nil, trace.AccessDenied("required '%s' role was not provided by the token", types.RoleDatabase) + } + + req := &signDatabaseCertificateReq{} + if err := httplib.ReadJSON(r, &req); err != nil { + return nil, trace.Wrap(err) + } + + if err := req.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + virtualFS := identityfile.NewInMemoryConfigWriter() + + dbCertReq := db.GenerateDatabaseCertificatesRequest{ + ClusterAPI: h.auth.proxyClient, + Principals: []string{req.Hostname}, + OutputFormat: identityfile.FormatDatabase, + OutputCanOverwrite: true, + OutputLocation: "server", + IdentityFileWriter: virtualFS, + TTL: req.TTL, + } + filesWritten, err := db.GenerateDatabaseCertificates(r.Context(), dbCertReq) + if err != nil { + return nil, trace.Wrap(err) + } + + archiveName := fmt.Sprintf("teleport_mTLS_%s.tar.gz", req.Hostname) + archiveBytes, err := utils.CompressTarGzArchive(filesWritten, virtualFS) + if err != nil { + return nil, trace.Wrap(err) + } + + // Set file name + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment;filename="%v"`, archiveName)) + + // ServeContent sets the correct headers: Content-Type, Content-Length and Accept-Ranges. 
+ // It also handles the Range negotiation + http.ServeContent(w, r, archiveName, time.Now(), bytes.NewReader(archiveBytes.Bytes())) + + return nil, nil +} + +type signDatabaseCertificateReq struct { + Hostname string `json:"hostname,omitempty"` + TTLRaw string `json:"ttl,omitempty"` + + TTL time.Duration `json:"-"` +} + +// CheckAndSetDefaults will validate and convert the received values +// Hostname must not be empty +// TTL must either be a valid time.Duration or empty (inherits apidefaults.CertDuration) +func (s *signDatabaseCertificateReq) CheckAndSetDefaults() error { + if s.Hostname == "" { + return trace.BadParameter("missing hostname") + } + + if s.TTLRaw == "" { + s.TTLRaw = apidefaults.CertDuration.String() + } + ttl, err := time.ParseDuration(s.TTLRaw) + if err != nil { + return trace.BadParameter("invalid ttl '%s', use https://pkg.go.dev/time#ParseDuration format (example: 2190h)", s.TTLRaw) + } + s.TTL = ttl + + return nil +} diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index a9a9e4fa7a6a6..58d925d078392 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -16,9 +16,9 @@ package common import ( "context" - "crypto/x509/pkix" "encoding/pem" "fmt" + "io" "net" "net/url" "os" @@ -33,13 +33,13 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/native" "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/db" "github.com/gravitational/teleport/lib/client/identityfile" "github.com/gravitational/teleport/lib/defaults" kubeutils "github.com/gravitational/teleport/lib/kube/utils" "github.com/gravitational/teleport/lib/service" "github.com/gravitational/teleport/lib/services" "github.com/gravitational/teleport/lib/sshutils" - "github.com/gravitational/teleport/lib/tlsca" 
"github.com/gravitational/teleport/lib/utils" "github.com/gravitational/kingpin" @@ -372,11 +372,9 @@ func (a *AuthCommand) generateSnowflakeKey(ctx context.Context, clusterAPI auth. return trace.Wrap(err) } - err = snowflakeAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - }) - - return trace.Wrap(err) + return trace.Wrap( + writeHelperMessageDBmTLS(os.Stdout, filesWritten, "", a.outputFormat), + ) } // RotateCertAuthority starts or restarts certificate authority rotation process @@ -469,92 +467,50 @@ func (a *AuthCommand) generateDatabaseKeys(ctx context.Context, clusterAPI auth. // for database access. func (a *AuthCommand) generateDatabaseKeysForKey(ctx context.Context, clusterAPI auth.ClientI, key *client.Key) error { principals := strings.Split(a.genHost, ",") - if a.outputFormat != identityfile.FormatSnowflake && len(principals) == 1 && principals[0] == "" { - return trace.BadParameter("at least one hostname must be specified via --host flag") - } - // For CockroachDB node certificates, CommonName must be "node": - // - // https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates - if a.outputFormat == identityfile.FormatCockroach { - principals = append([]string{"node"}, principals...) - } - subject := pkix.Name{CommonName: principals[0]} - if a.outputFormat == identityfile.FormatMongo { - // Include Organization attribute in MongoDB certificates as well. - // - // When using X.509 member authentication, MongoDB requires O or OU to - // be non-empty so this will make the certs we generate compatible: - // - // https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509 - // - // The actual O value doesn't matter as long as it matches on all - // MongoDB cluster members so set it to the Teleport cluster name - // to avoid hardcoding anything. 
- clusterName, err := clusterAPI.GetClusterName() - if err != nil { - return trace.Wrap(err) - } - subject.Organization = []string{ - clusterName.GetClusterName(), - } + + dbCertReq := db.GenerateDatabaseCertificatesRequest{ + ClusterAPI: clusterAPI, + Principals: principals, + OutputFormat: a.outputFormat, + OutputCanOverwrite: a.signOverwrite, + OutputLocation: a.output, + TTL: a.genTTL, + Key: key, } - csr, err := tlsca.GenerateCertificateRequestPEM(subject, key.Priv) + filesWritten, err := db.GenerateDatabaseCertificates(ctx, dbCertReq) if err != nil { return trace.Wrap(err) } - resp, err := clusterAPI.GenerateDatabaseCert(ctx, - &proto.DatabaseCertRequest{ - CSR: csr, - // Important to include SANs since CommonName has been deprecated - // since Go 1.15: - // https://golang.org/doc/go1.15#commonname - ServerNames: principals, - // Include legacy ServerName for compatibility. - ServerName: principals[0], - TTL: proto.Duration(a.genTTL), - RequesterName: proto.DatabaseCertRequest_TCTL, - }) - if err != nil { - return trace.Wrap(err) + + return trace.Wrap(writeHelperMessageDBmTLS(os.Stdout, filesWritten, a.output, a.outputFormat)) +} + +var mapIdentityFileFormatHelperTemplate = map[identityfile.Format]*template.Template{ + identityfile.FormatDatabase: dbAuthSignTpl, + identityfile.FormatMongo: mongoAuthSignTpl, + identityfile.FormatCockroach: cockroachAuthSignTpl, + identityfile.FormatRedis: redisAuthSignTpl, + identityfile.FormatSnowflake: snowflakeAuthSignTpl, +} + +func writeHelperMessageDBmTLS(writer io.Writer, filesWritten []string, output string, outputFormat identityfile.Format) error { + if writer == nil { + return nil } - key.TLSCert = resp.Cert - key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} - filesWritten, err := identityfile.Write(identityfile.WriteConfig{ - OutputPath: a.output, - Key: key, - Format: a.outputFormat, - OverwriteDestination: a.signOverwrite, - }) - if err != nil { - return trace.Wrap(err) + + tpl, found := 
mapIdentityFileFormatHelperTemplate[outputFormat] + if !found { + // This format doesn't have a recommended configuration. + // Consider adding one to ease the installation for the end-user + return nil } - switch a.outputFormat { - case identityfile.FormatDatabase: - err = dbAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatMongo: - err = mongoAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatCockroach: - err = cockroachAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatRedis: - err = redisAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatSnowflake: - err = snowflakeAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - }) + + tplVars := map[string]interface{}{ + "files": strings.Join(filesWritten, ", "), + "output": output, } - return trace.Wrap(err) + + return trace.Wrap(tpl.Execute(writer, tplVars)) } var (