From 05b281527733335d9b6df0c4ee9ba6a99e0fa321 Mon Sep 17 00:00:00 2001
From: Marco Dinis
Date: Fri, 15 Jul 2022 18:45:28 +0100
Subject: [PATCH 01/10] WebAPI: download mTLS files for database resources

New endpoint to return an archive with mTLS files for a given resource.
This endpoint is protected by a Provision Token, which is consumed by the
API call: it is used as a one-time password.
---
 api/types/constants.go      |   3 +
 lib/auth/auth_with_roles.go |  25 +++--
 lib/auth/permissions.go     | 205 ++++++++++++++---------------------
 lib/services/presets.go     |   1 +
 lib/web/apiserver.go        |  58 ++++++++++
 lib/web/apiserver_test.go   |  69 ++++++++++++
 lib/web/sign.go             | 206 ++++++++++++++++++++++++++++++++++++
 7 files changed, 433 insertions(+), 134 deletions(-)
 create mode 100644 lib/web/sign.go

diff --git a/api/types/constants.go b/api/types/constants.go
index 0ebe3b4b7fa92..30e1fdfe02053 100644
--- a/api/types/constants.go
+++ b/api/types/constants.go
@@ -259,6 +259,9 @@ const (
 	// KindConnectionDiagnostic is a resource that tracks the result of testing a connection
 	KindConnectionDiagnostic = "connection_diagnostic"
 
+	// KindDatabaseCertificate is a resource to control Database Certificate generation
+	KindDatabaseCertificate = "database_certificate"
+
 	// V5 is the fifth version of resources.
 	V5 = "v5"
 
diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go
index 9a271c5a11b21..1a3b04b03fba0 100644
--- a/lib/auth/auth_with_roles.go
+++ b/lib/auth/auth_with_roles.go
@@ -3692,14 +3692,25 @@ func (a *ServerWithRoles) SignDatabaseCSR(ctx context.Context, req *proto.Databa
 // role Db.
 // - Database service when initiating connection to a database instance to
 //   produce a client certificate.
+// - Proxy service when generating mTLS files for a database
 func (a *ServerWithRoles) GenerateDatabaseCert(ctx context.Context, req *proto.DatabaseCertRequest) (*proto.DatabaseCertResponse, error) {
-	// Check if this is a local cluster admin, or a database service, or a
-	// user that is allowed to impersonate database service.
-	if !a.hasBuiltinRole(types.RoleDatabase, types.RoleAdmin) {
-		if err := a.canImpersonateBuiltinRole(types.RoleDatabase); err != nil {
-			log.WithError(err).Warnf("User %v tried to generate database certificate but is not allowed to impersonate %q system role.",
-				a.context.User.GetName(), types.RoleDatabase)
-			return nil, trace.AccessDenied(`access denied. The user must be able to impersonate the builtin role and user "Db" in order to generate database certificates, for more info see https://goteleport.com/docs/database-access/reference/cli/#tctl-auth-sign.`)
+	// Check if the User can `create` DatabaseCertificates
+	err := a.action(apidefaults.Namespace, types.KindDatabaseCertificate, types.VerbCreate)
+	if err != nil {
+		if !trace.IsAccessDenied(err) {
+			return nil, trace.Wrap(err)
+		}
+
+		// Err is access denied; try the old way.
+
+		// Check if this is a local cluster admin, or a database service, or a
+		// user that is allowed to impersonate database service.
+		if !a.hasBuiltinRole(types.RoleDatabase, types.RoleAdmin) {
+			if err := a.canImpersonateBuiltinRole(types.RoleDatabase); err != nil {
+				log.WithError(err).Warnf("User %v tried to generate database certificate but is not allowed to impersonate %q system role.",
+					a.context.User.GetName(), types.RoleDatabase)
+				return nil, trace.AccessDenied(`access denied. 
The user must be able to impersonate the builtin role and user "Db" in order to generate database certificates, for more info see https://goteleport.com/docs/database-access/reference/cli/#tctl-auth-sign.`) + } } } return a.authServer.GenerateDatabaseCert(ctx, req) diff --git a/lib/auth/permissions.go b/lib/auth/permissions.go index 2ddaba528eb6e..0521ad94afd31 100644 --- a/lib/auth/permissions.go +++ b/lib/auth/permissions.go @@ -362,6 +362,80 @@ func (a *authorizer) authorizeRemoteBuiltinRole(r RemoteBuiltinRole) (*Context, }, nil } +func roleSpecForProxyWithRecordAtProxy(clusterName string) types.RoleSpecV5 { + base := roleSpecForProxy(clusterName) + base.Allow.Rules = append(base.Allow.Rules, types.NewRule(types.KindHostCert, services.RW())) + return base +} + +func roleSpecForProxy(clusterName string) types.RoleSpecV5 { + return types.RoleSpecV5{ + Allow: types.RoleConditions{ + Namespaces: []string{types.Wildcard}, + ClusterLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + AppLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + DatabaseLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + KubernetesLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, + Rules: []types.Rule{ + types.NewRule(types.KindProxy, services.RW()), + types.NewRule(types.KindOIDCRequest, services.RW()), + types.NewRule(types.KindSSHSession, services.RW()), + types.NewRule(types.KindSession, services.RO()), + types.NewRule(types.KindEvent, services.RW()), + types.NewRule(types.KindSAMLRequest, services.RW()), + types.NewRule(types.KindOIDC, services.ReadNoSecrets()), + types.NewRule(types.KindSAML, services.ReadNoSecrets()), + types.NewRule(types.KindGithub, services.ReadNoSecrets()), + types.NewRule(types.KindGithubRequest, services.RW()), + types.NewRule(types.KindNamespace, services.RO()), + types.NewRule(types.KindNode, services.RO()), + types.NewRule(types.KindAuthServer, services.RO()), + types.NewRule(types.KindReverseTunnel, services.RO()), + types.NewRule(types.KindCertAuthority, services.ReadNoSecrets()), + types.NewRule(types.KindUser, services.RO()), + types.NewRule(types.KindRole, services.RO()), + types.NewRule(types.KindClusterAuthPreference, services.RO()), + types.NewRule(types.KindClusterName, services.RO()), + types.NewRule(types.KindClusterAuditConfig, services.RO()), + types.NewRule(types.KindClusterNetworkingConfig, services.RO()), + types.NewRule(types.KindSessionRecordingConfig, services.RO()), + types.NewRule(types.KindStaticTokens, services.RO()), + types.NewRule(types.KindTunnelConnection, services.RW()), + types.NewRule(types.KindRemoteCluster, services.RO()), + types.NewRule(types.KindSemaphore, services.RW()), + types.NewRule(types.KindAppServer, services.RO()), + types.NewRule(types.KindWebSession, services.RW()), + types.NewRule(types.KindWebToken, services.RW()), + types.NewRule(types.KindKubeService, services.RW()), + types.NewRule(types.KindDatabaseServer, services.RO()), + types.NewRule(types.KindLock, services.RO()), + types.NewRule(types.KindToken, []string{types.VerbRead, types.VerbDelete}), + types.NewRule(types.KindWindowsDesktopService, services.RO()), + types.NewRule(types.KindDatabaseCertificate, []string{types.VerbCreate}), + types.NewRule(types.KindWindowsDesktop, services.RO()), + // this rule allows local proxy to update the remote cluster's host certificate authorities + // during certificates renewal + { + Resources: 
[]string{types.KindCertAuthority}, + Verbs: []string{types.VerbCreate, types.VerbRead, types.VerbUpdate}, + // allow administrative access to the host certificate authorities + // matching any cluster name except local + Where: builder.And( + builder.Equals(services.CertAuthorityTypeExpr, builder.String(string(types.HostCA))), + builder.Not( + builder.Equals( + services.ResourceNameExpr, + builder.String(clusterName), + ), + ), + ).String(), + }, + }, + }, + } +} + // RoleSetForBuiltinRole returns RoleSet for embedded builtin role func RoleSetForBuiltinRoles(clusterName string, recConfig types.SessionRecordingConfig, roles ...types.SystemRole) (services.RoleSet, error) { var definitions []types.Role @@ -488,136 +562,13 @@ func definitionForBuiltinRole(clusterName string, recConfig types.SessionRecordi if services.IsRecordAtProxy(recConfig.GetMode()) { return services.RoleFromSpec( role.String(), - types.RoleSpecV5{ - Allow: types.RoleConditions{ - Namespaces: []string{types.Wildcard}, - ClusterLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - AppLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - DatabaseLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - KubernetesLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - Rules: []types.Rule{ - types.NewRule(types.KindProxy, services.RW()), - types.NewRule(types.KindOIDCRequest, services.RW()), - types.NewRule(types.KindSSHSession, services.RW()), - types.NewRule(types.KindSession, services.RO()), - types.NewRule(types.KindEvent, services.RW()), - types.NewRule(types.KindSAMLRequest, services.RW()), - types.NewRule(types.KindOIDC, services.ReadNoSecrets()), - types.NewRule(types.KindSAML, services.ReadNoSecrets()), - types.NewRule(types.KindGithub, services.ReadNoSecrets()), - types.NewRule(types.KindGithubRequest, services.RW()), - types.NewRule(types.KindNamespace, services.RO()), - types.NewRule(types.KindNode, services.RO()), - types.NewRule(types.KindAuthServer, services.RO()), - types.NewRule(types.KindReverseTunnel, services.RO()), - types.NewRule(types.KindCertAuthority, services.ReadNoSecrets()), - types.NewRule(types.KindUser, services.RO()), - types.NewRule(types.KindRole, services.RO()), - types.NewRule(types.KindClusterAuthPreference, services.RO()), - types.NewRule(types.KindClusterName, services.RO()), - types.NewRule(types.KindClusterAuditConfig, services.RO()), - types.NewRule(types.KindClusterNetworkingConfig, services.RO()), - types.NewRule(types.KindSessionRecordingConfig, services.RO()), - types.NewRule(types.KindStaticTokens, services.RO()), - types.NewRule(types.KindTunnelConnection, services.RW()), - types.NewRule(types.KindHostCert, services.RW()), - types.NewRule(types.KindRemoteCluster, services.RO()), - types.NewRule(types.KindSemaphore, services.RW()), - types.NewRule(types.KindAppServer, services.RO()), - types.NewRule(types.KindWebSession, services.RW()), - types.NewRule(types.KindWebToken, services.RW()), - types.NewRule(types.KindKubeService, services.RW()), - types.NewRule(types.KindDatabaseServer, services.RO()), - types.NewRule(types.KindLock, services.RO()), - types.NewRule(types.KindWindowsDesktopService, services.RO()), - types.NewRule(types.KindWindowsDesktop, services.RO()), - // this rule allows local proxy to update the remote cluster's host certificate authorities - // during certificates renewal - { - Resources: []string{types.KindCertAuthority}, - Verbs: 
[]string{types.VerbCreate, types.VerbRead, types.VerbUpdate}, - // allow administrative access to the host certificate authorities - // matching any cluster name except local - Where: builder.And( - builder.Equals(services.CertAuthorityTypeExpr, builder.String(string(types.HostCA))), - builder.Not( - builder.Equals( - services.ResourceNameExpr, - builder.String(clusterName), - ), - ), - ).String(), - }, - }, - }, - }) + roleSpecForProxyWithRecordAtProxy(clusterName), + ) } return services.RoleFromSpec( role.String(), - types.RoleSpecV5{ - Allow: types.RoleConditions{ - Namespaces: []string{types.Wildcard}, - ClusterLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - NodeLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - AppLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - DatabaseLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - KubernetesLabels: types.Labels{types.Wildcard: []string{types.Wildcard}}, - Rules: []types.Rule{ - types.NewRule(types.KindProxy, services.RW()), - types.NewRule(types.KindOIDCRequest, services.RW()), - types.NewRule(types.KindSSHSession, services.RW()), - types.NewRule(types.KindSession, services.RO()), - types.NewRule(types.KindEvent, services.RW()), - types.NewRule(types.KindSAMLRequest, services.RW()), - types.NewRule(types.KindOIDC, services.ReadNoSecrets()), - types.NewRule(types.KindSAML, services.ReadNoSecrets()), - types.NewRule(types.KindGithub, services.ReadNoSecrets()), - types.NewRule(types.KindGithubRequest, services.RW()), - types.NewRule(types.KindNamespace, services.RO()), - types.NewRule(types.KindNode, services.RO()), - types.NewRule(types.KindAuthServer, services.RO()), - types.NewRule(types.KindReverseTunnel, services.RO()), - types.NewRule(types.KindCertAuthority, services.ReadNoSecrets()), - types.NewRule(types.KindUser, services.RO()), - types.NewRule(types.KindRole, services.RO()), - types.NewRule(types.KindClusterAuthPreference, services.RO()), - types.NewRule(types.KindClusterName, services.RO()), - types.NewRule(types.KindClusterAuditConfig, services.RO()), - types.NewRule(types.KindClusterNetworkingConfig, services.RO()), - types.NewRule(types.KindSessionRecordingConfig, services.RO()), - types.NewRule(types.KindStaticTokens, services.RO()), - types.NewRule(types.KindTunnelConnection, services.RW()), - types.NewRule(types.KindRemoteCluster, services.RO()), - types.NewRule(types.KindSemaphore, services.RW()), - types.NewRule(types.KindAppServer, services.RO()), - types.NewRule(types.KindWebSession, services.RW()), - types.NewRule(types.KindWebToken, services.RW()), - types.NewRule(types.KindKubeService, services.RW()), - types.NewRule(types.KindDatabaseServer, services.RO()), - types.NewRule(types.KindLock, services.RO()), - types.NewRule(types.KindWindowsDesktopService, services.RO()), - types.NewRule(types.KindWindowsDesktop, services.RO()), - // this rule allows local proxy to update the remote cluster's host certificate authorities - // during certificates renewal - { - Resources: []string{types.KindCertAuthority}, - Verbs: []string{types.VerbCreate, types.VerbRead, types.VerbUpdate}, - // allow administrative access to the certificate authority names - // matching any cluster name except local - Where: builder.And( - builder.Equals(services.CertAuthorityTypeExpr, builder.String(string(types.HostCA))), - builder.Not( - builder.Equals( - services.ResourceNameExpr, - builder.String(clusterName), - ), - ), - ).String(), - }, - }, - }, - }) + roleSpecForProxy(clusterName), + ) 
 	case types.RoleSignup:
 		return services.RoleFromSpec(
 			role.String(),
diff --git a/lib/services/presets.go b/lib/services/presets.go
index b3442eef06995..697aca7e4e96a 100644
--- a/lib/services/presets.go
+++ b/lib/services/presets.go
@@ -69,6 +69,7 @@ func NewPresetEditorRole() types.Role {
 					types.NewRule(types.KindRemoteCluster, RW()),
 					types.NewRule(types.KindToken, RW()),
 					types.NewRule(types.KindConnectionDiagnostic, RW()),
+					types.NewRule(types.KindDatabaseCertificate, RW()),
 				},
 			},
 		},
diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go
index bf951f72030d7..263856f184ad0 100644
--- a/lib/web/apiserver.go
+++ b/lib/web/apiserver.go
@@ -365,6 +365,10 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) {
 	h.GET("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile))
 	h.POST("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile))
 
+	// Sign required files to setup mTLS in other services (eg DBs)
+	// GET /webapi/sites/:site/sign?hostname=&ttl=&format=
+	h.GET("/webapi/sites/:site/sign", h.WithProvisionTokenAuth(h.signCertKeyPair))
+
 	// token generation
 	h.POST("/webapi/token", h.WithAuth(h.createTokenHandle))
 
@@ -2696,6 +2700,60 @@ func (h *Handler) WithClusterAuth(fn ClusterHandler) httprouter.Handle {
 	})
 }
 
+// ProvisionTokenAuthedHandler is an authenticated handler that is called for some existing Token
+type ProvisionTokenAuthedHandler func(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite) (interface{}, error)
+
+// WithProvisionTokenAuth ensures that the request is authenticated with a provision token.
+// Provision tokens, when used like this, are invalidated as soon as they are used,
+// regardless of whether the underlying response was a success or an error.
+func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler) httprouter.Handle { + return httplib.MakeHandler(func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) { + ctx := r.Context() + logger := h.log.WithField("request", fmt.Sprintf("%v %v", r.Method, r.URL.Path)) + + creds, err := roundtrip.ParseAuthHeaders(r) + if err != nil { + logger.WithError(err).Warn("No auth headers.") + return nil, trace.AccessDenied("need auth") + } + + if err := h.consumeTokenForAPICall(ctx, creds.Password); err != nil { + h.log.WithError(err).Warn("Failed to authenticate.") + return nil, trace.AccessDenied("need auth") + } + + clusterName := p.ByName("site") + if clusterName == currentSiteShortcut { + res, err := h.GetProxyClient().GetClusterName() + if err != nil { + h.log.WithError(err).Warn("Failed to query cluster name.") + return nil, trace.Wrap(err) + } + clusterName = res.GetClusterName() + } + + site, err := h.cfg.Proxy.GetSite(clusterName) + if err != nil { + h.log.WithError(err).WithField("cluster-name", clusterName).Warn("Failed to query site.") + return nil, trace.Wrap(err) + } + + return fn(w, r, p, site) + }) +} + +func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string) error { + token, err := h.GetProxyClient().GetToken(ctx, tokenName) + if err != nil { + return trace.Wrap(err) + } + + if err := h.GetProxyClient().DeleteToken(ctx, token.GetName()); err != nil { + return trace.Wrap(err) + } + return nil +} + type redirectHandlerFunc func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (redirectURL string) func isValidRedirectURL(redirectURL string) bool { diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 2d282f091ce9b..4012ea2752160 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -17,6 +17,7 @@ limitations under the License. 
package web import ( + "archive/zip" "bufio" "bytes" "compress/flate" @@ -2185,6 +2186,74 @@ func TestTokenGeneration(t *testing.T) { } } +func TestSignMTLS(t *testing.T) { + env := newWebPack(t, 1) + clusterName := env.server.ClusterName() + + proxy := env.proxies[0] + pack := proxy.authPack(t, "test-user@example.com") + + endpoint := pack.clt.Endpoint("webapi", "token") + re, err := pack.clt.PostJSON(context.Background(), endpoint, types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleDatabase}, + }) + require.NoError(t, err) + + var responseToken nodeJoinToken + err = json.Unmarshal(re.Bytes(), &responseToken) + require.NoError(t, err) + + // download mTLS files from /webapi/sites/:site/sign + + endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign") + endpointSignURL, err := url.Parse(endpointSign) + require.NoError(t, err) + + queryParams := endpointSignURL.Query() + queryParams.Set("hostname", "mypg.example.com") + queryParams.Set("ttl", "2h") + queryParams.Set("format", "db") + endpointSignURL.RawQuery = queryParams.Encode() + + req, err := http.NewRequest(http.MethodGet, endpointSignURL.String(), nil) + require.NoError(t, err) + req.Header.Add("Authorization", "Bearer "+responseToken.ID) + + anonHTTPClient := &http.Client{Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + }} + + resp, err := anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + require.Equal(t, resp.StatusCode, http.StatusOK) + + archive, err := io.ReadAll(resp.Body) + require.NoError(t, err) + + zipReader, err := zip.NewReader(bytes.NewReader(archive), resp.ContentLength) + require.NoError(t, err) + require.Len(t, zipReader.File, 3) + + zipContentFileNames := []string{} + for _, zipContentFile := range zipReader.File { + zipContentFileNames = append(zipContentFileNames, zipContentFile.Name) + } + + expectedFileNames := []string{"server.cas", "server.key", "server.crt"} + require.ElementsMatch(t, zipContentFileNames, expectedFileNames) + + // the token is no longer valid, so trying again should return an error + req, err = http.NewRequest(http.MethodGet, endpointSignURL.String(), nil) + require.NoError(t, err) + req.Header.Add("Authorization", "Bearer "+responseToken.ID) + + resp2nd, err := anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp2nd.Body.Close() + require.Equal(t, resp2nd.StatusCode, http.StatusForbidden) +} + func TestClusterDatabasesGet(t *testing.T) { env := newWebPack(t, 1) diff --git a/lib/web/sign.go b/lib/web/sign.go new file mode 100644 index 0000000000000..63f5c95a5c74c --- /dev/null +++ b/lib/web/sign.go @@ -0,0 +1,206 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package web + +import ( + "archive/zip" + "crypto/x509/pkix" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gravitational/trace" + "github.com/julienschmidt/httprouter" + "golang.org/x/exp/slices" + + "github.com/gravitational/teleport/api/client/proto" + apidefaults "github.com/gravitational/teleport/api/defaults" + "github.com/gravitational/teleport/lib/auth" + "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/identityfile" + "github.com/gravitational/teleport/lib/reversetunnel" + "github.com/gravitational/teleport/lib/tlsca" +) + +// signCertKeyPair returns the necessary files to set up mTLS for other services +// URL template: GET /webapi/sites/:site/sign?hostname=&ttl=&format= +// +// As an example, requesting: +// GET /webapi/sites/:site/sign?hostname=pg.example.com&ttl=2190h&format=db +// should be equivalent to running: +// tctl auth sign --host=pg.example.com --ttl=2190h --format=db +// +// This endpoint returns a zip compressed archive containing the required files to setup mTLS for the service. +// As an example, for db format it returns an archive with 3 files: server.cas, server.crt and server.key +func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite) (interface{}, error) { + ctx := r.Context() + + req := signCertKeyPairReq{ + Hostname: r.URL.Query().Get("hostname"), + FormatString: r.URL.Query().Get("format"), + TTLString: r.URL.Query().Get("ttl"), + } + if err := req.CheckAndSetDefaults(); err != nil { + return nil, trace.Wrap(err) + } + + key, err := client.NewKey() + if err != nil { + return nil, trace.Wrap(err) + } + + subject := pkix.Name{CommonName: req.Hostname} + csr, err := tlsca.GenerateCertificateRequestPEM(subject, key.Priv) + if err != nil { + return nil, trace.Wrap(err) + } + + clusterAPI := h.auth.proxyClient + resp, err := clusterAPI.GenerateDatabaseCert( + ctx, + &proto.DatabaseCertRequest{ + CSR: csr, + // Important to include SANs since CommonName has been deprecated + ServerNames: []string{req.Hostname}, + // Include legacy ServerName for compatibility. + ServerName: req.Hostname, + TTL: proto.Duration(req.TTL), + RequesterName: proto.DatabaseCertRequest_TCTL, + }, + ) + if err != nil { + return nil, trace.Wrap(err) + } + + outputDir, err := os.MkdirTemp("", "teleport-auth-sign") + if err != nil { + return nil, trace.Wrap(err) + } + defer os.RemoveAll(outputDir) + + key.TLSCert = resp.Cert + key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} + filesWritten, err := identityfile.Write(identityfile.WriteConfig{ + OutputPath: outputDir + "/server", + Key: key, + Format: req.Format, + OverwriteDestination: true, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + archiveBaseName := fmt.Sprintf("teleport_mTLS_%s.zip", req.Hostname) + archiveFullPath := fmt.Sprintf("%s/%s", outputDir, archiveBaseName) + + if err := buildArchiveFromFiles(filesWritten, archiveFullPath); err != nil { + return nil, trace.Wrap(err) + } + + // Set file name + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment;filename="%v"`, archiveBaseName)) + + // ServeFile sets the correct headers: Content-Type, Content-Length and Accept-Ranges. 
+ // It also handles the Range negotiation + http.ServeFile(w, r, archiveFullPath) + + return nil, nil +} + +func buildArchiveFromFiles(files []string, zipLocation string) error { + // We remove the entire directory above. + // No need to remove each file seperatly. + archive, err := os.Create(zipLocation) + if err != nil { + return trace.Wrap(err) + } + defer archive.Close() + + zipWriter := zip.NewWriter(archive) + defer zipWriter.Close() + + for _, fullFilename := range files { + baseFilename := filepath.Base(fullFilename) + if err := addFileToZipWriter(fullFilename, baseFilename, zipWriter); err != nil { + return trace.Wrap(err) + } + } + + return nil +} + +func addFileToZipWriter(fullFilename string, baseFilename string, zipWriter *zip.Writer) error { + f, err := os.Open(fullFilename) + if err != nil { + return trace.Wrap(err) + } + defer f.Close() + + zipFileWriter, err := zipWriter.Create(baseFilename) + if err != nil { + return trace.Wrap(err) + } + + if _, err := io.Copy(zipFileWriter, f); err != nil { + return trace.Wrap(err) + } + + return nil +} + +type signCertKeyPairReq struct { + Hostname string + + FormatString string + Format identityfile.Format + + TTLString string + TTL time.Duration +} + +// TODO(marco): only format db is supported +var supportedFormats = []identityfile.Format{ + identityfile.FormatDatabase, +} + +func (s *signCertKeyPairReq) CheckAndSetDefaults() error { + if s.Hostname == "" { + return trace.BadParameter("missing hostname") + } + + if s.FormatString == "" { + return trace.BadParameter("missing format") + } + s.Format = identityfile.Format(s.FormatString) + if !slices.Contains(supportedFormats, s.Format) { + return trace.BadParameter("invalid format") + } + + if s.TTLString == "" { + s.TTLString = apidefaults.CertDuration.String() + } + ttl, err := time.ParseDuration(s.TTLString) + if err != nil { + return trace.BadParameter("invalid ttl (please use https://pkg.go.dev/time#ParseDuration notation)") + } + s.TTL = ttl + + return nil +} From 5505cb5d1d925f1e93d9ad6221ef0e260b316d62 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Tue, 19 Jul 2022 15:26:04 +0100 Subject: [PATCH 02/10] Write and serve archive from memory It also changes the format from zip to tar.gz --- .../identityfile/inmemory_config_writer.go | 133 ++++++++++++++++++ .../inmemory_config_writer_test.go | 61 ++++++++ lib/web/apiserver_test.go | 44 +++--- lib/web/sign.go | 85 +++++------ 4 files changed, 256 insertions(+), 67 deletions(-) create mode 100644 lib/client/identityfile/inmemory_config_writer.go create mode 100644 lib/client/identityfile/inmemory_config_writer_test.go diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go new file mode 100644 index 0000000000000..a6bed540bfe3c --- /dev/null +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -0,0 +1,133 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package identityfile + +import ( + "io/fs" + "os" + "sync" + "time" +) + +type InMemoryFileInfo struct { + name string + size int64 + mode fs.FileMode + modTime time.Time + isDir bool + content []byte +} + +// Name returns the file's name +func (fi InMemoryFileInfo) Name() string { + return fi.name +} + +// Size returns the file size (calculated when writting the file) +func (fi InMemoryFileInfo) Size() int64 { + return fi.size +} + +// Mode returns the fs.FileMode +func (fi InMemoryFileInfo) Mode() fs.FileMode { + return fi.mode +} + +// ModTime returns the last modification time +func (fi InMemoryFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir checks whether the file is a directory +func (fi InMemoryFileInfo) IsDir() bool { + return fi.isDir +} + +// Sys is platform independent +// InMemoryFileInfo's implementation is no-op +func (fi InMemoryFileInfo) Sys() interface{} { + return nil +} + +func NewInMemoryConfigWriter() InMemoryConfigWriter { + return InMemoryConfigWriter{ + mux: &sync.RWMutex{}, + files: make(map[string]InMemoryFileInfo), + } +} + +// InMemoryConfigWriter is a basic virtual file system abstraction that writes into memory +// instead of writting to a more presistent storage. +type InMemoryConfigWriter struct { + mux *sync.RWMutex + files map[string]InMemoryFileInfo +} + +// WriteFile writes the given data to path `name` +// It replaces the file if it already exists +func (m InMemoryConfigWriter) WriteFile(name string, data []byte, perm os.FileMode) error { + m.mux.Lock() + defer m.mux.Unlock() + m.files[name] = InMemoryFileInfo{ + name: name, + size: int64(len(data)), + mode: perm, + modTime: time.Now(), + content: data, + isDir: false, + } + + return nil +} + +// Remove the file. +// If the file does not exist, Remove is a no-op +func (m InMemoryConfigWriter) Remove(name string) error { + m.mux.Lock() + defer m.mux.Unlock() + + delete(m.files, name) + return nil +} + +// Stat returns the FileInfo of the given file. +// Returns fs.ErrNotExists if the file is not present +func (m InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { + m.mux.Lock() + defer m.mux.Unlock() + + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f, nil +} + +// Read returns the file contents. +// Returns fs.ErrNotExists if the file is not present +func (m InMemoryConfigWriter) Read(name string) ([]byte, error) { + m.mux.RLock() + defer m.mux.RUnlock() + + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f.content, nil +} diff --git a/lib/client/identityfile/inmemory_config_writer_test.go b/lib/client/identityfile/inmemory_config_writer_test.go new file mode 100644 index 0000000000000..ca3f9735dd2c8 --- /dev/null +++ b/lib/client/identityfile/inmemory_config_writer_test.go @@ -0,0 +1,61 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package identityfile + +import ( + "bytes" + "io/fs" + "os" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestInMemory(t *testing.T) { + virtualFS := NewInMemoryConfigWriter() + + content := bytes.Repeat([]byte("A"), 4000) + filename := "test1" + fileMode := os.FileMode(0644) + fileSize := int64(len(content)) + + err := virtualFS.WriteFile(filename, content, fileMode) + require.NoError(t, err) + + bs, err := virtualFS.Read(filename) + require.NoError(t, err) + require.Equal(t, bs, content) + + fileStat, err := virtualFS.Stat(filename) + require.NoError(t, err) + require.Equal(t, fileStat.Name(), filename) + require.Equal(t, fileStat.Mode(), fileMode) + require.Equal(t, fileStat.Size(), fileSize) + require.False(t, fileStat.IsDir()) + require.WithinDuration(t, fileStat.ModTime(), time.Now(), time.Second) + + err = virtualFS.Remove(filename) + require.NoError(t, err) + + _, err = virtualFS.Read(filename) + require.ErrorIs(t, err, fs.ErrNotExist) + + _, err = virtualFS.Stat(filename) + require.ErrorIs(t, err, fs.ErrNotExist) + +} diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 4012ea2752160..75c94139dada3 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -17,10 +17,11 @@ limitations under the License. package web import ( - "archive/zip" + "archive/tar" "bufio" "bytes" "compress/flate" + "compress/gzip" "context" "crypto/tls" "encoding/base32" @@ -2191,7 +2192,7 @@ func TestSignMTLS(t *testing.T) { clusterName := env.server.ClusterName() proxy := env.proxies[0] - pack := proxy.authPack(t, "test-user@example.com") + pack := proxy.authPack(t, "test-user@example.com", nil) endpoint := pack.clt.Endpoint("webapi", "token") re, err := pack.clt.PostJSON(context.Background(), endpoint, types.ProvisionTokenSpecV2{ @@ -2204,7 +2205,6 @@ func TestSignMTLS(t *testing.T) { require.NoError(t, err) // download mTLS files from /webapi/sites/:site/sign - endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign") endpointSignURL, err := url.Parse(endpointSign) require.NoError(t, err) @@ -2219,39 +2219,47 @@ func TestSignMTLS(t *testing.T) { require.NoError(t, err) req.Header.Add("Authorization", "Bearer "+responseToken.ID) - anonHTTPClient := &http.Client{Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - }} + anonHTTPClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } resp, err := anonHTTPClient.Do(req) require.NoError(t, err) defer resp.Body.Close() - require.Equal(t, resp.StatusCode, http.StatusOK) + require.Equal(t, http.StatusOK, resp.StatusCode) - archive, err := io.ReadAll(resp.Body) + gzipReader, err := gzip.NewReader(resp.Body) require.NoError(t, err) - zipReader, err := zip.NewReader(bytes.NewReader(archive), resp.ContentLength) - require.NoError(t, err) - require.Len(t, zipReader.File, 3) + tarReader := tar.NewReader(gzipReader) - zipContentFileNames := []string{} - for _, zipContentFile := range zipReader.File { - zipContentFileNames = append(zipContentFileNames, zipContentFile.Name) + tarContentFileNames := []string{} + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + require.Equal(t, byte(tar.TypeReg), header.Typeflag) + tarContentFileNames = append(tarContentFileNames, header.Name) } expectedFileNames := []string{"server.cas", "server.key", "server.crt"} - require.ElementsMatch(t, zipContentFileNames, expectedFileNames) + 
require.ElementsMatch(t, tarContentFileNames, expectedFileNames) // the token is no longer valid, so trying again should return an error req, err = http.NewRequest(http.MethodGet, endpointSignURL.String(), nil) require.NoError(t, err) req.Header.Add("Authorization", "Bearer "+responseToken.ID) - resp2nd, err := anonHTTPClient.Do(req) + respSecondCall, err := anonHTTPClient.Do(req) require.NoError(t, err) - defer resp2nd.Body.Close() - require.Equal(t, resp2nd.StatusCode, http.StatusForbidden) + defer respSecondCall.Body.Close() + require.Equal(t, http.StatusForbidden, respSecondCall.StatusCode) } func TestClusterDatabasesGet(t *testing.T) { diff --git a/lib/web/sign.go b/lib/web/sign.go index 63f5c95a5c74c..6ac358c769702 100644 --- a/lib/web/sign.go +++ b/lib/web/sign.go @@ -17,13 +17,12 @@ limitations under the License. package web import ( - "archive/zip" + "archive/tar" + "bytes" + "compress/gzip" "crypto/x509/pkix" "fmt" - "io" "net/http" - "os" - "path/filepath" "time" "github.com/gravitational/trace" @@ -89,80 +88,68 @@ func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p http return nil, trace.Wrap(err) } - outputDir, err := os.MkdirTemp("", "teleport-auth-sign") - if err != nil { - return nil, trace.Wrap(err) - } - defer os.RemoveAll(outputDir) - key.TLSCert = resp.Cert key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} + + virtualFS := identityfile.NewInMemoryConfigWriter() + filesWritten, err := identityfile.Write(identityfile.WriteConfig{ - OutputPath: outputDir + "/server", + OutputPath: "server", Key: key, Format: req.Format, OverwriteDestination: true, + Writer: virtualFS, }) if err != nil { return nil, trace.Wrap(err) } - archiveBaseName := fmt.Sprintf("teleport_mTLS_%s.zip", req.Hostname) - archiveFullPath := fmt.Sprintf("%s/%s", outputDir, archiveBaseName) + archiveName := fmt.Sprintf("teleport_mTLS_%s.tar.gz", req.Hostname) - if err := buildArchiveFromFiles(filesWritten, archiveFullPath); err != nil { + archiveBytes, err := archiveFromFiles(filesWritten, virtualFS) + if err != nil { return nil, trace.Wrap(err) } // Set file name - w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment;filename="%v"`, archiveBaseName)) + w.Header().Set("Content-Disposition", fmt.Sprintf(`attachment;filename="%v"`, archiveName)) - // ServeFile sets the correct headers: Content-Type, Content-Length and Accept-Ranges. + // ServeContent sets the correct headers: Content-Type, Content-Length and Accept-Ranges. // It also handles the Range negotiation - http.ServeFile(w, r, archiveFullPath) + http.ServeContent(w, r, archiveName, time.Now(), bytes.NewReader(archiveBytes.Bytes())) return nil, nil } -func buildArchiveFromFiles(files []string, zipLocation string) error { - // We remove the entire directory above. - // No need to remove each file seperatly. 
- archive, err := os.Create(zipLocation) - if err != nil { - return trace.Wrap(err) - } - defer archive.Close() +// archiveFromFiles builds a Tar Gzip archive in memory, reading the files from the virtual FS +func archiveFromFiles(files []string, virtualFS identityfile.InMemoryConfigWriter) (*bytes.Buffer, error) { + archiveBytes := &bytes.Buffer{} - zipWriter := zip.NewWriter(archive) - defer zipWriter.Close() - - for _, fullFilename := range files { - baseFilename := filepath.Base(fullFilename) - if err := addFileToZipWriter(fullFilename, baseFilename, zipWriter); err != nil { - return trace.Wrap(err) - } - } + gzipWriter := gzip.NewWriter(archiveBytes) + defer gzipWriter.Close() - return nil -} + tarWriter := tar.NewWriter(gzipWriter) + defer tarWriter.Close() -func addFileToZipWriter(fullFilename string, baseFilename string, zipWriter *zip.Writer) error { - f, err := os.Open(fullFilename) - if err != nil { - return trace.Wrap(err) - } - defer f.Close() + for _, filename := range files { + bs, err := virtualFS.Read(filename) + if err != nil { + return nil, trace.Wrap(err) + } - zipFileWriter, err := zipWriter.Create(baseFilename) - if err != nil { - return trace.Wrap(err) - } + if err := tarWriter.WriteHeader(&tar.Header{ + Name: filename, + Size: int64(len(bs)), + }); err != nil { + return nil, trace.Wrap(err) + } - if _, err := io.Copy(zipFileWriter, f); err != nil { - return trace.Wrap(err) + if _, err := tarWriter.Write(bs); err != nil { + return nil, trace.Wrap(err) + } } - return nil + return archiveBytes, nil } type signCertKeyPairReq struct { From c01e4213a55f4c29cdbf4a71b00bf9cc5df898a2 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Tue, 19 Jul 2022 16:26:48 +0100 Subject: [PATCH 03/10] Change from GET to POST for auth sign requests --- .../identityfile/inmemory_config_writer.go | 4 +- lib/web/apiserver.go | 4 +- lib/web/apiserver_test.go | 23 +++--- lib/web/sign.go | 80 ++++++++++--------- 4 files changed, 61 insertions(+), 50 deletions(-) diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go index a6bed540bfe3c..cb84333340df5 100644 --- a/lib/client/identityfile/inmemory_config_writer.go +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -37,7 +37,7 @@ func (fi InMemoryFileInfo) Name() string { return fi.name } -// Size returns the file size (calculated when writting the file) +// Size returns the file size (calculated when writing the file) func (fi InMemoryFileInfo) Size() int64 { return fi.size } @@ -71,7 +71,7 @@ func NewInMemoryConfigWriter() InMemoryConfigWriter { } // InMemoryConfigWriter is a basic virtual file system abstraction that writes into memory -// instead of writting to a more presistent storage. +// instead of writing to a more persistent storage. 
type InMemoryConfigWriter struct { mux *sync.RWMutex files map[string]InMemoryFileInfo diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index 263856f184ad0..6decc533a18d3 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -366,8 +366,8 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { h.POST("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile)) // Sign required files to setup mTLS in other services (eg DBs) - // GET /webapi/sites/:site/sign?hostname=&ttl=&format= - h.GET("/webapi/sites/:site/sign", h.WithProvisionTokenAuth(h.signCertKeyPair)) + // POST /webapi/sites/:site/sign + h.POST("/webapi/sites/:site/sign", h.WithProvisionTokenAuth(h.signCertKeyPair)) // token generation h.POST("/webapi/token", h.WithAuth(h.createTokenHandle)) diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 75c94139dada3..23c106d3b6244 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -2206,17 +2206,21 @@ func TestSignMTLS(t *testing.T) { // download mTLS files from /webapi/sites/:site/sign endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign") - endpointSignURL, err := url.Parse(endpointSign) - require.NoError(t, err) - queryParams := endpointSignURL.Query() - queryParams.Set("hostname", "mypg.example.com") - queryParams.Set("ttl", "2h") - queryParams.Set("format", "db") - endpointSignURL.RawQuery = queryParams.Encode() + bs, err := json.Marshal(struct { + Hostname string `json:"hostname"` + TTL string `json:"ttl"` + Format string `json:"format"` + }{ + Hostname: "mypg.example.com", + TTL: "2h", + Format: "db", + }) + require.NoError(t, err) - req, err := http.NewRequest(http.MethodGet, endpointSignURL.String(), nil) + req, err := http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) require.NoError(t, err) + req.Header.Add("Content-Type", "application/json") req.Header.Add("Authorization", "Bearer "+responseToken.ID) anonHTTPClient := &http.Client{ @@ -2252,8 +2256,9 @@ func TestSignMTLS(t *testing.T) { require.ElementsMatch(t, tarContentFileNames, expectedFileNames) // the token is no longer valid, so trying again should return an error - req, err = http.NewRequest(http.MethodGet, endpointSignURL.String(), nil) + req, err = http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) require.NoError(t, err) + req.Header.Add("Content-Type", "application/json") req.Header.Add("Authorization", "Bearer "+responseToken.ID) respSecondCall, err := anonHTTPClient.Do(req) diff --git a/lib/web/sign.go b/lib/web/sign.go index 6ac358c769702..70a64af7f819b 100644 --- a/lib/web/sign.go +++ b/lib/web/sign.go @@ -34,29 +34,30 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/client" "github.com/gravitational/teleport/lib/client/identityfile" + "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/reversetunnel" "github.com/gravitational/teleport/lib/tlsca" ) -// signCertKeyPair returns the necessary files to set up mTLS for other services -// URL template: GET /webapi/sites/:site/sign?hostname=&ttl=&format= -// -// As an example, requesting: -// GET /webapi/sites/:site/sign?hostname=pg.example.com&ttl=2190h&format=db -// should be equivalent to running: -// tctl auth sign --host=pg.example.com --ttl=2190h --format=db -// -// This endpoint returns a zip compressed archive containing the required files to setup mTLS for the service. 
-// As an example, for db format it returns an archive with 3 files: server.cas, server.crt and server.key +/* signCertKeyPair returns the necessary files to set up mTLS for other services +This is the equivalent of running the tctl command +As an example, requesting: +POST /webapi/sites/mycluster/sign +{ + "hostname": "pg.example.com", + "ttl": "2190h", + "format": "db" +} + +Should be equivalent to running: + tctl auth sign --host=pg.example.com --ttl=2190h --format=db + +This endpoint returns a tar.gz compressed archive containing the required files to setup mTLS for the service. +*/ func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite) (interface{}, error) { ctx := r.Context() - - req := signCertKeyPairReq{ - Hostname: r.URL.Query().Get("hostname"), - FormatString: r.URL.Query().Get("format"), - TTLString: r.URL.Query().Get("ttl"), - } - if err := req.CheckAndSetDefaults(); err != nil { + req, err := parseSignCertKeyPair(r) + if err != nil { return nil, trace.Wrap(err) } @@ -154,12 +155,8 @@ func archiveFromFiles(files []string, virtualFS identityfile.InMemoryConfigWrite type signCertKeyPairReq struct { Hostname string - - FormatString string - Format identityfile.Format - - TTLString string - TTL time.Duration + Format identityfile.Format + TTL time.Duration } // TODO(marco): only format db is supported @@ -167,27 +164,36 @@ var supportedFormats = []identityfile.Format{ identityfile.FormatDatabase, } -func (s *signCertKeyPairReq) CheckAndSetDefaults() error { - if s.Hostname == "" { - return trace.BadParameter("missing hostname") +func parseSignCertKeyPair(r *http.Request) (*signCertKeyPairReq, error) { + reqRaw := struct { + Hostname string `json:"hostname,omitempty"` + Format string `json:"format,omitempty"` + TTL string `json:"ttl,omitempty"` + }{} + if err := httplib.ReadJSON(r, &reqRaw); err != nil { + return nil, trace.Wrap(err) } - if s.FormatString == "" { - return trace.BadParameter("missing format") + ret := &signCertKeyPairReq{} + + ret.Hostname = reqRaw.Hostname + + if reqRaw.Format == "" { + return nil, trace.BadParameter("missing format") } - s.Format = identityfile.Format(s.FormatString) - if !slices.Contains(supportedFormats, s.Format) { - return trace.BadParameter("invalid format") + ret.Format = identityfile.Format(reqRaw.Format) + if !slices.Contains(supportedFormats, ret.Format) { + return nil, trace.BadParameter("invalid format") } - if s.TTLString == "" { - s.TTLString = apidefaults.CertDuration.String() + if reqRaw.TTL == "" { + reqRaw.TTL = apidefaults.CertDuration.String() } - ttl, err := time.ParseDuration(s.TTLString) + ttl, err := time.ParseDuration(reqRaw.TTL) if err != nil { - return trace.BadParameter("invalid ttl (please use https://pkg.go.dev/time#ParseDuration notation)") + return nil, trace.BadParameter("invalid ttl (please use https://pkg.go.dev/time#ParseDuration notation)") } - s.TTL = ttl + ret.TTL = ttl - return nil + return ret, nil } From df16f7b1b2185b16d7be27d9095157432a237fb1 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Wed, 20 Jul 2022 10:42:00 +0100 Subject: [PATCH 04/10] set the correct file mode when generating the files --- lib/web/apiserver_test.go | 1 + lib/web/sign.go | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 23c106d3b6244..fddda8dfbac46 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -2249,6 +2249,7 @@ func TestSignMTLS(t *testing.T) { } require.NoError(t, 
err) require.Equal(t, byte(tar.TypeReg), header.Typeflag) + require.Equal(t, int64(0600), header.Mode) tarContentFileNames = append(tarContentFileNames, header.Name) } diff --git a/lib/web/sign.go b/lib/web/sign.go index 70a64af7f819b..0eacaf4beb11a 100644 --- a/lib/web/sign.go +++ b/lib/web/sign.go @@ -141,6 +141,12 @@ func archiveFromFiles(files []string, virtualFS identityfile.InMemoryConfigWrite if err := tarWriter.WriteHeader(&tar.Header{ Name: filename, Size: int64(len(bs)), + + // https://www.postgresql.org/docs/current/libpq-ssl.html + // On Unix systems, the permissions on the private key file must disallow any access to world or group; + // achieve this by a command such as chmod 0600 ~/.postgresql/postgresql.key. + // Alternatively, the file can be owned by root and have group read access (that is, 0640 permissions). + Mode: 0600, }); err != nil { return nil, trace.Wrap(err) } From 9c350cbc9a2ea585600ab72d2c85dc7ad75520b7 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Wed, 20 Jul 2022 11:57:02 +0100 Subject: [PATCH 05/10] change to readers lock when stat'ing a file --- lib/client/identityfile/inmemory_config_writer.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go index cb84333340df5..1c51a370d2eec 100644 --- a/lib/client/identityfile/inmemory_config_writer.go +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -107,8 +107,8 @@ func (m InMemoryConfigWriter) Remove(name string) error { // Stat returns the FileInfo of the given file. // Returns fs.ErrNotExists if the file is not present func (m InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { - m.mux.Lock() - defer m.mux.Unlock() + m.mux.RLock() + defer m.mux.RUnlock() f, found := m.files[name] if !found { From ddd54aa7fcfef657987c94a60f13f5455ca1aebc Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Tue, 26 Jul 2022 16:02:25 +0100 Subject: [PATCH 06/10] roman review part 1 --- lib/auth/auth_with_roles.go | 7 +- .../identityfile/inmemory_config_writer.go | 11 +- .../inmemory_config_writer_test.go | 4 +- lib/srv/mtls.go | 216 ++++++++++++++++++ lib/utils/archive.go | 58 +++++ lib/utils/archive_test.go | 115 ++++++++++ lib/web/apiserver.go | 14 +- lib/web/sign.go | 152 ++++-------- tool/tctl/common/auth_command.go | 166 ++------------ 9 files changed, 463 insertions(+), 280 deletions(-) create mode 100644 lib/srv/mtls.go create mode 100644 lib/utils/archive.go create mode 100644 lib/utils/archive_test.go diff --git a/lib/auth/auth_with_roles.go b/lib/auth/auth_with_roles.go index 1a3b04b03fba0..16debe5ce9ed2 100644 --- a/lib/auth/auth_with_roles.go +++ b/lib/auth/auth_with_roles.go @@ -18,6 +18,7 @@ package auth import ( "context" + "fmt" "net/url" "time" @@ -3707,9 +3708,9 @@ func (a *ServerWithRoles) GenerateDatabaseCert(ctx context.Context, req *proto.D // user that is allowed to impersonate database service. if !a.hasBuiltinRole(types.RoleDatabase, types.RoleAdmin) { if err := a.canImpersonateBuiltinRole(types.RoleDatabase); err != nil { - log.WithError(err).Warnf("User %v tried to generate database certificate but is not allowed to impersonate %q system role.", - a.context.User.GetName(), types.RoleDatabase) - return nil, trace.AccessDenied(`access denied. 
The user must be able to impersonate the builtin role and user "Db" in order to generate database certificates, for more info see https://goteleport.com/docs/database-access/reference/cli/#tctl-auth-sign.`) + log.WithError(err).Warnf("User %v tried to generate database certificate but does not have '%s' permission for '%s' kind, nor is allowed to impersonate %q system role", + a.context.User.GetName(), types.VerbCreate, types.KindDatabaseCertificate, types.RoleDatabase) + return nil, trace.AccessDenied(fmt.Sprintf("access denied. User must have '%s' permission for '%s' kind to generate the certificate ", types.VerbCreate, types.KindDatabaseCertificate)) } } } diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go index 1c51a370d2eec..c8e420c050906 100644 --- a/lib/client/identityfile/inmemory_config_writer.go +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -21,6 +21,8 @@ import ( "os" "sync" "time" + + "github.com/gravitational/trace" ) type InMemoryFileInfo struct { @@ -118,9 +120,9 @@ func (m InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { return f, nil } -// Read returns the file contents. +// ReadFile returns the file contents. // Returns fs.ErrNotExists if the file is not present -func (m InMemoryConfigWriter) Read(name string) ([]byte, error) { +func (m InMemoryConfigWriter) ReadFile(name string) ([]byte, error) { m.mux.RLock() defer m.mux.RUnlock() @@ -131,3 +133,8 @@ func (m InMemoryConfigWriter) Read(name string) ([]byte, error) { return f.content, nil } + +// Open is not implemented but exists here to satisfy the io/fs.ReadFileFS interface. +func (m InMemoryConfigWriter) Open(name string) (fs.File, error) { + return nil, trace.NotImplemented("Open is not implemented for InMemoryConfigWriter") +} diff --git a/lib/client/identityfile/inmemory_config_writer_test.go b/lib/client/identityfile/inmemory_config_writer_test.go index ca3f9735dd2c8..44ed0786f668b 100644 --- a/lib/client/identityfile/inmemory_config_writer_test.go +++ b/lib/client/identityfile/inmemory_config_writer_test.go @@ -37,7 +37,7 @@ func TestInMemory(t *testing.T) { err := virtualFS.WriteFile(filename, content, fileMode) require.NoError(t, err) - bs, err := virtualFS.Read(filename) + bs, err := virtualFS.ReadFile(filename) require.NoError(t, err) require.Equal(t, bs, content) @@ -52,7 +52,7 @@ func TestInMemory(t *testing.T) { err = virtualFS.Remove(filename) require.NoError(t, err) - _, err = virtualFS.Read(filename) + _, err = virtualFS.ReadFile(filename) require.ErrorIs(t, err, fs.ErrNotExist) _, err = virtualFS.Stat(filename) diff --git a/lib/srv/mtls.go b/lib/srv/mtls.go new file mode 100644 index 0000000000000..7d7f3695587e7 --- /dev/null +++ b/lib/srv/mtls.go @@ -0,0 +1,216 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package srv + +import ( + "context" + "crypto/x509/pkix" + "io" + "strings" + "text/template" + "time" + + "github.com/gravitational/teleport/api/client/proto" + "github.com/gravitational/teleport/lib/auth" + "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/identityfile" + "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/trace" +) + +type GenerateMTLSFilesRequest struct { + ClusterAPI auth.ClientI + Principals []string + OutputFormat identityfile.Format + OutputCanOverwrite bool + OutputLocation string + IdentityFileWriter identityfile.ConfigWriter + TTL time.Duration + HelperMessageWriter io.Writer +} + +func GenerateMTLSFiles(ctx context.Context, req GenerateMTLSFilesRequest) ([]string, error) { + if req.OutputFormat == identityfile.FormatSnowflake && len(req.Principals) == 1 && req.Principals[0] == "" { + return nil, trace.BadParameter("at least one hostname must be specified") + } + + // For CockroachDB node certificates, CommonName must be "node": + // + // https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates + if req.OutputFormat == identityfile.FormatCockroach { + req.Principals = append([]string{"node"}, req.Principals...) + } + + subject := pkix.Name{CommonName: req.Principals[0]} + + if req.OutputFormat == identityfile.FormatMongo { + // Include Organization attribute in MongoDB certificates as well. + // + // When using X.509 member authentication, MongoDB requires O or OU to + // be non-empty so this will make the certs we generate compatible: + // + // https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509 + // + // The actual O value doesn't matter as long as it matches on all + // MongoDB cluster members so set it to the Teleport cluster name + // to avoid hardcoding anything. + + clusterNameType, err := req.ClusterAPI.GetClusterName() + if err != nil { + return nil, trace.Wrap(err) + } + + subject.Organization = []string{clusterNameType.GetClusterName()} + } + + key, err := client.NewKey() + if err != nil { + return nil, trace.Wrap(err) + } + + csr, err := tlsca.GenerateCertificateRequestPEM(subject, key.Priv) + if err != nil { + return nil, trace.Wrap(err) + } + + resp, err := req.ClusterAPI.GenerateDatabaseCert(ctx, + &proto.DatabaseCertRequest{ + CSR: csr, + // Important to include SANs since CommonName has been deprecated + // since Go 1.15: + // https://golang.org/doc/go1.15#commonname + ServerNames: req.Principals, + // Include legacy ServerName for compatibility. 
+ ServerName: req.Principals[0], + TTL: proto.Duration(req.TTL), + RequesterName: proto.DatabaseCertRequest_TCTL, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + key.TLSCert = resp.Cert + key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} + filesWritten, err := identityfile.Write(identityfile.WriteConfig{ + OutputPath: req.OutputLocation, + Key: key, + Format: req.OutputFormat, + OverwriteDestination: req.OutputCanOverwrite, + Writer: req.IdentityFileWriter, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + if err := WriteHelperMessageDBmTLS(req.HelperMessageWriter, filesWritten, req.OutputLocation, req.OutputFormat); err != nil { + return nil, trace.Wrap(err) + } + + return filesWritten, nil +} + +var mapIdentityFileFormatHelperTemplate = map[identityfile.Format]*template.Template{ + identityfile.FormatDatabase: dbAuthSignTpl, + identityfile.FormatMongo: mongoAuthSignTpl, + identityfile.FormatCockroach: cockroachAuthSignTpl, + identityfile.FormatRedis: redisAuthSignTpl, + identityfile.FormatSnowflake: snowflakeAuthSignTpl, +} + +func WriteHelperMessageDBmTLS(writer io.Writer, filesWritten []string, output string, outputFormat identityfile.Format) error { + if writer == nil { + return nil + } + + tpl, found := mapIdentityFileFormatHelperTemplate[outputFormat] + if !found { + // This format doesn't have a recommended configuration. + // Consider adding one to ease the installation for the end-user + return nil + } + + tplVars := map[string]interface{}{ + "files": strings.Join(filesWritten, ", "), + "output": output, + } + + if outputFormat == identityfile.FormatSnowflake { + delete(tplVars, "output") + } + + return trace.Wrap(tpl.Execute(writer, tplVars)) +} + +var ( + // dbAuthSignTpl is printed when user generates credentials for a self-hosted database. + dbAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +To enable mutual TLS on your PostgreSQL server, add the following to its +postgresql.conf configuration file: + +ssl = on +ssl_cert_file = '/path/to/{{.output}}.crt' +ssl_key_file = '/path/to/{{.output}}.key' +ssl_ca_file = '/path/to/{{.output}}.cas' + +To enable mutual TLS on your MySQL server, add the following to its +mysql.cnf configuration file: + +[mysqld] +require_secure_transport=ON +ssl-cert=/path/to/{{.output}}.crt +ssl-key=/path/to/{{.output}}.key +ssl-ca=/path/to/{{.output}}.cas +`)) + // mongoAuthSignTpl is printed when user generates credentials for a MongoDB database. + mongoAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +To enable mutual TLS on your MongoDB server, add the following to its +mongod.yaml configuration file: + +net: + tls: + mode: requireTLS + certificateKeyFile: /path/to/{{.output}}.crt + CAFile: /path/to/{{.output}}.cas +`)) + cockroachAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +To enable mutual TLS on your CockroachDB server, point it to the certs +directory using --certs-dir flag: + +cockroach start \ + --certs-dir={{.output}} \ + # other flags... +`)) + + redisAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. 
+ +To enable mutual TLS on your Redis server, add the following to your redis.conf: + +tls-ca-cert-file /path/to/{{.output}}.cas +tls-cert-file /path/to/{{.output}}.crt +tls-key-file /path/to/{{.output}}.key +tls-protocols "TLSv1.2 TLSv1.3" +`)) + + snowflakeAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +Please add the generated key to the Snowflake users as described here: +https://docs.snowflake.com/en/user-guide/key-pair-auth.html#step-4-assign-the-public-key-to-a-snowflake-user +`)) +) diff --git a/lib/utils/archive.go b/lib/utils/archive.go new file mode 100644 index 0000000000000..d71f90dd3b9cd --- /dev/null +++ b/lib/utils/archive.go @@ -0,0 +1,58 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package utils + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "io/fs" + + "github.com/gravitational/trace" +) + +// CompressTarGzArchive creates a Tar Gzip archive in memory, reading the files using the provided file reader +func CompressTarGzArchive(files []string, fileReader fs.ReadFileFS, fileMode fs.FileMode) (*bytes.Buffer, error) { + archiveBytes := &bytes.Buffer{} + + gzipWriter := gzip.NewWriter(archiveBytes) + defer gzipWriter.Close() + + tarWriter := tar.NewWriter(gzipWriter) + defer tarWriter.Close() + + for _, filename := range files { + bs, err := fileReader.ReadFile(filename) + if err != nil { + return nil, trace.Wrap(err) + } + + if err := tarWriter.WriteHeader(&tar.Header{ + Name: filename, + Size: int64(len(bs)), + Mode: int64(fileMode), + }); err != nil { + return nil, trace.Wrap(err) + } + + if _, err := tarWriter.Write(bs); err != nil { + return nil, trace.Wrap(err) + } + } + + return archiveBytes, nil +} diff --git a/lib/utils/archive_test.go b/lib/utils/archive_test.go new file mode 100644 index 0000000000000..7aa5863441483 --- /dev/null +++ b/lib/utils/archive_test.go @@ -0,0 +1,115 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "archive/tar" + "compress/gzip" + "io" + "io/fs" + "testing" + + "github.com/gravitational/teleport" + "github.com/gravitational/trace" + "github.com/stretchr/testify/require" +) + +type mockFileReader struct { + files map[string][]byte +} + +func (m mockFileReader) ReadFile(name string) ([]byte, error) { + contents, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return contents, nil +} + +func (m mockFileReader) Open(name string) (fs.File, error) { + return nil, trace.NotImplemented("Open is not implemented") +} + +// CompressAsTarGzArchive creates a Tar Gzip archive in memory, reading the files using the provided file reader +func TestCompressAsTarGzArchive(t *testing.T) { + tests := []struct { + name string + fileNames []string + fsContents map[string][]byte + fileMode fs.FileMode + assert require.ErrorAssertionFunc + }{ + { + name: "File Not Exists bubbles up", + fileNames: []string{"not", "found"}, + fsContents: map[string][]byte{}, + fileMode: 0600, + assert: func(t require.TestingT, err error, i ...interface{}) { + require.Error(t, err) + require.ErrorIs(t, err, fs.ErrNotExist) + }, + }, + { + name: "Archive is created", + fileNames: []string{"file1", "file2"}, + fsContents: map[string][]byte{ + "file1": []byte("contentsfile1"), + "file2": []byte("contentsfile2"), + }, + fileMode: teleport.FileMaskOwnerOnly, + assert: require.NoError, + }, + } + + for _, tt := range tests { + fileReader := mockFileReader{ + files: tt.fsContents, + } + bs, err := CompressTarGzArchive(tt.fileNames, fileReader, tt.fileMode) + tt.assert(t, err) + if err != nil { + continue + } + + gzipReader, err := gzip.NewReader(bs) + require.NoError(t, err) + + tarContentFileNames := []string{} + + tarReader := tar.NewReader(gzipReader) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + require.NoError(t, err) + require.Equal(t, byte(tar.TypeReg), header.Typeflag) + require.Equal(t, tt.fileMode, fs.FileMode(header.Mode)) + + tarContentFileNames = append(tarContentFileNames, header.Name) + require.Contains(t, tt.fsContents, header.Name) + + gotBytes, err := io.ReadAll(tarReader) + require.NoError(t, err) + t.Log(string(gotBytes)) + + require.Equal(t, tt.fsContents[header.Name], gotBytes) + } + require.ElementsMatch(t, tarContentFileNames, tt.fileNames) + } +} diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index 6decc533a18d3..4b522f0ca0e14 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -2722,19 +2722,9 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler) httprou return nil, trace.AccessDenied("need auth") } - clusterName := p.ByName("site") - if clusterName == currentSiteShortcut { - res, err := h.GetProxyClient().GetClusterName() - if err != nil { - h.log.WithError(err).Warn("Failed to query cluster name.") - return nil, trace.Wrap(err) - } - clusterName = res.GetClusterName() - } - - site, err := h.cfg.Proxy.GetSite(clusterName) + site, err := h.cfg.Proxy.GetSite(h.auth.clusterName) if err != nil { - h.log.WithError(err).WithField("cluster-name", clusterName).Warn("Failed to query site.") + h.log.WithError(err).WithField("cluster-name", h.auth.clusterName).Warn("Failed to query site.") return nil, trace.Wrap(err) } diff --git a/lib/web/sign.go b/lib/web/sign.go index 0eacaf4beb11a..a3e543274ab33 100644 --- a/lib/web/sign.go +++ b/lib/web/sign.go @@ -17,26 +17,23 @@ limitations under the License. 
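For reference, a minimal usage sketch of the CompressTarGzArchive helper introduced above. The import path and the three-argument signature are the ones defined in lib/utils/archive.go at this point in the series; the file names and contents are purely illustrative, and testing/fstest stands in for any fs.ReadFileFS implementation.

package main

import (
    "fmt"
    "io/fs"
    "testing/fstest"

    "github.com/gravitational/teleport/lib/utils"
)

func main() {
    // fstest.MapFS implements fs.ReadFileFS, which is all the helper needs
    // to read the files it is asked to archive.
    source := fstest.MapFS{
        "server.crt": &fstest.MapFile{Data: []byte("cert bytes")},
        "server.key": &fstest.MapFile{Data: []byte("key bytes")},
    }

    // Every entry in the archive is written with the same mode; 0600 keeps
    // private keys readable by the owner only.
    archive, err := utils.CompressTarGzArchive([]string{"server.crt", "server.key"}, source, fs.FileMode(0o600))
    if err != nil {
        panic(err)
    }
    fmt.Printf("tar.gz archive is %d bytes\n", archive.Len())
}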
package web import ( - "archive/tar" "bytes" - "compress/gzip" - "crypto/x509/pkix" "fmt" + "io/fs" "net/http" "time" + "github.com/gravitational/teleport" "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" "golang.org/x/exp/slices" - "github.com/gravitational/teleport/api/client/proto" apidefaults "github.com/gravitational/teleport/api/defaults" - "github.com/gravitational/teleport/lib/auth" - "github.com/gravitational/teleport/lib/client" "github.com/gravitational/teleport/lib/client/identityfile" "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/reversetunnel" - "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/teleport/lib/srv" + "github.com/gravitational/teleport/lib/utils" ) /* signCertKeyPair returns the necessary files to set up mTLS for other services @@ -55,59 +52,40 @@ Should be equivalent to running: This endpoint returns a tar.gz compressed archive containing the required files to setup mTLS for the service. */ func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite) (interface{}, error) { - ctx := r.Context() - req, err := parseSignCertKeyPair(r) - if err != nil { - return nil, trace.Wrap(err) - } - - key, err := client.NewKey() - if err != nil { + req := &signCertKeyPairReq{} + if err := httplib.ReadJSON(r, &req); err != nil { return nil, trace.Wrap(err) } - subject := pkix.Name{CommonName: req.Hostname} - csr, err := tlsca.GenerateCertificateRequestPEM(subject, key.Priv) - if err != nil { - return nil, trace.Wrap(err) - } - - clusterAPI := h.auth.proxyClient - resp, err := clusterAPI.GenerateDatabaseCert( - ctx, - &proto.DatabaseCertRequest{ - CSR: csr, - // Important to include SANs since CommonName has been deprecated - ServerNames: []string{req.Hostname}, - // Include legacy ServerName for compatibility. - ServerName: req.Hostname, - TTL: proto.Duration(req.TTL), - RequesterName: proto.DatabaseCertRequest_TCTL, - }, - ) - if err != nil { + if err := req.CheckAndSetDefaults(); err != nil { return nil, trace.Wrap(err) } - key.TLSCert = resp.Cert - key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} - virtualFS := identityfile.NewInMemoryConfigWriter() - filesWritten, err := identityfile.Write(identityfile.WriteConfig{ - OutputPath: "server", - Key: key, - Format: req.Format, - OverwriteDestination: true, - Writer: virtualFS, - }) + mTLSReq := srv.GenerateMTLSFilesRequest{ + ClusterAPI: h.auth.proxyClient, + Principals: []string{req.Hostname}, + OutputFormat: req.Format, + OutputCanOverwrite: true, + OutputLocation: "server", + IdentityFileWriter: virtualFS, + TTL: req.TTL, + HelperMessageWriter: nil, + } + filesWritten, err := srv.GenerateMTLSFiles(r.Context(), mTLSReq) if err != nil { return nil, trace.Wrap(err) } archiveName := fmt.Sprintf("teleport_mTLS_%s.tar.gz", req.Hostname) - archiveBytes, err := archiveFromFiles(filesWritten, virtualFS) + // https://www.postgresql.org/docs/current/libpq-ssl.html + // On Unix systems, the permissions on the private key file must disallow any access to world or group; + // achieve this by a command such as chmod 0600 ~/.postgresql/postgresql.key. + // Alternatively, the file can be owned by root and have group read access (that is, 0640 permissions). 
+ fileMode := fs.FileMode(teleport.FileMaskOwnerOnly) // 0600 + archiveBytes, err := utils.CompressTarGzArchive(filesWritten, virtualFS, fileMode) if err != nil { return nil, trace.Wrap(err) } @@ -122,47 +100,12 @@ func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p http return nil, nil } -// archiveFromFiles builds a Tar Gzip archive in memory, reading the files from the virtual FS -func archiveFromFiles(files []string, virtualFS identityfile.InMemoryConfigWriter) (*bytes.Buffer, error) { - archiveBytes := &bytes.Buffer{} - - gzipWriter := gzip.NewWriter(archiveBytes) - defer gzipWriter.Close() - - tarWriter := tar.NewWriter(gzipWriter) - defer tarWriter.Close() - - for _, filename := range files { - bs, err := virtualFS.Read(filename) - if err != nil { - return nil, trace.Wrap(err) - } - - if err := tarWriter.WriteHeader(&tar.Header{ - Name: filename, - Size: int64(len(bs)), - - // https://www.postgresql.org/docs/current/libpq-ssl.html - // On Unix systems, the permissions on the private key file must disallow any access to world or group; - // achieve this by a command such as chmod 0600 ~/.postgresql/postgresql.key. - // Alternatively, the file can be owned by root and have group read access (that is, 0640 permissions). - Mode: 0600, - }); err != nil { - return nil, trace.Wrap(err) - } - - if _, err := tarWriter.Write(bs); err != nil { - return nil, trace.Wrap(err) - } - } - - return archiveBytes, nil -} - type signCertKeyPairReq struct { - Hostname string - Format identityfile.Format - TTL time.Duration + Hostname string `json:"hostname,omitempty"` + FormatRaw string `json:"format,omitempty"` + TTLRaw string `json:"ttl,omitempty"` + Format identityfile.Format + TTL time.Duration } // TODO(marco): only format db is supported @@ -170,36 +113,27 @@ var supportedFormats = []identityfile.Format{ identityfile.FormatDatabase, } -func parseSignCertKeyPair(r *http.Request) (*signCertKeyPairReq, error) { - reqRaw := struct { - Hostname string `json:"hostname,omitempty"` - Format string `json:"format,omitempty"` - TTL string `json:"ttl,omitempty"` - }{} - if err := httplib.ReadJSON(r, &reqRaw); err != nil { - return nil, trace.Wrap(err) +func (s *signCertKeyPairReq) CheckAndSetDefaults() error { + if s.Hostname == "" { + return trace.BadParameter("missing hostname") } - ret := &signCertKeyPairReq{} - - ret.Hostname = reqRaw.Hostname - - if reqRaw.Format == "" { - return nil, trace.BadParameter("missing format") + if s.FormatRaw == "" { + return trace.BadParameter("missing format") } - ret.Format = identityfile.Format(reqRaw.Format) - if !slices.Contains(supportedFormats, ret.Format) { - return nil, trace.BadParameter("invalid format") + s.Format = identityfile.Format(s.FormatRaw) + if !slices.Contains(supportedFormats, s.Format) { + return trace.BadParameter("provided format '%s' is not valid, supported formats are: %q", s.Format, supportedFormats) } - if reqRaw.TTL == "" { - reqRaw.TTL = apidefaults.CertDuration.String() + if s.TTLRaw == "" { + s.TTLRaw = apidefaults.CertDuration.String() } - ttl, err := time.ParseDuration(reqRaw.TTL) + ttl, err := time.ParseDuration(s.TTLRaw) if err != nil { - return nil, trace.BadParameter("invalid ttl (please use https://pkg.go.dev/time#ParseDuration notation)") + return trace.BadParameter("invalid ttl '%s', use https://pkg.go.dev/time#ParseDuration format (example: 2190h)", s.TTLRaw) } - ret.TTL = ttl + s.TTL = ttl - return ret, nil + return nil } diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index 
a9a9e4fa7a6a6..c6cf13be335e4 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -16,7 +16,6 @@ package common import ( "context" - "crypto/x509/pkix" "encoding/pem" "fmt" "net" @@ -24,7 +23,6 @@ import ( "os" "strconv" "strings" - "text/template" "time" "github.com/gravitational/teleport/api/client/proto" @@ -38,8 +36,8 @@ import ( kubeutils "github.com/gravitational/teleport/lib/kube/utils" "github.com/gravitational/teleport/lib/service" "github.com/gravitational/teleport/lib/services" + "github.com/gravitational/teleport/lib/srv" "github.com/gravitational/teleport/lib/sshutils" - "github.com/gravitational/teleport/lib/tlsca" "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/kingpin" @@ -372,11 +370,9 @@ func (a *AuthCommand) generateSnowflakeKey(ctx context.Context, clusterAPI auth. return trace.Wrap(err) } - err = snowflakeAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - }) - - return trace.Wrap(err) + return trace.Wrap( + srv.WriteHelperMessageDBmTLS(os.Stdout, filesWritten, "", a.outputFormat), + ) } // RotateCertAuthority starts or restarts certificate authority rotation process @@ -469,154 +465,20 @@ func (a *AuthCommand) generateDatabaseKeys(ctx context.Context, clusterAPI auth. // for database access. func (a *AuthCommand) generateDatabaseKeysForKey(ctx context.Context, clusterAPI auth.ClientI, key *client.Key) error { principals := strings.Split(a.genHost, ",") - if a.outputFormat != identityfile.FormatSnowflake && len(principals) == 1 && principals[0] == "" { - return trace.BadParameter("at least one hostname must be specified via --host flag") - } - // For CockroachDB node certificates, CommonName must be "node": - // - // https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates - if a.outputFormat == identityfile.FormatCockroach { - principals = append([]string{"node"}, principals...) - } - subject := pkix.Name{CommonName: principals[0]} - if a.outputFormat == identityfile.FormatMongo { - // Include Organization attribute in MongoDB certificates as well. - // - // When using X.509 member authentication, MongoDB requires O or OU to - // be non-empty so this will make the certs we generate compatible: - // - // https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509 - // - // The actual O value doesn't matter as long as it matches on all - // MongoDB cluster members so set it to the Teleport cluster name - // to avoid hardcoding anything. - clusterName, err := clusterAPI.GetClusterName() - if err != nil { - return trace.Wrap(err) - } - subject.Organization = []string{ - clusterName.GetClusterName(), - } - } - csr, err := tlsca.GenerateCertificateRequestPEM(subject, key.Priv) - if err != nil { - return trace.Wrap(err) - } - resp, err := clusterAPI.GenerateDatabaseCert(ctx, - &proto.DatabaseCertRequest{ - CSR: csr, - // Important to include SANs since CommonName has been deprecated - // since Go 1.15: - // https://golang.org/doc/go1.15#commonname - ServerNames: principals, - // Include legacy ServerName for compatibility. 
- ServerName: principals[0], - TTL: proto.Duration(a.genTTL), - RequesterName: proto.DatabaseCertRequest_TCTL, - }) - if err != nil { - return trace.Wrap(err) - } - key.TLSCert = resp.Cert - key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} - filesWritten, err := identityfile.Write(identityfile.WriteConfig{ - OutputPath: a.output, - Key: key, - Format: a.outputFormat, - OverwriteDestination: a.signOverwrite, - }) - if err != nil { - return trace.Wrap(err) - } - switch a.outputFormat { - case identityfile.FormatDatabase: - err = dbAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatMongo: - err = mongoAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatCockroach: - err = cockroachAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatRedis: - err = redisAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": a.output, - }) - case identityfile.FormatSnowflake: - err = snowflakeAuthSignTpl.Execute(os.Stdout, map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - }) + + genMTLSReq := srv.GenerateMTLSFilesRequest{ + ClusterAPI: clusterAPI, + Principals: principals, + OutputFormat: a.outputFormat, + OutputCanOverwrite: a.signOverwrite, + OutputLocation: a.output, + TTL: a.genTTL, + HelperMessageWriter: os.Stdout, } + _, err := srv.GenerateMTLSFiles(ctx, genMTLSReq) return trace.Wrap(err) } -var ( - // dbAuthSignTpl is printed when user generates credentials for a self-hosted database. - dbAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -To enable mutual TLS on your PostgreSQL server, add the following to its -postgresql.conf configuration file: - -ssl = on -ssl_cert_file = '/path/to/{{.output}}.crt' -ssl_key_file = '/path/to/{{.output}}.key' -ssl_ca_file = '/path/to/{{.output}}.cas' - -To enable mutual TLS on your MySQL server, add the following to its -mysql.cnf configuration file: - -[mysqld] -require_secure_transport=ON -ssl-cert=/path/to/{{.output}}.crt -ssl-key=/path/to/{{.output}}.key -ssl-ca=/path/to/{{.output}}.cas -`)) - // mongoAuthSignTpl is printed when user generates credentials for a MongoDB database. - mongoAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -To enable mutual TLS on your MongoDB server, add the following to its -mongod.yaml configuration file: - -net: - tls: - mode: requireTLS - certificateKeyFile: /path/to/{{.output}}.crt - CAFile: /path/to/{{.output}}.cas -`)) - cockroachAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -To enable mutual TLS on your CockroachDB server, point it to the certs -directory using --certs-dir flag: - -cockroach start \ - --certs-dir={{.output}} \ - # other flags... -`)) - - redisAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. 
- -To enable mutual TLS on your Redis server, add the following to your redis.conf: - -tls-ca-cert-file /path/to/{{.output}}.cas -tls-cert-file /path/to/{{.output}}.crt -tls-key-file /path/to/{{.output}}.key -tls-protocols "TLSv1.2 TLSv1.3" -`)) - - snowflakeAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -Please add the generated key to the Snowflake users as described here: -https://docs.snowflake.com/en/user-guide/key-pair-auth.html#step-4-assign-the-public-key-to-a-snowflake-user -`)) -) - func (a *AuthCommand) generateUserKeys(ctx context.Context, clusterAPI auth.ClientI) error { // Validate --proxy flag. if err := a.checkProxyAddr(clusterAPI); err != nil { From edb7830aa1361a26f5778c1a6918420e165f0ca5 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Wed, 27 Jul 2022 08:05:55 +0100 Subject: [PATCH 07/10] extract generation of mtls files --- lib/srv/mtls.go | 20 ++++++++++++-------- tool/tctl/common/auth_command.go | 1 + 2 files changed, 13 insertions(+), 8 deletions(-) diff --git a/lib/srv/mtls.go b/lib/srv/mtls.go index 7d7f3695587e7..3037f162f44db 100644 --- a/lib/srv/mtls.go +++ b/lib/srv/mtls.go @@ -40,11 +40,12 @@ type GenerateMTLSFilesRequest struct { OutputLocation string IdentityFileWriter identityfile.ConfigWriter TTL time.Duration + Key *client.Key HelperMessageWriter io.Writer } func GenerateMTLSFiles(ctx context.Context, req GenerateMTLSFilesRequest) ([]string, error) { - if req.OutputFormat == identityfile.FormatSnowflake && len(req.Principals) == 1 && req.Principals[0] == "" { + if req.OutputFormat != identityfile.FormatSnowflake && len(req.Principals) == 1 && req.Principals[0] == "" { return nil, trace.BadParameter("at least one hostname must be specified") } @@ -77,12 +78,15 @@ func GenerateMTLSFiles(ctx context.Context, req GenerateMTLSFilesRequest) ([]str subject.Organization = []string{clusterNameType.GetClusterName()} } - key, err := client.NewKey() - if err != nil { - return nil, trace.Wrap(err) + if req.Key == nil { + key, err := client.NewKey() + if err != nil { + return nil, trace.Wrap(err) + } + req.Key = key } - csr, err := tlsca.GenerateCertificateRequestPEM(subject, key.Priv) + csr, err := tlsca.GenerateCertificateRequestPEM(subject, req.Key.Priv) if err != nil { return nil, trace.Wrap(err) } @@ -103,11 +107,11 @@ func GenerateMTLSFiles(ctx context.Context, req GenerateMTLSFilesRequest) ([]str return nil, trace.Wrap(err) } - key.TLSCert = resp.Cert - key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} + req.Key.TLSCert = resp.Cert + req.Key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} filesWritten, err := identityfile.Write(identityfile.WriteConfig{ OutputPath: req.OutputLocation, - Key: key, + Key: req.Key, Format: req.OutputFormat, OverwriteDestination: req.OutputCanOverwrite, Writer: req.IdentityFileWriter, diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index c6cf13be335e4..9f34fb7ec9474 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -473,6 +473,7 @@ func (a *AuthCommand) generateDatabaseKeysForKey(ctx context.Context, clusterAPI OutputCanOverwrite: a.signOverwrite, OutputLocation: a.output, TTL: a.genTTL, + Key: key, HelperMessageWriter: os.Stdout, } _, err := srv.GenerateMTLSFiles(ctx, genMTLSReq) From 9a34f5fa5d0910ded8f46c04605bab2fe077d13a Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Wed, 27 Jul 2022 17:53:13 +0100 Subject: [PATCH 08/10] validate token type and role --- 
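The rule this commit enforces can be sketched on its own. The types and trace packages, the SystemRoles literal, and the Include helper are the ones used in the diff below; the checkTokenRole wrapper and the role values chosen in main are illustrative only.

package main

import (
    "fmt"

    "github.com/gravitational/teleport/api/types"
    "github.com/gravitational/trace"
)

// checkTokenRole mirrors the check added below: a provision token is only
// accepted if it carries the system role required by the endpoint.
func checkTokenRole(tokenRoles types.SystemRoles, required types.SystemRole) error {
    if !tokenRoles.Include(required) {
        return trace.AccessDenied("invalid auth")
    }
    return nil
}

func main() {
    // A token minted for the Proxy role is rejected by an endpoint that
    // requires the Db role.
    err := checkTokenRole(types.SystemRoles{types.RoleProxy}, types.RoleDatabase)
    fmt.Println(err)
}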
lib/web/apiserver.go | 12 ++++-- lib/web/apiserver_test.go | 81 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 4 deletions(-) diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index 4b522f0ca0e14..b3c9d77f5e406 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -367,7 +367,7 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { // Sign required files to setup mTLS in other services (eg DBs) // POST /webapi/sites/:site/sign - h.POST("/webapi/sites/:site/sign", h.WithProvisionTokenAuth(h.signCertKeyPair)) + h.POST("/webapi/sites/:site/sign", h.WithProvisionTokenAuth(h.signCertKeyPair, types.RoleDatabase)) // token generation h.POST("/webapi/token", h.WithAuth(h.createTokenHandle)) @@ -2706,7 +2706,7 @@ type ProvisionTokenAuthedHandler func(w http.ResponseWriter, r *http.Request, p // WithProvisionTokenAuth ensures that request is authenticated with a provision token. // Provision tokens, when used like this are invalidated as soon as used. // Doesn't matter if the underlying response was a success or an error. -func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler) httprouter.Handle { +func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler, requiredRole types.SystemRole) httprouter.Handle { return httplib.MakeHandler(func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) { ctx := r.Context() logger := h.log.WithField("request", fmt.Sprintf("%v %v", r.Method, r.URL.Path)) @@ -2717,7 +2717,7 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler) httprou return nil, trace.AccessDenied("need auth") } - if err := h.consumeTokenForAPICall(ctx, creds.Password); err != nil { + if err := h.consumeTokenForAPICall(ctx, creds.Password, requiredRole); err != nil { h.log.WithError(err).Warn("Failed to authenticate.") return nil, trace.AccessDenied("need auth") } @@ -2732,12 +2732,16 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler) httprou }) } -func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string) error { +func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string, requiredRole types.SystemRole) error { token, err := h.GetProxyClient().GetToken(ctx, tokenName) if err != nil { return trace.Wrap(err) } + if !token.GetRoles().Include(requiredRole) { + return trace.AccessDenied("invalid auth") + } + if err := h.GetProxyClient().DeleteToken(ctx, token.GetName()); err != nil { return trace.Wrap(err) } diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index fddda8dfbac46..8095b8e03b5f2 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -2268,6 +2268,87 @@ func TestSignMTLS(t *testing.T) { require.Equal(t, http.StatusForbidden, respSecondCall.StatusCode) } +func TestSignMTLS_failsAccessDenied(t *testing.T) { + env := newWebPack(t, 1) + clusterName := env.server.ClusterName() + username := "test-user@example.com" + + roleUserUpdate, err := types.NewRole(services.RoleNameForUser(username), types.RoleSpecV5{ + Allow: types.RoleConditions{ + Rules: []types.Rule{ + types.NewRule(types.KindUser, []string{types.VerbUpdate}), + types.NewRule(types.KindToken, []string{types.VerbCreate}), + }, + }, + }) + require.NoError(t, err) + + proxy := env.proxies[0] + pack := proxy.authPack(t, username, []types.Role{roleUserUpdate}) + + endpoint := pack.clt.Endpoint("webapi", "token") + re, err := pack.clt.PostJSON(context.Background(), endpoint, 
types.ProvisionTokenSpecV2{ + Roles: types.SystemRoles{types.RoleProxy}, + }) + require.NoError(t, err) + + var responseToken nodeJoinToken + err = json.Unmarshal(re.Bytes(), &responseToken) + require.NoError(t, err) + + // download mTLS files from /webapi/sites/:site/sign + endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign") + + bs, err := json.Marshal(struct { + Hostname string `json:"hostname"` + TTL string `json:"ttl"` + Format string `json:"format"` + }{ + Hostname: "mypg.example.com", + TTL: "2h", + Format: "db", + }) + require.NoError(t, err) + + req, err := http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) + require.NoError(t, err) + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Authorization", "Bearer "+responseToken.ID) + + anonHTTPClient := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + resp, err := anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + // It fails because we passed a Provision Token with the wrong Role: Proxy + require.Equal(t, http.StatusForbidden, resp.StatusCode) + + // using a user token also returns Forbidden + endpointResetToken := pack.clt.Endpoint("webapi", "users", "password", "token") + _, err = pack.clt.PostJSON(context.Background(), endpointResetToken, auth.CreateUserTokenRequest{ + Name: username, + TTL: time.Minute, + Type: auth.UserTokenTypeResetPassword, + }) + require.NoError(t, err) + + req, err = http.NewRequest(http.MethodPost, endpointSign, bytes.NewReader(bs)) + require.NoError(t, err) + + resp, err = anonHTTPClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + require.Equal(t, http.StatusForbidden, resp.StatusCode) +} + func TestClusterDatabasesGet(t *testing.T) { env := newWebPack(t, 1) From 4a0b08502e01602c639cbe11e2a686ac4200fb3e Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Fri, 29 Jul 2022 12:59:00 +0100 Subject: [PATCH 09/10] improve docs and code location --- api/identityfile/identityfile.go | 8 + lib/client/db/database_certificates.go | 123 ++++++++++ .../identityfile/inmemory_config_writer.go | 72 ++---- lib/services/provisioning.go | 1 + lib/srv/mtls.go | 220 ------------------ lib/utils/archive.go | 18 +- lib/utils/archive_test.go | 37 +-- lib/utils/inmemory_fs.go | 78 +++++++ lib/web/apiserver.go | 36 +-- lib/web/apiserver_test.go | 10 +- lib/web/sign.go | 78 +++---- tool/tctl/common/auth_command.go | 125 ++++++++-- 12 files changed, 429 insertions(+), 377 deletions(-) create mode 100644 lib/client/db/database_certificates.go delete mode 100644 lib/srv/mtls.go create mode 100644 lib/utils/inmemory_fs.go diff --git a/api/identityfile/identityfile.go b/api/identityfile/identityfile.go index d69d045830684..00c921af849fb 100644 --- a/api/identityfile/identityfile.go +++ b/api/identityfile/identityfile.go @@ -36,6 +36,14 @@ import ( const ( // FilePermissions defines file permissions for identity files. + // + // Specifically, for postgres, this must be 0600 or 0640 (choosing 0600 as it's more restrictive) + // https://www.postgresql.org/docs/current/libpq-ssl.html + // On Unix systems, the permissions on the private key file must disallow any access to world or group; + // achieve this by a command such as chmod 0600 ~/.postgresql/postgresql.key. + // Alternatively, the file can be owned by root and have group read access (that is, 0640 permissions). 
+ // + // Other services should accept 0600 as well, if not, we must change the Write function (in `lib/client/identityfile/identity.go`) FilePermissions = 0600 ) diff --git a/lib/client/db/database_certificates.go b/lib/client/db/database_certificates.go new file mode 100644 index 0000000000000..a9319e9b6c6d3 --- /dev/null +++ b/lib/client/db/database_certificates.go @@ -0,0 +1,123 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package db + +import ( + "context" + "crypto/x509/pkix" + "time" + + "github.com/gravitational/teleport/api/client/proto" + "github.com/gravitational/teleport/lib/auth" + "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/identityfile" + "github.com/gravitational/teleport/lib/tlsca" + "github.com/gravitational/trace" +) + +// GenerateDatabaseCertificatesRequest contains the required fields used to generate database certificates +// Those certificates will be used by databases to set up mTLS authentication against Teleport +type GenerateDatabaseCertificatesRequest struct { + ClusterAPI auth.ClientI + Principals []string + OutputFormat identityfile.Format + OutputCanOverwrite bool + OutputLocation string + IdentityFileWriter identityfile.ConfigWriter + TTL time.Duration + Key *client.Key +} + +// GenerateDatabaseCertificates to be used by databases to set up mTLS authentication +func GenerateDatabaseCertificates(ctx context.Context, req GenerateDatabaseCertificatesRequest) ([]string, error) { + if req.OutputFormat != identityfile.FormatSnowflake && len(req.Principals) == 1 && req.Principals[0] == "" { + return nil, trace.BadParameter("at least one hostname must be specified") + } + + // For CockroachDB node certificates, CommonName must be "node": + // + // https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates + if req.OutputFormat == identityfile.FormatCockroach { + req.Principals = append([]string{"node"}, req.Principals...) + } + + subject := pkix.Name{CommonName: req.Principals[0]} + + if req.OutputFormat == identityfile.FormatMongo { + // Include Organization attribute in MongoDB certificates as well. + // + // When using X.509 member authentication, MongoDB requires O or OU to + // be non-empty so this will make the certs we generate compatible: + // + // https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509 + // + // The actual O value doesn't matter as long as it matches on all + // MongoDB cluster members so set it to the Teleport cluster name + // to avoid hardcoding anything. 
+ + clusterNameType, err := req.ClusterAPI.GetClusterName() + if err != nil { + return nil, trace.Wrap(err) + } + + subject.Organization = []string{clusterNameType.GetClusterName()} + } + + if req.Key == nil { + key, err := client.NewKey() + if err != nil { + return nil, trace.Wrap(err) + } + req.Key = key + } + + csr, err := tlsca.GenerateCertificateRequestPEM(subject, req.Key.Priv) + if err != nil { + return nil, trace.Wrap(err) + } + + resp, err := req.ClusterAPI.GenerateDatabaseCert(ctx, + &proto.DatabaseCertRequest{ + CSR: csr, + // Important to include SANs since CommonName has been deprecated + // since Go 1.15: + // https://golang.org/doc/go1.15#commonname + ServerNames: req.Principals, + // Include legacy ServerName for compatibility. + ServerName: req.Principals[0], + TTL: proto.Duration(req.TTL), + RequesterName: proto.DatabaseCertRequest_TCTL, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + req.Key.TLSCert = resp.Cert + req.Key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} + filesWritten, err := identityfile.Write(identityfile.WriteConfig{ + OutputPath: req.OutputLocation, + Key: req.Key, + Format: req.OutputFormat, + OverwriteDestination: req.OutputCanOverwrite, + Writer: req.IdentityFileWriter, + }) + if err != nil { + return nil, trace.Wrap(err) + } + + return filesWritten, nil +} diff --git a/lib/client/identityfile/inmemory_config_writer.go b/lib/client/identityfile/inmemory_config_writer.go index c8e420c050906..0f6c2faa04c58 100644 --- a/lib/client/identityfile/inmemory_config_writer.go +++ b/lib/client/identityfile/inmemory_config_writer.go @@ -22,53 +22,16 @@ import ( "sync" "time" + "github.com/gravitational/teleport/lib/utils" "github.com/gravitational/trace" ) -type InMemoryFileInfo struct { - name string - size int64 - mode fs.FileMode - modTime time.Time - isDir bool - content []byte -} - -// Name returns the file's name -func (fi InMemoryFileInfo) Name() string { - return fi.name -} - -// Size returns the file size (calculated when writing the file) -func (fi InMemoryFileInfo) Size() int64 { - return fi.size -} - -// Mode returns the fs.FileMode -func (fi InMemoryFileInfo) Mode() fs.FileMode { - return fi.mode -} - -// ModTime returns the last modification time -func (fi InMemoryFileInfo) ModTime() time.Time { - return fi.modTime -} - -// IsDir checks whether the file is a directory -func (fi InMemoryFileInfo) IsDir() bool { - return fi.isDir -} - -// Sys is platform independent -// InMemoryFileInfo's implementation is no-op -func (fi InMemoryFileInfo) Sys() interface{} { - return nil -} - -func NewInMemoryConfigWriter() InMemoryConfigWriter { - return InMemoryConfigWriter{ +// NewInMemoryConfigWriter creates a new virtual file system +// It stores the files contents and their properties in memory +func NewInMemoryConfigWriter() *InMemoryConfigWriter { + return &InMemoryConfigWriter{ mux: &sync.RWMutex{}, - files: make(map[string]InMemoryFileInfo), + files: make(map[string]*utils.InMemoryFile), } } @@ -76,29 +39,22 @@ func NewInMemoryConfigWriter() InMemoryConfigWriter { // instead of writing to a more persistent storage. 
type InMemoryConfigWriter struct { mux *sync.RWMutex - files map[string]InMemoryFileInfo + files map[string]*utils.InMemoryFile } // WriteFile writes the given data to path `name` // It replaces the file if it already exists -func (m InMemoryConfigWriter) WriteFile(name string, data []byte, perm os.FileMode) error { +func (m *InMemoryConfigWriter) WriteFile(name string, data []byte, perm os.FileMode) error { m.mux.Lock() defer m.mux.Unlock() - m.files[name] = InMemoryFileInfo{ - name: name, - size: int64(len(data)), - mode: perm, - modTime: time.Now(), - content: data, - isDir: false, - } + m.files[name] = utils.NewInMemoryFile(name, perm, time.Now(), data) return nil } // Remove the file. // If the file does not exist, Remove is a no-op -func (m InMemoryConfigWriter) Remove(name string) error { +func (m *InMemoryConfigWriter) Remove(name string) error { m.mux.Lock() defer m.mux.Unlock() @@ -108,7 +64,7 @@ func (m InMemoryConfigWriter) Remove(name string) error { // Stat returns the FileInfo of the given file. // Returns fs.ErrNotExists if the file is not present -func (m InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { +func (m *InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { m.mux.RLock() defer m.mux.RUnlock() @@ -122,7 +78,7 @@ func (m InMemoryConfigWriter) Stat(name string) (fs.FileInfo, error) { // ReadFile returns the file contents. // Returns fs.ErrNotExists if the file is not present -func (m InMemoryConfigWriter) ReadFile(name string) ([]byte, error) { +func (m *InMemoryConfigWriter) ReadFile(name string) ([]byte, error) { m.mux.RLock() defer m.mux.RUnlock() @@ -131,10 +87,10 @@ func (m InMemoryConfigWriter) ReadFile(name string) ([]byte, error) { return nil, fs.ErrNotExist } - return f.content, nil + return f.Content(), nil } // Open is not implemented but exists here to satisfy the io/fs.ReadFileFS interface. -func (m InMemoryConfigWriter) Open(name string) (fs.File, error) { +func (m *InMemoryConfigWriter) Open(name string) (fs.File, error) { return nil, trace.NotImplemented("Open is not implemented for InMemoryConfigWriter") } diff --git a/lib/services/provisioning.go b/lib/services/provisioning.go index 6b9b84c5fd00d..3c3ba5eee28e7 100644 --- a/lib/services/provisioning.go +++ b/lib/services/provisioning.go @@ -38,6 +38,7 @@ type Provisioner interface { GetToken(ctx context.Context, token string) (types.ProvisionToken, error) // DeleteToken deletes provisioning token + // Imlementations must guarantee that this returns trace.NotFound error is the token doesn't exist DeleteToken(ctx context.Context, token string) error // DeleteAllTokens deletes all provisioning tokens diff --git a/lib/srv/mtls.go b/lib/srv/mtls.go deleted file mode 100644 index 3037f162f44db..0000000000000 --- a/lib/srv/mtls.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Copyright 2022 Gravitational, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package srv - -import ( - "context" - "crypto/x509/pkix" - "io" - "strings" - "text/template" - "time" - - "github.com/gravitational/teleport/api/client/proto" - "github.com/gravitational/teleport/lib/auth" - "github.com/gravitational/teleport/lib/client" - "github.com/gravitational/teleport/lib/client/identityfile" - "github.com/gravitational/teleport/lib/tlsca" - "github.com/gravitational/trace" -) - -type GenerateMTLSFilesRequest struct { - ClusterAPI auth.ClientI - Principals []string - OutputFormat identityfile.Format - OutputCanOverwrite bool - OutputLocation string - IdentityFileWriter identityfile.ConfigWriter - TTL time.Duration - Key *client.Key - HelperMessageWriter io.Writer -} - -func GenerateMTLSFiles(ctx context.Context, req GenerateMTLSFilesRequest) ([]string, error) { - if req.OutputFormat != identityfile.FormatSnowflake && len(req.Principals) == 1 && req.Principals[0] == "" { - return nil, trace.BadParameter("at least one hostname must be specified") - } - - // For CockroachDB node certificates, CommonName must be "node": - // - // https://www.cockroachlabs.com/docs/v21.1/cockroach-cert#node-key-and-certificates - if req.OutputFormat == identityfile.FormatCockroach { - req.Principals = append([]string{"node"}, req.Principals...) - } - - subject := pkix.Name{CommonName: req.Principals[0]} - - if req.OutputFormat == identityfile.FormatMongo { - // Include Organization attribute in MongoDB certificates as well. - // - // When using X.509 member authentication, MongoDB requires O or OU to - // be non-empty so this will make the certs we generate compatible: - // - // https://docs.mongodb.com/manual/core/security-internal-authentication/#x.509 - // - // The actual O value doesn't matter as long as it matches on all - // MongoDB cluster members so set it to the Teleport cluster name - // to avoid hardcoding anything. - - clusterNameType, err := req.ClusterAPI.GetClusterName() - if err != nil { - return nil, trace.Wrap(err) - } - - subject.Organization = []string{clusterNameType.GetClusterName()} - } - - if req.Key == nil { - key, err := client.NewKey() - if err != nil { - return nil, trace.Wrap(err) - } - req.Key = key - } - - csr, err := tlsca.GenerateCertificateRequestPEM(subject, req.Key.Priv) - if err != nil { - return nil, trace.Wrap(err) - } - - resp, err := req.ClusterAPI.GenerateDatabaseCert(ctx, - &proto.DatabaseCertRequest{ - CSR: csr, - // Important to include SANs since CommonName has been deprecated - // since Go 1.15: - // https://golang.org/doc/go1.15#commonname - ServerNames: req.Principals, - // Include legacy ServerName for compatibility. 
- ServerName: req.Principals[0], - TTL: proto.Duration(req.TTL), - RequesterName: proto.DatabaseCertRequest_TCTL, - }) - if err != nil { - return nil, trace.Wrap(err) - } - - req.Key.TLSCert = resp.Cert - req.Key.TrustedCA = []auth.TrustedCerts{{TLSCertificates: resp.CACerts}} - filesWritten, err := identityfile.Write(identityfile.WriteConfig{ - OutputPath: req.OutputLocation, - Key: req.Key, - Format: req.OutputFormat, - OverwriteDestination: req.OutputCanOverwrite, - Writer: req.IdentityFileWriter, - }) - if err != nil { - return nil, trace.Wrap(err) - } - - if err := WriteHelperMessageDBmTLS(req.HelperMessageWriter, filesWritten, req.OutputLocation, req.OutputFormat); err != nil { - return nil, trace.Wrap(err) - } - - return filesWritten, nil -} - -var mapIdentityFileFormatHelperTemplate = map[identityfile.Format]*template.Template{ - identityfile.FormatDatabase: dbAuthSignTpl, - identityfile.FormatMongo: mongoAuthSignTpl, - identityfile.FormatCockroach: cockroachAuthSignTpl, - identityfile.FormatRedis: redisAuthSignTpl, - identityfile.FormatSnowflake: snowflakeAuthSignTpl, -} - -func WriteHelperMessageDBmTLS(writer io.Writer, filesWritten []string, output string, outputFormat identityfile.Format) error { - if writer == nil { - return nil - } - - tpl, found := mapIdentityFileFormatHelperTemplate[outputFormat] - if !found { - // This format doesn't have a recommended configuration. - // Consider adding one to ease the installation for the end-user - return nil - } - - tplVars := map[string]interface{}{ - "files": strings.Join(filesWritten, ", "), - "output": output, - } - - if outputFormat == identityfile.FormatSnowflake { - delete(tplVars, "output") - } - - return trace.Wrap(tpl.Execute(writer, tplVars)) -} - -var ( - // dbAuthSignTpl is printed when user generates credentials for a self-hosted database. - dbAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -To enable mutual TLS on your PostgreSQL server, add the following to its -postgresql.conf configuration file: - -ssl = on -ssl_cert_file = '/path/to/{{.output}}.crt' -ssl_key_file = '/path/to/{{.output}}.key' -ssl_ca_file = '/path/to/{{.output}}.cas' - -To enable mutual TLS on your MySQL server, add the following to its -mysql.cnf configuration file: - -[mysqld] -require_secure_transport=ON -ssl-cert=/path/to/{{.output}}.crt -ssl-key=/path/to/{{.output}}.key -ssl-ca=/path/to/{{.output}}.cas -`)) - // mongoAuthSignTpl is printed when user generates credentials for a MongoDB database. - mongoAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -To enable mutual TLS on your MongoDB server, add the following to its -mongod.yaml configuration file: - -net: - tls: - mode: requireTLS - certificateKeyFile: /path/to/{{.output}}.crt - CAFile: /path/to/{{.output}}.cas -`)) - cockroachAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -To enable mutual TLS on your CockroachDB server, point it to the certs -directory using --certs-dir flag: - -cockroach start \ - --certs-dir={{.output}} \ - # other flags... -`)) - - redisAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. 
- -To enable mutual TLS on your Redis server, add the following to your redis.conf: - -tls-ca-cert-file /path/to/{{.output}}.cas -tls-cert-file /path/to/{{.output}}.crt -tls-key-file /path/to/{{.output}}.key -tls-protocols "TLSv1.2 TLSv1.3" -`)) - - snowflakeAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. - -Please add the generated key to the Snowflake users as described here: -https://docs.snowflake.com/en/user-guide/key-pair-auth.html#step-4-assign-the-public-key-to-a-snowflake-user -`)) -) diff --git a/lib/utils/archive.go b/lib/utils/archive.go index d71f90dd3b9cd..001dcca479c82 100644 --- a/lib/utils/archive.go +++ b/lib/utils/archive.go @@ -25,8 +25,17 @@ import ( "github.com/gravitational/trace" ) +// ReadStatFS combines two interfaces: fs.ReadFileFS and fs.StatFS +// We need both when creating the archive to be able to: +// - read file contents - `ReadFile` provided by fs.ReadFileFS +// - set the correct file permissions - `Stat() ... Mode()` provided by fs.StatFS +type ReadStatFS interface { + fs.ReadFileFS + fs.StatFS +} + // CompressTarGzArchive creates a Tar Gzip archive in memory, reading the files using the provided file reader -func CompressTarGzArchive(files []string, fileReader fs.ReadFileFS, fileMode fs.FileMode) (*bytes.Buffer, error) { +func CompressTarGzArchive(files []string, fileReader ReadStatFS) (*bytes.Buffer, error) { archiveBytes := &bytes.Buffer{} gzipWriter := gzip.NewWriter(archiveBytes) @@ -41,10 +50,15 @@ func CompressTarGzArchive(files []string, fileReader fs.ReadFileFS, fileMode fs. return nil, trace.Wrap(err) } + fileStat, err := fileReader.Stat(filename) + if err != nil { + return nil, trace.Wrap(err) + } + if err := tarWriter.WriteHeader(&tar.Header{ Name: filename, Size: int64(len(bs)), - Mode: int64(fileMode), + Mode: int64(fileStat.Mode()), }); err != nil { return nil, trace.Wrap(err) } diff --git a/lib/utils/archive_test.go b/lib/utils/archive_test.go index 7aa5863441483..f9b6babc736e8 100644 --- a/lib/utils/archive_test.go +++ b/lib/utils/archive_test.go @@ -22,6 +22,7 @@ import ( "io" "io/fs" "testing" + "time" "github.com/gravitational/teleport" "github.com/gravitational/trace" @@ -29,36 +30,43 @@ import ( ) type mockFileReader struct { - files map[string][]byte + files map[string]*InMemoryFile } func (m mockFileReader) ReadFile(name string) ([]byte, error) { - contents, found := m.files[name] + f, found := m.files[name] if !found { return nil, fs.ErrNotExist } - return contents, nil + return f.Content(), nil } func (m mockFileReader) Open(name string) (fs.File, error) { return nil, trace.NotImplemented("Open is not implemented") } +func (m mockFileReader) Stat(name string) (fs.FileInfo, error) { + f, found := m.files[name] + if !found { + return nil, fs.ErrNotExist + } + + return f, nil +} + // CompressAsTarGzArchive creates a Tar Gzip archive in memory, reading the files using the provided file reader func TestCompressAsTarGzArchive(t *testing.T) { tests := []struct { name string fileNames []string - fsContents map[string][]byte - fileMode fs.FileMode + fsContents map[string]*InMemoryFile assert require.ErrorAssertionFunc }{ { name: "File Not Exists bubbles up", fileNames: []string{"not", "found"}, - fsContents: map[string][]byte{}, - fileMode: 0600, + fsContents: map[string]*InMemoryFile{}, assert: func(t require.TestingT, err error, i ...interface{}) { require.Error(t, err) require.ErrorIs(t, err, fs.ErrNotExist) @@ -67,12 +75,11 @@ func 
TestCompressAsTarGzArchive(t *testing.T) { { name: "Archive is created", fileNames: []string{"file1", "file2"}, - fsContents: map[string][]byte{ - "file1": []byte("contentsfile1"), - "file2": []byte("contentsfile2"), + fsContents: map[string]*InMemoryFile{ + "file1": NewInMemoryFile("file1", teleport.FileMaskOwnerOnly, time.Now(), []byte("contentsfile1")), + "file2": NewInMemoryFile("file2", teleport.FileMaskOwnerOnly, time.Now(), []byte("contentsfile2")), }, - fileMode: teleport.FileMaskOwnerOnly, - assert: require.NoError, + assert: require.NoError, }, } @@ -80,7 +87,7 @@ func TestCompressAsTarGzArchive(t *testing.T) { fileReader := mockFileReader{ files: tt.fsContents, } - bs, err := CompressTarGzArchive(tt.fileNames, fileReader, tt.fileMode) + bs, err := CompressTarGzArchive(tt.fileNames, fileReader) tt.assert(t, err) if err != nil { continue @@ -99,7 +106,6 @@ func TestCompressAsTarGzArchive(t *testing.T) { } require.NoError(t, err) require.Equal(t, byte(tar.TypeReg), header.Typeflag) - require.Equal(t, tt.fileMode, fs.FileMode(header.Mode)) tarContentFileNames = append(tarContentFileNames, header.Name) require.Contains(t, tt.fsContents, header.Name) @@ -108,7 +114,8 @@ func TestCompressAsTarGzArchive(t *testing.T) { require.NoError(t, err) t.Log(string(gotBytes)) - require.Equal(t, tt.fsContents[header.Name], gotBytes) + require.Equal(t, tt.fsContents[header.Name].content, gotBytes) + require.Equal(t, tt.fsContents[header.Name].mode, fs.FileMode(header.Mode)) } require.ElementsMatch(t, tarContentFileNames, tt.fileNames) } diff --git a/lib/utils/inmemory_fs.go b/lib/utils/inmemory_fs.go new file mode 100644 index 0000000000000..fdaf45b9c887d --- /dev/null +++ b/lib/utils/inmemory_fs.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 Gravitational, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package utils + +import ( + "io/fs" + "time" +) + +// InMemoryFile stores the required properties to emulate a File in memory +// It contains the File properties like name, size, mode +// It also contains the File contents +// It does not support folders +type InMemoryFile struct { + name string + mode fs.FileMode + modTime time.Time + content []byte +} + +func NewInMemoryFile(name string, mode fs.FileMode, modTime time.Time, content []byte) *InMemoryFile { + return &InMemoryFile{ + name: name, + mode: mode, + modTime: modTime, + content: content, + } +} + +// Name returns the file's name +func (fi *InMemoryFile) Name() string { + return fi.name +} + +// Size returns the file size (calculated when writing the file) +func (fi *InMemoryFile) Size() int64 { + return int64(len(fi.content)) +} + +// Mode returns the fs.FileMode +func (fi *InMemoryFile) Mode() fs.FileMode { + return fi.mode +} + +// ModTime returns the last modification time +func (fi *InMemoryFile) ModTime() time.Time { + return fi.modTime +} + +// IsDir checks whether the file is a directory +func (fi *InMemoryFile) IsDir() bool { + return false +} + +// Sys is platform independent +// InMemoryFile's implementation is no-op +func (fi *InMemoryFile) Sys() interface{} { + return nil +} + +// Content returns the file bytes +func (fi *InMemoryFile) Content() []byte { + return fi.content +} diff --git a/lib/web/apiserver.go b/lib/web/apiserver.go index b3c9d77f5e406..163478feb8c76 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -365,9 +365,8 @@ func NewHandler(cfg Config, opts ...HandlerOption) (*APIHandler, error) { h.GET("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile)) h.POST("/webapi/sites/:site/nodes/:server/:login/scp", h.WithClusterAuth(h.transferFile)) - // Sign required files to setup mTLS in other services (eg DBs) - // POST /webapi/sites/:site/sign - h.POST("/webapi/sites/:site/sign", h.WithProvisionTokenAuth(h.signCertKeyPair, types.RoleDatabase)) + // Sign required files to set up mTLS using the db format. + h.POST("/webapi/sites/:site/sign/db", h.WithProvisionTokenAuth(h.signDatabaseCertificate)) // token generation h.POST("/webapi/token", h.WithAuth(h.createTokenHandle)) @@ -2701,12 +2700,12 @@ func (h *Handler) WithClusterAuth(fn ClusterHandler) httprouter.Handle { } // ProvisionTokenHandler is a authenticated handler that is called for some existing Token -type ProvisionTokenAuthedHandler func(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite) (interface{}, error) +type ProvisionTokenHandler func(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, roles types.SystemRoles) (interface{}, error) // WithProvisionTokenAuth ensures that request is authenticated with a provision token. // Provision tokens, when used like this are invalidated as soon as used. // Doesn't matter if the underlying response was a success or an error. 
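Two interface relationships are worth making explicit here: the new InMemoryFile is the fs.FileInfo that InMemoryConfigWriter.Stat returns, and the pointer-based InMemoryConfigWriter satisfies utils.ReadStatFS, which is what CompressTarGzArchive now consumes. A compile-time sketch, assuming only the import paths used in this patch:

package main

import (
    "io/fs"

    "github.com/gravitational/teleport/lib/client/identityfile"
    "github.com/gravitational/teleport/lib/utils"
)

// Compile-time assertions: if either type stops satisfying its interface,
// this file no longer builds.
var (
    // InMemoryFile provides Name/Size/Mode/ModTime/IsDir/Sys, i.e. fs.FileInfo.
    _ fs.FileInfo = (*utils.InMemoryFile)(nil)

    // InMemoryConfigWriter provides Open/ReadFile/Stat, i.e. utils.ReadStatFS,
    // so the in-memory identity files can be archived with their recorded modes.
    _ utils.ReadStatFS = (*identityfile.InMemoryConfigWriter)(nil)
)

func main() {}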
-func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler, requiredRole types.SystemRole) httprouter.Handle { +func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenHandler) httprouter.Handle { return httplib.MakeHandler(func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (interface{}, error) { ctx := r.Context() logger := h.log.WithField("request", fmt.Sprintf("%v %v", r.Method, r.URL.Path)) @@ -2717,35 +2716,42 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenAuthedHandler, require return nil, trace.AccessDenied("need auth") } - if err := h.consumeTokenForAPICall(ctx, creds.Password, requiredRole); err != nil { + tokenRoles, err := h.consumeTokenForAPICall(ctx, creds.Password) + if err != nil { h.log.WithError(err).Warn("Failed to authenticate.") return nil, trace.AccessDenied("need auth") } site, err := h.cfg.Proxy.GetSite(h.auth.clusterName) if err != nil { - h.log.WithError(err).WithField("cluster-name", h.auth.clusterName).Warn("Failed to query site.") + h.log.WithError(err).WithField("cluster-name", h.auth.clusterName).Warn("Failed to query cluster.") return nil, trace.Wrap(err) } - return fn(w, r, p, site) + return fn(w, r, p, site, tokenRoles) }) } -func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string, requiredRole types.SystemRole) error { +// consumeTokenForAPICall will fetch a token, check if the requireRole is present and then delete the token +// If any of those calls returns an error, this method also returns an error +// +// If multiple clients reach here at the same time, only one of them will be able to actually make the request. +// This is possible because the latest call - DeleteToken - returns an error if the resource doesn't exist +// This is currently true for all the backends as explained here +// https://github.com/gravitational/teleport/commit/24fcadc375d8359e80790b3ebeaa36bd8dd2822f +func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string) (types.SystemRoles, error) { token, err := h.GetProxyClient().GetToken(ctx, tokenName) if err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } - if !token.GetRoles().Include(requiredRole) { - return trace.AccessDenied("invalid auth") - } + roles := token.GetRoles() if err := h.GetProxyClient().DeleteToken(ctx, token.GetName()); err != nil { - return trace.Wrap(err) + return nil, trace.Wrap(err) } - return nil + + return roles, nil } type redirectHandlerFunc func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (redirectURL string) diff --git a/lib/web/apiserver_test.go b/lib/web/apiserver_test.go index 8095b8e03b5f2..0846e2baccef6 100644 --- a/lib/web/apiserver_test.go +++ b/lib/web/apiserver_test.go @@ -2204,17 +2204,15 @@ func TestSignMTLS(t *testing.T) { err = json.Unmarshal(re.Bytes(), &responseToken) require.NoError(t, err) - // download mTLS files from /webapi/sites/:site/sign - endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign") + // download mTLS files from /webapi/sites/:site/sign/db + endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign", "db") bs, err := json.Marshal(struct { Hostname string `json:"hostname"` TTL string `json:"ttl"` - Format string `json:"format"` }{ Hostname: "mypg.example.com", TTL: "2h", - Format: "db", }) require.NoError(t, err) @@ -2296,8 +2294,8 @@ func TestSignMTLS_failsAccessDenied(t *testing.T) { err = json.Unmarshal(re.Bytes(), &responseToken) require.NoError(t, err) - // download mTLS files from /webapi/sites/:site/sign 
- endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign") + // download mTLS files from /webapi/sites/:site/sign/db + endpointSign := pack.clt.Endpoint("webapi", "sites", clusterName, "sign", "db") bs, err := json.Marshal(struct { Hostname string `json:"hostname"` diff --git a/lib/web/sign.go b/lib/web/sign.go index a3e543274ab33..85a3bbff432af 100644 --- a/lib/web/sign.go +++ b/lib/web/sign.go @@ -19,40 +19,42 @@ package web import ( "bytes" "fmt" - "io/fs" "net/http" "time" - "github.com/gravitational/teleport" "github.com/gravitational/trace" "github.com/julienschmidt/httprouter" - "golang.org/x/exp/slices" apidefaults "github.com/gravitational/teleport/api/defaults" + "github.com/gravitational/teleport/api/types" + "github.com/gravitational/teleport/lib/client/db" "github.com/gravitational/teleport/lib/client/identityfile" "github.com/gravitational/teleport/lib/httplib" "github.com/gravitational/teleport/lib/reversetunnel" - "github.com/gravitational/teleport/lib/srv" "github.com/gravitational/teleport/lib/utils" ) -/* signCertKeyPair returns the necessary files to set up mTLS for other services +/* +signDatabaseCertificate returns the necessary files to set up mTLS using the `db` format This is the equivalent of running the tctl command As an example, requesting: -POST /webapi/sites/mycluster/sign +POST /webapi/sites/mycluster/sign/db { "hostname": "pg.example.com", - "ttl": "2190h", - "format": "db" + "ttl": "2190h" } Should be equivalent to running: tctl auth sign --host=pg.example.com --ttl=2190h --format=db -This endpoint returns a tar.gz compressed archive containing the required files to setup mTLS for the service. +This endpoint returns a tar.gz compressed archive containing the required files to setup mTLS for the database. 
*/ -func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite) (interface{}, error) { - req := &signCertKeyPairReq{} +func (h *Handler) signDatabaseCertificate(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, tokenRoles types.SystemRoles) (interface{}, error) { + if !tokenRoles.Include(types.RoleDatabase) { + return nil, trace.AccessDenied("required '%s' role was not provided by the token", types.RoleDatabase) + } + + req := &signDatabaseCertificateReq{} if err := httplib.ReadJSON(r, &req); err != nil { return nil, trace.Wrap(err) } @@ -63,29 +65,22 @@ func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p http virtualFS := identityfile.NewInMemoryConfigWriter() - mTLSReq := srv.GenerateMTLSFilesRequest{ - ClusterAPI: h.auth.proxyClient, - Principals: []string{req.Hostname}, - OutputFormat: req.Format, - OutputCanOverwrite: true, - OutputLocation: "server", - IdentityFileWriter: virtualFS, - TTL: req.TTL, - HelperMessageWriter: nil, + mTLSReq := db.GenerateDatabaseCertificatesRequest{ + ClusterAPI: h.auth.proxyClient, + Principals: []string{req.Hostname}, + OutputFormat: identityfile.FormatDatabase, + OutputCanOverwrite: true, + OutputLocation: "server", + IdentityFileWriter: virtualFS, + TTL: req.TTL, } - filesWritten, err := srv.GenerateMTLSFiles(r.Context(), mTLSReq) + filesWritten, err := db.GenerateDatabaseCertificates(r.Context(), mTLSReq) if err != nil { return nil, trace.Wrap(err) } archiveName := fmt.Sprintf("teleport_mTLS_%s.tar.gz", req.Hostname) - - // https://www.postgresql.org/docs/current/libpq-ssl.html - // On Unix systems, the permissions on the private key file must disallow any access to world or group; - // achieve this by a command such as chmod 0600 ~/.postgresql/postgresql.key. - // Alternatively, the file can be owned by root and have group read access (that is, 0640 permissions). 
- fileMode := fs.FileMode(teleport.FileMaskOwnerOnly) // 0600 - archiveBytes, err := utils.CompressTarGzArchive(filesWritten, virtualFS, fileMode) + archiveBytes, err := utils.CompressTarGzArchive(filesWritten, virtualFS) if err != nil { return nil, trace.Wrap(err) } @@ -100,32 +95,21 @@ func (h *Handler) signCertKeyPair(w http.ResponseWriter, r *http.Request, p http return nil, nil } -type signCertKeyPairReq struct { - Hostname string `json:"hostname,omitempty"` - FormatRaw string `json:"format,omitempty"` - TTLRaw string `json:"ttl,omitempty"` - Format identityfile.Format - TTL time.Duration -} +type signDatabaseCertificateReq struct { + Hostname string `json:"hostname,omitempty"` + TTLRaw string `json:"ttl,omitempty"` -// TODO(marco): only format db is supported -var supportedFormats = []identityfile.Format{ - identityfile.FormatDatabase, + TTL time.Duration `json:"-"` } -func (s *signCertKeyPairReq) CheckAndSetDefaults() error { +// CheckAndSetDefaults will validate and convert the received values +// Hostname must not be empty +// TTL must either be a valid time.Duration or empty (inherits apidefaults.CertDuration) +func (s *signDatabaseCertificateReq) CheckAndSetDefaults() error { if s.Hostname == "" { return trace.BadParameter("missing hostname") } - if s.FormatRaw == "" { - return trace.BadParameter("missing format") - } - s.Format = identityfile.Format(s.FormatRaw) - if !slices.Contains(supportedFormats, s.Format) { - return trace.BadParameter("provided format '%s' is not valid, supported formats are: %q", s.Format, supportedFormats) - } - if s.TTLRaw == "" { s.TTLRaw = apidefaults.CertDuration.String() } diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index 9f34fb7ec9474..7a01a340508fa 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -18,11 +18,13 @@ import ( "context" "encoding/pem" "fmt" + "io" "net" "net/url" "os" "strconv" "strings" + "text/template" "time" "github.com/gravitational/teleport/api/client/proto" @@ -31,12 +33,12 @@ import ( "github.com/gravitational/teleport/lib/auth" "github.com/gravitational/teleport/lib/auth/native" "github.com/gravitational/teleport/lib/client" + "github.com/gravitational/teleport/lib/client/db" "github.com/gravitational/teleport/lib/client/identityfile" "github.com/gravitational/teleport/lib/defaults" kubeutils "github.com/gravitational/teleport/lib/kube/utils" "github.com/gravitational/teleport/lib/service" "github.com/gravitational/teleport/lib/services" - "github.com/gravitational/teleport/lib/srv" "github.com/gravitational/teleport/lib/sshutils" "github.com/gravitational/teleport/lib/utils" @@ -371,7 +373,7 @@ func (a *AuthCommand) generateSnowflakeKey(ctx context.Context, clusterAPI auth. } return trace.Wrap( - srv.WriteHelperMessageDBmTLS(os.Stdout, filesWritten, "", a.outputFormat), + writeHelperMessageDBmTLS(os.Stdout, filesWritten, "", a.outputFormat), ) } @@ -466,20 +468,115 @@ func (a *AuthCommand) generateDatabaseKeys(ctx context.Context, clusterAPI auth. 
func (a *AuthCommand) generateDatabaseKeysForKey(ctx context.Context, clusterAPI auth.ClientI, key *client.Key) error { principals := strings.Split(a.genHost, ",") - genMTLSReq := srv.GenerateMTLSFilesRequest{ - ClusterAPI: clusterAPI, - Principals: principals, - OutputFormat: a.outputFormat, - OutputCanOverwrite: a.signOverwrite, - OutputLocation: a.output, - TTL: a.genTTL, - Key: key, - HelperMessageWriter: os.Stdout, - } - _, err := srv.GenerateMTLSFiles(ctx, genMTLSReq) - return trace.Wrap(err) + genMTLSReq := db.GenerateDatabaseCertificatesRequest{ + ClusterAPI: clusterAPI, + Principals: principals, + OutputFormat: a.outputFormat, + OutputCanOverwrite: a.signOverwrite, + OutputLocation: a.output, + TTL: a.genTTL, + Key: key, + } + filesWritten, err := db.GenerateDatabaseCertificates(ctx, genMTLSReq) + if err != nil { + return trace.Wrap(err) + } + + return trace.Wrap(writeHelperMessageDBmTLS(os.Stdout, filesWritten, a.output, a.outputFormat)) +} + +var mapIdentityFileFormatHelperTemplate = map[identityfile.Format]*template.Template{ + identityfile.FormatDatabase: dbAuthSignTpl, + identityfile.FormatMongo: mongoAuthSignTpl, + identityfile.FormatCockroach: cockroachAuthSignTpl, + identityfile.FormatRedis: redisAuthSignTpl, + identityfile.FormatSnowflake: snowflakeAuthSignTpl, +} + +func writeHelperMessageDBmTLS(writer io.Writer, filesWritten []string, output string, outputFormat identityfile.Format) error { + if writer == nil { + return nil + } + + tpl, found := mapIdentityFileFormatHelperTemplate[outputFormat] + if !found { + // This format doesn't have a recommended configuration. + // Consider adding one to ease the installation for the end-user + return nil + } + + tplVars := map[string]interface{}{ + "files": strings.Join(filesWritten, ", "), + "output": output, + } + + if outputFormat == identityfile.FormatSnowflake { + delete(tplVars, "output") + } + + return trace.Wrap(tpl.Execute(writer, tplVars)) } +var ( + // dbAuthSignTpl is printed when user generates credentials for a self-hosted database. + dbAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +To enable mutual TLS on your PostgreSQL server, add the following to its +postgresql.conf configuration file: + +ssl = on +ssl_cert_file = '/path/to/{{.output}}.crt' +ssl_key_file = '/path/to/{{.output}}.key' +ssl_ca_file = '/path/to/{{.output}}.cas' + +To enable mutual TLS on your MySQL server, add the following to its +mysql.cnf configuration file: + +[mysqld] +require_secure_transport=ON +ssl-cert=/path/to/{{.output}}.crt +ssl-key=/path/to/{{.output}}.key +ssl-ca=/path/to/{{.output}}.cas +`)) + // mongoAuthSignTpl is printed when user generates credentials for a MongoDB database. + mongoAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +To enable mutual TLS on your MongoDB server, add the following to its +mongod.yaml configuration file: + +net: + tls: + mode: requireTLS + certificateKeyFile: /path/to/{{.output}}.crt + CAFile: /path/to/{{.output}}.cas +`)) + cockroachAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +To enable mutual TLS on your CockroachDB server, point it to the certs +directory using --certs-dir flag: + +cockroach start \ + --certs-dir={{.output}} \ + # other flags... +`)) + + redisAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. 
+ +To enable mutual TLS on your Redis server, add the following to your redis.conf: + +tls-ca-cert-file /path/to/{{.output}}.cas +tls-cert-file /path/to/{{.output}}.crt +tls-key-file /path/to/{{.output}}.key +tls-protocols "TLSv1.2 TLSv1.3" +`)) + + snowflakeAuthSignTpl = template.Must(template.New("").Parse(`Database credentials have been written to {{.files}}. + +Please add the generated key to the Snowflake users as described here: +https://docs.snowflake.com/en/user-guide/key-pair-auth.html#step-4-assign-the-public-key-to-a-snowflake-user +`)) +) + func (a *AuthCommand) generateUserKeys(ctx context.Context, clusterAPI auth.ClientI) error { // Validate --proxy flag. if err := a.checkProxyAddr(clusterAPI); err != nil { From 25ecbc9af3053b7a0f00e3af12abdd84d20dbc37 Mon Sep 17 00:00:00 2001 From: Marco Dinis Date: Mon, 1 Aug 2022 08:21:57 +0100 Subject: [PATCH 10/10] check for empty list of principals --- lib/client/db/database_certificates.go | 5 ++++- lib/services/provisioning.go | 2 +- lib/web/apiserver.go | 12 +++++------- lib/web/sign.go | 8 ++++---- tool/tctl/common/auth_command.go | 8 ++------ 5 files changed, 16 insertions(+), 19 deletions(-) diff --git a/lib/client/db/database_certificates.go index a9319e9b6c6d3..b477db874f60c 100644 --- a/lib/client/db/database_certificates.go +++ b/lib/client/db/database_certificates.go @@ -44,7 +44,10 @@ type GenerateDatabaseCertificatesRequest struct { // GenerateDatabaseCertificates to be used by databases to set up mTLS authentication func GenerateDatabaseCertificates(ctx context.Context, req GenerateDatabaseCertificatesRequest) ([]string, error) { - if req.OutputFormat != identityfile.FormatSnowflake && len(req.Principals) == 1 && req.Principals[0] == "" { + + if len(req.Principals) == 0 || + (len(req.Principals) == 1 && req.Principals[0] == "" && req.OutputFormat != identityfile.FormatSnowflake) { + return nil, trace.BadParameter("at least one hostname must be specified") } diff --git a/lib/services/provisioning.go index 3c3ba5eee28e7..f6bb1938bac4d 100644 --- a/lib/services/provisioning.go +++ b/lib/services/provisioning.go @@ -38,7 +38,7 @@ type Provisioner interface { GetToken(ctx context.Context, token string) (types.ProvisionToken, error) // DeleteToken deletes provisioning token - // Imlementations must guarantee that this returns trace.NotFound error is the token doesn't exist + // Implementations must guarantee that this returns trace.NotFound error if the token doesn't exist DeleteToken(ctx context.Context, token string) error // DeleteAllTokens deletes all provisioning tokens diff --git a/lib/web/apiserver.go index 163478feb8c76..1e79c2ce97261 100644 --- a/lib/web/apiserver.go +++ b/lib/web/apiserver.go @@ -2700,7 +2700,7 @@ func (h *Handler) WithClusterAuth(fn ClusterHandler) httprouter.Handle { } // ProvisionTokenHandler is a authenticated handler that is called for some existing Token -type ProvisionTokenHandler func(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, roles types.SystemRoles) (interface{}, error) +type ProvisionTokenHandler func(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, token types.ProvisionToken) (interface{}, error) // WithProvisionTokenAuth ensures that request is authenticated with a provision token. // Provision tokens, when used like this are invalidated as soon as used.
@@ -2716,7 +2716,7 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenHandler) httprouter.Ha return nil, trace.AccessDenied("need auth") } - tokenRoles, err := h.consumeTokenForAPICall(ctx, creds.Password) + token, err := h.consumeTokenForAPICall(ctx, creds.Password) if err != nil { h.log.WithError(err).Warn("Failed to authenticate.") return nil, trace.AccessDenied("need auth") @@ -2728,7 +2728,7 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenHandler) httprouter.Ha return nil, trace.Wrap(err) } - return fn(w, r, p, site, tokenRoles) + return fn(w, r, p, site, token) }) } @@ -2739,19 +2739,17 @@ func (h *Handler) WithProvisionTokenAuth(fn ProvisionTokenHandler) httprouter.Ha // This is possible because the latest call - DeleteToken - returns an error if the resource doesn't exist // This is currently true for all the backends as explained here // https://github.com/gravitational/teleport/commit/24fcadc375d8359e80790b3ebeaa36bd8dd2822f -func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string) (types.SystemRoles, error) { +func (h *Handler) consumeTokenForAPICall(ctx context.Context, tokenName string) (types.ProvisionToken, error) { token, err := h.GetProxyClient().GetToken(ctx, tokenName) if err != nil { return nil, trace.Wrap(err) } - roles := token.GetRoles() - if err := h.GetProxyClient().DeleteToken(ctx, token.GetName()); err != nil { return nil, trace.Wrap(err) } - return roles, nil + return token, nil } type redirectHandlerFunc func(w http.ResponseWriter, r *http.Request, p httprouter.Params) (redirectURL string) diff --git a/lib/web/sign.go b/lib/web/sign.go index 85a3bbff432af..3b1ec393f3e1d 100644 --- a/lib/web/sign.go +++ b/lib/web/sign.go @@ -49,8 +49,8 @@ Should be equivalent to running: This endpoint returns a tar.gz compressed archive containing the required files to setup mTLS for the database. */ -func (h *Handler) signDatabaseCertificate(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, tokenRoles types.SystemRoles) (interface{}, error) { - if !tokenRoles.Include(types.RoleDatabase) { +func (h *Handler) signDatabaseCertificate(w http.ResponseWriter, r *http.Request, p httprouter.Params, site reversetunnel.RemoteSite, token types.ProvisionToken) (interface{}, error) { + if !token.GetRoles().Include(types.RoleDatabase) { return nil, trace.AccessDenied("required '%s' role was not provided by the token", types.RoleDatabase) } @@ -65,7 +65,7 @@ func (h *Handler) signDatabaseCertificate(w http.ResponseWriter, r *http.Request virtualFS := identityfile.NewInMemoryConfigWriter() - mTLSReq := db.GenerateDatabaseCertificatesRequest{ + dbCertReq := db.GenerateDatabaseCertificatesRequest{ ClusterAPI: h.auth.proxyClient, Principals: []string{req.Hostname}, OutputFormat: identityfile.FormatDatabase, @@ -74,7 +74,7 @@ func (h *Handler) signDatabaseCertificate(w http.ResponseWriter, r *http.Request IdentityFileWriter: virtualFS, TTL: req.TTL, } - filesWritten, err := db.GenerateDatabaseCertificates(r.Context(), mTLSReq) + filesWritten, err := db.GenerateDatabaseCertificates(r.Context(), dbCertReq) if err != nil { return nil, trace.Wrap(err) } diff --git a/tool/tctl/common/auth_command.go b/tool/tctl/common/auth_command.go index 7a01a340508fa..58d925d078392 100644 --- a/tool/tctl/common/auth_command.go +++ b/tool/tctl/common/auth_command.go @@ -468,7 +468,7 @@ func (a *AuthCommand) generateDatabaseKeys(ctx context.Context, clusterAPI auth. 
func (a *AuthCommand) generateDatabaseKeysForKey(ctx context.Context, clusterAPI auth.ClientI, key *client.Key) error { principals := strings.Split(a.genHost, ",") - genMTLSReq := db.GenerateDatabaseCertificatesRequest{ + dbCertReq := db.GenerateDatabaseCertificatesRequest{ ClusterAPI: clusterAPI, Principals: principals, OutputFormat: a.outputFormat, @@ -477,7 +477,7 @@ func (a *AuthCommand) generateDatabaseKeysForKey(ctx context.Context, clusterAPI TTL: a.genTTL, Key: key, } - filesWritten, err := db.GenerateDatabaseCertificates(ctx, genMTLSReq) + filesWritten, err := db.GenerateDatabaseCertificates(ctx, dbCertReq) if err != nil { return trace.Wrap(err) } @@ -510,10 +510,6 @@ func writeHelperMessageDBmTLS(writer io.Writer, filesWritten []string, output st "output": output, } - if outputFormat == identityfile.FormatSnowflake { - delete(tplVars, "output") - } - return trace.Wrap(tpl.Execute(writer, tplVars)) }
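The new POST /webapi/sites/:site/sign/db endpoint above accepts only a hostname and an optional TTL and streams back a tar.gz archive with the generated mTLS files. Below is a minimal, standalone Go sketch of a client for it; the proxy address, cluster name, output filename, and the exact way the one-time provision token is attached to the request are illustrative assumptions (the handler only shows the token arriving as creds.Password), so verify them against the deployment before relying on them.

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "net/http"
    "os"
)

// signDBRequest mirrors the JSON body accepted by the endpoint: only a
// hostname and an optional TTL; the `db` format is implied by the URL.
type signDBRequest struct {
    Hostname string `json:"hostname"`
    TTL      string `json:"ttl"`
}

func main() {
    // proxyAddr and clusterName are placeholders for this sketch.
    proxyAddr := "https://proxy.example.com:3080"
    clusterName := "mycluster"
    token := os.Getenv("PROVISION_TOKEN")

    body, err := json.Marshal(signDBRequest{Hostname: "pg.example.com", TTL: "2190h"})
    if err != nil {
        panic(err)
    }

    endpoint := fmt.Sprintf("%s/webapi/sites/%s/sign/db", proxyAddr, clusterName)
    req, err := http.NewRequest(http.MethodPost, endpoint, bytes.NewReader(body))
    if err != nil {
        panic(err)
    }
    req.Header.Set("Content-Type", "application/json")
    // Assumption: the one-time provision token travels as the request
    // credentials read by WithProvisionTokenAuth (creds.Password); check how
    // the proxy parses auth headers before depending on this exact header.
    req.Header.Set("Authorization", "Bearer "+token)

    resp, err := http.DefaultClient.Do(req)
    if err != nil {
        panic(err)
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        panic(fmt.Sprintf("unexpected status: %s", resp.Status))
    }

    // The response body is a tar.gz archive with the mTLS files for the database.
    out, err := os.Create("teleport_mTLS_pg.example.com.tar.gz")
    if err != nil {
        panic(err)
    }
    defer out.Close()
    if _, err := io.Copy(out, resp.Body); err != nil {
        panic(err)
    }
}

Because the token is consumed on first use, repeating the same call with the same token is expected to fail with access denied.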
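The comment on consumeTokenForAPICall relies on DeleteToken returning a not-found error when the token is already gone, which turns the fetch-then-delete sequence into a one-time-use check even under concurrent requests. The toy Go sketch below illustrates that reasoning with an in-memory store; memTokenStore, consume, and the role strings are illustrative stand-ins, not Teleport types.

package main

import (
    "errors"
    "fmt"
    "sync"
)

var errNotFound = errors.New("token not found")

// memTokenStore is a toy stand-in for the provisioning backend: GetToken
// reads a token and DeleteToken fails with a not-found error once the token
// has been removed, which is the guarantee the handler depends on.
type memTokenStore struct {
    mu     sync.Mutex
    tokens map[string][]string // token name -> roles
}

func (s *memTokenStore) GetToken(name string) ([]string, error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    roles, ok := s.tokens[name]
    if !ok {
        return nil, errNotFound
    }
    return roles, nil
}

func (s *memTokenStore) DeleteToken(name string) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    if _, ok := s.tokens[name]; !ok {
        return errNotFound
    }
    delete(s.tokens, name)
    return nil
}

// consume mimics the fetch-then-delete pattern: only the caller whose
// DeleteToken succeeds is allowed to proceed.
func consume(s *memTokenStore, name string) ([]string, error) {
    roles, err := s.GetToken(name)
    if err != nil {
        return nil, err
    }
    if err := s.DeleteToken(name); err != nil {
        return nil, err
    }
    return roles, nil
}

func main() {
    store := &memTokenStore{tokens: map[string][]string{"one-time-token": {"Db"}}}

    var wg sync.WaitGroup
    for i := 0; i < 2; i++ {
        wg.Add(1)
        go func(id int) {
            defer wg.Done()
            if roles, err := consume(store, "one-time-token"); err != nil {
                fmt.Printf("client %d: rejected (%v)\n", id, err)
            } else {
                fmt.Printf("client %d: accepted, token roles %v\n", id, roles)
            }
        }(i)
    }
    wg.Wait()
}

However the two goroutines are scheduled, exactly one is accepted and the other is rejected, because only one DeleteToken call can succeed.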
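The final patch rejects an empty principal list in GenerateDatabaseCertificates. The single-empty-string case matters because strings.Split("", ",") in Go yields []string{""}, which is what the tctl path produces when --host is omitted, while the web handler always passes []string{req.Hostname}; Snowflake is exempt because that format does not take a hostname. The short sketch below exercises the same condition with a hypothetical rejectPrincipals helper and plain format strings instead of the identityfile constants.

package main

import (
    "fmt"
    "strings"
)

// rejectPrincipals mirrors the validation added in the patch: an empty list,
// or a list that is just one empty string (as produced by strings.Split("", ",")),
// is rejected, except for the Snowflake format which needs no hostname.
func rejectPrincipals(principals []string, format string) bool {
    return len(principals) == 0 ||
        (len(principals) == 1 && principals[0] == "" && format != "snowflake")
}

func main() {
    cases := []struct {
        host   string
        format string
    }{
        {"", "db"},        // e.g. tctl auth sign --format=db without --host
        {"", "snowflake"}, // Snowflake does not require a hostname
        {"pg.example.com", "db"},
        {"a.example.com,b.example.com", "db"},
    }
    for _, c := range cases {
        principals := strings.Split(c.host, ",")
        fmt.Printf("host=%q format=%s rejected=%v\n", c.host, c.format, rejectPrincipals(principals, c.format))
    }
}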