Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Reduce the lifetime of exported kubecfg credentials #9593

Merged
merged 1 commit into from
Aug 15, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion cmd/kops/create_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,6 +37,7 @@ import (
"k8s.io/kops/pkg/assets"
"k8s.io/kops/pkg/commands"
"k8s.io/kops/pkg/featureflag"
"k8s.io/kops/pkg/kubeconfig"
"k8s.io/kops/upup/pkg/fi"
"k8s.io/kops/upup/pkg/fi/cloudup"
"k8s.io/kops/upup/pkg/fi/utils"
Expand Down Expand Up @@ -613,7 +614,7 @@ func RunCreateCluster(ctx context.Context, f *util.Factory, out io.Writer, c *Cr
updateClusterOptions.Yes = c.Yes
updateClusterOptions.Target = c.Target
updateClusterOptions.OutDir = c.OutDir
updateClusterOptions.admin = true
updateClusterOptions.admin = kubeconfig.DefaultKubecfgAdminLifetime
updateClusterOptions.CreateKubecfg = true

// SSHPublicKey has already been mapped
Expand Down
8 changes: 5 additions & 3 deletions cmd/kops/export_kubecfg.go
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,7 @@ import (
"context"
"fmt"
"io"
"time"

"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
Expand Down Expand Up @@ -53,7 +54,7 @@ var (
type ExportKubecfgOptions struct {
KubeConfigPath string
all bool
admin bool
admin time.Duration
user string
}

Expand All @@ -76,7 +77,8 @@ func NewCmdExportKubecfg(f *util.Factory, out io.Writer) *cobra.Command {

cmd.Flags().StringVar(&options.KubeConfigPath, "kubeconfig", options.KubeConfigPath, "the location of the kubeconfig file to create.")
cmd.Flags().BoolVar(&options.all, "all", options.all, "export all clusters from the kops state store")
cmd.Flags().BoolVar(&options.admin, "admin", options.admin, "export the cluster admin user and add it to the context")
cmd.Flags().DurationVar(&options.admin, "admin", options.admin, "export a cluster admin user credential with the given lifetime and add it to the cluster context")
cmd.Flags().Lookup("admin").NoOptDefVal = kubeconfig.DefaultKubecfgAdminLifetime.String()
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

OK, now that is cool.

cmd.Flags().StringVar(&options.user, "user", options.user, "add an existing user to the cluster context")
johngmyers marked this conversation as resolved.
Show resolved Hide resolved

return cmd
Expand All @@ -92,7 +94,7 @@ func RunExportKubecfg(ctx context.Context, f *util.Factory, out io.Writer, optio
return fmt.Errorf("cannot use both --all flag and positional arguments")
}
}
if options.admin && options.user != "" {
if options.admin != 0 && options.user != "" {
return fmt.Errorf("cannot use both --admin and --user")
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Should we allow setting the duration when --user is specified? (--user is not an option I use myself, so I don't really know)

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

--user doesn't generate credentials, so a duration isn't relevant.

}

Expand Down
34 changes: 13 additions & 21 deletions cmd/kops/update_cluster.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ import (
"io/ioutil"
"path/filepath"
"strings"
"time"

"github.com/spf13/cobra"
"github.com/spf13/viper"
Expand Down Expand Up @@ -67,7 +68,7 @@ type UpdateClusterOptions struct {
AllowKopsDowngrade bool

CreateKubecfg bool
admin bool
admin time.Duration
user string

Phase string
Expand Down Expand Up @@ -116,7 +117,8 @@ func NewCmdUpdateCluster(f *util.Factory, out io.Writer) *cobra.Command {
cmd.Flags().StringVar(&options.SSHPublicKey, "ssh-public-key", options.SSHPublicKey, "SSH public key to use (deprecated: use kops create secret instead)")
cmd.Flags().StringVar(&options.OutDir, "out", options.OutDir, "Path to write any local output")
cmd.Flags().BoolVar(&options.CreateKubecfg, "create-kube-config", options.CreateKubecfg, "Will control automatically creating the kube config file on your local filesystem")
cmd.Flags().BoolVar(&options.admin, "admin", options.admin, "Also export the admin user. Implies --create-kube-config")
cmd.Flags().DurationVar(&options.admin, "admin", options.admin, "Also export a cluster admin user credential with the specified lifetime and add it to the cluster context")
cmd.Flags().Lookup("admin").NoOptDefVal = kubeconfig.DefaultKubecfgAdminLifetime.String()
cmd.Flags().StringVar(&options.user, "user", options.user, "Existing user to add to the cluster context. Implies --create-kube-config")
cmd.Flags().BoolVar(&options.AllowKopsDowngrade, "allow-kops-downgrade", options.AllowKopsDowngrade, "Allow an older version of kops to update the cluster than last used")
cmd.Flags().StringVar(&options.Phase, "phase", options.Phase, "Subset of tasks to run: "+strings.Join(cloudup.Phases.List(), ", "))
Expand All @@ -141,15 +143,15 @@ func RunUpdateCluster(ctx context.Context, f *util.Factory, clusterName string,
isDryrun := false
targetName := c.Target

if c.admin && c.user != "" {
if c.admin != 0 && c.user != "" {
return nil, fmt.Errorf("cannot use both --admin and --user")
}

if c.CreateKubecfg && !c.admin && c.user == "" {
if c.CreateKubecfg && c.admin == 0 && c.user == "" {
return nil, fmt.Errorf("--create-kube-config requires that either --admin or --user is set")
}

if c.admin && !c.CreateKubecfg {
if c.admin != 0 && !c.CreateKubecfg {
klog.Info("--admin implies --create-kube-config")
c.CreateKubecfg = true
}
Expand Down Expand Up @@ -296,25 +298,15 @@ func RunUpdateCluster(ctx context.Context, f *util.Factory, clusterName string,
}
firstRun = !hasKubecfg

kubecfgCert, err := keyStore.FindCert("kubecfg")
klog.Infof("Exporting kubecfg for cluster")
conf, err := kubeconfig.BuildKubecfg(cluster, keyStore, secretStore, &commands.CloudDiscoveryStatusStore{}, clientcmd.NewDefaultPathOptions(), c.admin, c.user)
if err != nil {
// This is only a convenience; don't error because of it
klog.Warningf("Ignoring error trying to fetch kubecfg cert - won't export kubecfg: %v", err)
kubecfgCert = nil
return nil, err
}
if kubecfgCert != nil {
klog.Infof("Exporting kubecfg for cluster")
conf, err := kubeconfig.BuildKubecfg(cluster, keyStore, secretStore, &commands.CloudDiscoveryStatusStore{}, clientcmd.NewDefaultPathOptions(), c.admin, c.user)
if err != nil {
return nil, err
}

err = conf.WriteKubecfg()
if err != nil {
return nil, err
}
} else {
klog.Infof("kubecfg cert not found; won't export kubecfg")
err = conf.WriteKubecfg()
if err != nil {
return nil, err
}
}

Expand Down
10 changes: 5 additions & 5 deletions docs/cli/kops_export_kubecfg.md
Original file line number Diff line number Diff line change
Expand Up @@ -26,11 +26,11 @@ kops export kubecfg CLUSTERNAME [flags]
### Options

```
--admin export the cluster admin user and add it to the context
--all export all clusters from the kops state store
-h, --help help for kubecfg
--kubeconfig string the location of the kubeconfig file to create.
--user string add an existing user to the cluster context
--admin duration[=18h0m0s] export a cluster admin user credential with the given lifetime and add it to the cluster context
--all export all clusters from the kops state store
-h, --help help for kubecfg
--kubeconfig string the location of the kubeconfig file to create.
--user string add an existing user to the cluster context
```

### Options inherited from parent commands
Expand Down
2 changes: 1 addition & 1 deletion docs/cli/kops_update_cluster.md
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ kops update cluster [flags]
### Options

```
--admin Also export the admin user. Implies --create-kube-config
--admin duration[=18h0m0s] Also export a cluster admin user credential with the specified lifetime and add it to the cluster context
--allow-kops-downgrade Allow an older version of kops to update the cluster than last used
--create-kube-config Will control automatically creating the kube config file on your local filesystem
-h, --help help for cluster
Expand Down
5 changes: 4 additions & 1 deletion docs/releases/1.19-NOTES.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,10 @@ Kops will no longer automatically export the kubernetes config on `kops update c

Similarly, `kops export kubecfg` will also require passing either the `--admin` or `--user` flag if the context does not already exist.

`kops create cluster --yes` exports the admin user along with rest of the cluster config, as is existing behaviour.
By default, the credentials of any exported admin user now have a lifetime of 18 hours. The lifetime of the exported
credentials may be specified as a value of the `--admin` flag. To get the previous behavior, specify `--admin=87600h` to either `kops update cluster` or `kops export kubecfg`.

`kops create cluster --yes` exports the admin user along with rest of the cluster config, as was the previous behaviour (except for the 18-hour validity).

## Other significant changes

Expand Down
2 changes: 2 additions & 0 deletions pkg/kubeconfig/BUILD.bazel
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,8 @@ go_library(
"//pkg/apis/kops:go_default_library",
"//pkg/apis/kops/util:go_default_library",
"//pkg/dns:go_default_library",
"//pkg/pki:go_default_library",
"//pkg/rbac:go_default_library",
"//upup/pkg/fi:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
Expand Down
51 changes: 28 additions & 23 deletions pkg/kubeconfig/create_kubecfg.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,18 +17,24 @@ limitations under the License.
package kubeconfig

import (
"crypto/x509/pkix"
"fmt"
"sort"
"time"

"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
"k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/apis/kops/util"
"k8s.io/kops/pkg/dns"
"k8s.io/kops/pkg/pki"
"k8s.io/kops/pkg/rbac"
"k8s.io/kops/upup/pkg/fi"
)

func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.SecretStore, status kops.StatusStore, configAccess clientcmd.ConfigAccess, admin bool, user string) (*KubeconfigBuilder, error) {
const DefaultKubecfgAdminLifetime = 18 * time.Hour
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Could you explain the 18 hours default?
It is a pretty big change from previous versions.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ten years is an unreasonably long time to have an irrevocable credential.

18 hours lets an admin go a working day without having to obtain a new credential, yet makes it likely that their next day starts off with an expired credential. It is more convenient to have to reacquire a credential at the start of a task than mid-way through it.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Seems like a good explanation for the purpose of strong security.

IMHO, this is a step in the right direction, but may be a bit too extreme. I don't think there was a strong request to reduce the lifetime of the credentials so much. It may be a bit excessive for people with multiple clusters to do this every day, or for every test cluster. I would suggest to change the default to something like 7-30 days that would at least not require new exports for short lived clusters.

Also, an env var may be desirable to change the default to something more suitable for each operator.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

7-30 days is too long for a default lifetime. Fixed-lifetime credentials that aren't themselves protected by other credentials typically have a lifetime no longer than a day. For example, temporary IAM credentials have a default lifetime of 12 hours.

Do we have a precedent/pattern for env vars providing defaults for flags? The env vars I can think of don't have corresponding flags. Do we want to adopt something like viper?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Specifically on the viper point, we do have limited precedent for --state KOPS_STATE_STORE and we even have a config file. However, I couldn't figure out how to get viper to just do this automatically - the state store configuration is a non-trivial amount of repetition. I'm not sure if we're "holding it wrong" but it wasn't very clear.

Multiple clusters (as hakman points out) is indeed going to be more annoying. I'm wondering if we should think about a helper process for login (that's essentially how GKE gets around this problem), or even nudging users to using a different authenticator, reserving the admin kubeconfig for "break glass".

I'm wondering if we could use a helper process to generate this on the fly, effectively chaining off your AWS credentials, so we'd inherit the same validity period.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A kubectl credential plugin is an interesting idea.

For production clusters it would be better to nudge users to use a different authenticator.


func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.SecretStore, status kops.StatusStore, configAccess clientcmd.ConfigAccess, admin time.Duration, user string) (*KubeconfigBuilder, error) {
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Nit: I might consider splitting admin into buildAdmin and duration; keep this function logical and not too tightly bound to the flags. Although admin=false/0 is weird... it looks like the kubeconfig won't work?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

A duration without a buildAdmin wouldn't make sense. Built credentials with duration 0 would not be useful. buildAdmin without duration would be pushing the defaulting logic down, making it less visible to the command line.

clusterName := cluster.ObjectMeta.Name

master := cluster.Spec.MasterPublicName
Expand Down Expand Up @@ -104,30 +110,29 @@ func BuildKubecfg(cluster *kops.Cluster, keyStore fi.Keystore, secretStore fi.Se
}
}

if admin {
{
cert, key, _, err := keyStore.FindKeypair("kubecfg")
if err != nil {
return nil, fmt.Errorf("error fetching kubecfg keypair: %v", err)
}
if cert != nil {
b.ClientCert, err = cert.AsBytes()
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("cannot find kubecfg certificate")
}
if key != nil {
b.ClientKey, err = key.AsBytes()
if err != nil {
return nil, err
}
} else {
return nil, fmt.Errorf("cannot find kubecfg key")
}
if admin != 0 {
req := pki.IssueCertRequest{
Signer: fi.CertificateIDCA,
Type: "client",
Subject: pkix.Name{
CommonName: "kubecfg",
Copy link
Member Author

@johngmyers johngmyers Jul 18, 2020

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Perhaps we should use user.Current().Username instead?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yeah, that's not a bad change to make at the same time. Or maybe "admin-kubecfg-${USER}", just in case of usage inside a container where I expect most of these will end up as "root" or "jenkins" which might be confusing.

Organization: []string{rbac.SystemPrivilegedGroup},
},
Validity: admin,
}
cert, privateKey, _, err := pki.IssueCert(&req, keyStore)
if err != nil {
return nil, err
}

b.ClientCert, err = cert.AsBytes()
if err != nil {
return nil, err
}
b.ClientKey, err = privateKey.AsBytes()
if err != nil {
return nil, err
}
}

b.Server = server
Expand Down
67 changes: 38 additions & 29 deletions pkg/kubeconfig/create_kubecfg_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@ package kubeconfig
import (
"reflect"
"testing"
"time"

"k8s.io/client-go/tools/clientcmd"
"k8s.io/kops/pkg/apis/kops"
Expand Down Expand Up @@ -113,13 +114,19 @@ func fakePrivateKey() *pki.PrivateKey {
}

func TestBuildKubecfg(t *testing.T) {
originalPKIDefaultPrivateKeySize := pki.DefaultPrivateKeySize
pki.DefaultPrivateKeySize = 512
defer func() {
pki.DefaultPrivateKeySize = originalPKIDefaultPrivateKeySize
}()

type args struct {
cluster *kops.Cluster
keyStore fakeKeyStore
secretStore fi.SecretStore
status fakeStatusStore
configAccess clientcmd.ConfigAccess
admin bool
admin time.Duration
user string
}

Expand Down Expand Up @@ -148,16 +155,14 @@ func TestBuildKubecfg(t *testing.T) {
nil,
fakeStatusStore{},
nil,
true,
DefaultKubecfgAdminLifetime,
"",
},
&KubeconfigBuilder{
Context: "testcluster",
Server: "https://testcluster.test.com",
CACert: []byte(certData),
ClientCert: []byte(certData),
ClientKey: []byte(privatekeyData),
User: "testcluster",
Context: "testcluster",
Server: "https://testcluster.test.com",
CACert: []byte(certData),
User: "testcluster",
},
false,
},
Expand All @@ -176,16 +181,14 @@ func TestBuildKubecfg(t *testing.T) {
nil,
fakeStatusStore{},
nil,
false,
0,
"myuser",
},
&KubeconfigBuilder{
Context: "testcluster",
Server: "https://testcluster.test.com",
CACert: []byte(certData),
ClientCert: nil,
ClientKey: nil,
User: "myuser",
Context: "testcluster",
Server: "https://testcluster.test.com",
CACert: []byte(certData),
User: "myuser",
},
false,
},
Expand All @@ -204,16 +207,14 @@ func TestBuildKubecfg(t *testing.T) {
nil,
fakeStatusStore{},
nil,
true,
0,
"",
},
&KubeconfigBuilder{
Context: "emptyMasterPublicNameCluster",
Server: "https://api.emptyMasterPublicNameCluster",
CACert: []byte(certData),
ClientCert: []byte(certData),
ClientKey: []byte(privatekeyData),
User: "emptyMasterPublicNameCluster",
Context: "emptyMasterPublicNameCluster",
Server: "https://api.emptyMasterPublicNameCluster",
CACert: []byte(certData),
User: "emptyMasterPublicNameCluster",
},
false,
},
Expand All @@ -240,16 +241,14 @@ func TestBuildKubecfg(t *testing.T) {
},
},
nil,
true,
0,
"",
},
&KubeconfigBuilder{
Context: "testgossipcluster.k8s.local",
Server: "https://elbHostName",
CACert: []byte(certData),
ClientCert: []byte(certData),
ClientKey: []byte(privatekeyData),
User: "testgossipcluster.k8s.local",
Context: "testgossipcluster.k8s.local",
Server: "https://elbHostName",
CACert: []byte(certData),
User: "testgossipcluster.k8s.local",
},
false,
},
Expand All @@ -261,6 +260,16 @@ func TestBuildKubecfg(t *testing.T) {
t.Errorf("BuildKubecfg() error = %v, wantErr %v", err, tt.wantErr)
return
}
if tt.args.admin != 0 {
if got.ClientCert == nil {
t.Errorf("Expected ClientCert, got nil")
}
if got.ClientKey == nil {
t.Errorf("Expected ClientKey, got nil")
}
tt.want.ClientCert = got.ClientCert
tt.want.ClientKey = got.ClientKey
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("BuildKubecfg() = %v, want %v", got, tt.want)
}
Expand Down
Loading