;
-}
-
export interface SearchSelectOption {
name: string;
id: string;
diff --git a/ui/types/vault/route.d.ts b/ui/types/vault/route.d.ts
new file mode 100644
index 000000000000..159718dafae5
--- /dev/null
+++ b/ui/types/vault/route.d.ts
@@ -0,0 +1,25 @@
+/**
+ * Copyright (c) HashiCorp, Inc.
+ * SPDX-License-Identifier: BUSL-1.1
+ */
+import Route from '@ember/routing/route';
+
+/*
+Get the resolved type of an item.
+https://docs.ember-cli-typescript.com/cookbook/working-with-route-models
+
+- If the item is a promise, the result will be the resolved value type
+- If the item is not a promise, the result will just be the type of the item
+*/
+export type Resolved = P extends Promise ? T : P;
+
+/*
+Get the resolved model value from a route.
+Example use:
+
+import type { ModelFrom } from 'vault/vault/router';
+export default class MyRoute extends Route {
+ redirect(model: ModelFrom) {}
+}
+*/
+export type ModelFrom = Resolved>;
From 2051758f04434fbcf99dc4180802e08e2a073748 Mon Sep 17 00:00:00 2001
From: miagilepner
Date: Fri, 5 Jan 2024 11:27:20 +0100
Subject: [PATCH 25/39] rename secret sync association to secret syncs in
activity log (#24671)
---
vault/activity/query.go | 16 ++++++++--------
vault/activity_log.go | 12 ++++++------
vault/activity_log_test.go | 26 +++++++++++++-------------
vault/activity_log_util_common.go | 10 +++++-----
4 files changed, 32 insertions(+), 32 deletions(-)
diff --git a/vault/activity/query.go b/vault/activity/query.go
index b49ba592ce88..bfe9eb3a1ccc 100644
--- a/vault/activity/query.go
+++ b/vault/activity/query.go
@@ -19,17 +19,17 @@ import (
)
type NamespaceRecord struct {
- NamespaceID string `json:"namespace_id"`
- Entities uint64 `json:"entities"`
- NonEntityTokens uint64 `json:"non_entity_tokens"`
- SecretSyncAssociations uint64 `json:"secret_sync_associations"`
- Mounts []*MountRecord `json:"mounts"`
+ NamespaceID string `json:"namespace_id"`
+ Entities uint64 `json:"entities"`
+ NonEntityTokens uint64 `json:"non_entity_tokens"`
+ SecretSyncs uint64 `json:"secret_syncs"`
+ Mounts []*MountRecord `json:"mounts"`
}
type CountsRecord struct {
- EntityClients int `json:"entity_clients"`
- NonEntityClients int `json:"non_entity_clients"`
- SecretSyncAssociations int `json:"secret_sync_associations"`
+ EntityClients int `json:"entity_clients"`
+ NonEntityClients int `json:"non_entity_clients"`
+ SecretSyncs int `json:"secret_syncs"`
}
type NewClientRecord struct {
diff --git a/vault/activity_log.go b/vault/activity_log.go
index 610fbc9fc35d..2f6c8acb48ef 100644
--- a/vault/activity_log.go
+++ b/vault/activity_log.go
@@ -82,9 +82,9 @@ const (
// Known types of activity events; there's presently two internal event
// types (tokens/clients with and without entities), but we're beginning
// to support additional buckets for e.g., ACME requests.
- nonEntityTokenActivityType = "non-entity-token"
- entityActivityType = "entity"
- secretSyncAssociationActivityType = "secret-sync-association"
+ nonEntityTokenActivityType = "non-entity-token"
+ entityActivityType = "entity"
+ secretSyncActivityType = "secret-sync"
)
type segmentInfo struct {
@@ -2033,9 +2033,9 @@ func (p *processCounts) contains(client *activity.EntityRecord) bool {
func (p *processCounts) toCountsRecord() *activity.CountsRecord {
return &activity.CountsRecord{
- EntityClients: p.countByType(entityActivityType),
- NonEntityClients: p.countByType(nonEntityTokenActivityType),
- SecretSyncAssociations: p.countByType(secretSyncAssociationActivityType),
+ EntityClients: p.countByType(entityActivityType),
+ NonEntityClients: p.countByType(nonEntityTokenActivityType),
+ SecretSyncs: p.countByType(secretSyncActivityType),
}
}
diff --git a/vault/activity_log_test.go b/vault/activity_log_test.go
index 76a4edfa7561..3b3d44ee7eaa 100644
--- a/vault/activity_log_test.go
+++ b/vault/activity_log_test.go
@@ -4295,7 +4295,7 @@ func TestActivityLog_processNewClients_delete(t *testing.T) {
byNS := newClients.Namespaces
counts := newClients.Counts
- for _, typ := range []string{nonEntityTokenActivityType, secretSyncAssociationActivityType, entityActivityType, ACMEActivityType} {
+ for _, typ := range []string{nonEntityTokenActivityType, secretSyncActivityType, entityActivityType, ACMEActivityType} {
require.NotContains(t, counts.clientsByType(typ), clientID)
require.NotContains(t, byNS[namespace].Mounts[mount].Counts.clientsByType(typ), clientID)
require.NotContains(t, byNS[namespace].Counts.clientsByType(typ), clientID)
@@ -4308,7 +4308,7 @@ func TestActivityLog_processNewClients_delete(t *testing.T) {
run(t, nonEntityTokenActivityType)
})
t.Run("secret sync", func(t *testing.T) {
- run(t, secretSyncAssociationActivityType)
+ run(t, secretSyncActivityType)
})
t.Run("acme", func(t *testing.T) {
run(t, ACMEActivityType)
@@ -4342,7 +4342,7 @@ func TestActivityLog_processClientRecord(t *testing.T) {
require.Equal(t, byMonth[monthIndex].Namespaces, byNS)
require.Equal(t, byMonth[monthIndex].NewClients.Namespaces, byNS)
- for _, typ := range []string{nonEntityTokenActivityType, secretSyncAssociationActivityType, entityActivityType} {
+ for _, typ := range []string{nonEntityTokenActivityType, secretSyncActivityType, entityActivityType} {
if clientType == typ || (clientType == ACMEActivityType && typ == nonEntityTokenActivityType) {
require.Contains(t, byMonth[monthIndex].Counts.clientsByType(typ), clientID)
require.Contains(t, byMonth[monthIndex].NewClients.Counts.clientsByType(typ), clientID)
@@ -4364,7 +4364,7 @@ func TestActivityLog_processClientRecord(t *testing.T) {
run(t, entityActivityType)
})
t.Run("secret sync", func(t *testing.T) {
- run(t, secretSyncAssociationActivityType)
+ run(t, secretSyncActivityType)
})
t.Run("acme", func(t *testing.T) {
run(t, ACMEActivityType)
@@ -4651,7 +4651,7 @@ func TestActivityLog_writePrecomputedQuery(t *testing.T) {
ClientID: "id-3",
NamespaceID: "ns-3",
MountAccessor: "mnt-3",
- ClientType: secretSyncAssociationActivityType,
+ ClientType: secretSyncActivityType,
}
now := time.Now()
@@ -4690,13 +4690,13 @@ func TestActivityLog_writePrecomputedQuery(t *testing.T) {
require.Equal(t, ns1.Entities, uint64(1))
require.Equal(t, ns1.NonEntityTokens, uint64(0))
- require.Equal(t, ns1.SecretSyncAssociations, uint64(0))
+ require.Equal(t, ns1.SecretSyncs, uint64(0))
require.Equal(t, ns2.Entities, uint64(0))
require.Equal(t, ns2.NonEntityTokens, uint64(1))
- require.Equal(t, ns2.SecretSyncAssociations, uint64(0))
+ require.Equal(t, ns2.SecretSyncs, uint64(0))
require.Equal(t, ns3.Entities, uint64(0))
require.Equal(t, ns3.NonEntityTokens, uint64(0))
- require.Equal(t, ns3.SecretSyncAssociations, uint64(1))
+ require.Equal(t, ns3.SecretSyncs, uint64(1))
require.Len(t, ns1.Mounts, 1)
require.Len(t, ns2.Mounts, 1)
@@ -4711,29 +4711,29 @@ func TestActivityLog_writePrecomputedQuery(t *testing.T) {
// ns1 only has an entity client
require.Equal(t, 1, ns1.Mounts[0].Counts.EntityClients)
require.Equal(t, 0, ns1.Mounts[0].Counts.NonEntityClients)
- require.Equal(t, 0, ns1.Mounts[0].Counts.SecretSyncAssociations)
+ require.Equal(t, 0, ns1.Mounts[0].Counts.SecretSyncs)
// ns2 only has a non entity client
require.Equal(t, 0, ns2.Mounts[0].Counts.EntityClients)
require.Equal(t, 1, ns2.Mounts[0].Counts.NonEntityClients)
- require.Equal(t, 0, ns2.Mounts[0].Counts.SecretSyncAssociations)
+ require.Equal(t, 0, ns2.Mounts[0].Counts.SecretSyncs)
// ns3 only has a secret sync association
require.Equal(t, 0, ns3.Mounts[0].Counts.EntityClients)
require.Equal(t, 0, ns3.Mounts[0].Counts.NonEntityClients)
- require.Equal(t, 1, ns3.Mounts[0].Counts.SecretSyncAssociations)
+ require.Equal(t, 1, ns3.Mounts[0].Counts.SecretSyncs)
monthRecord := val.Months[0]
// there should only be one month present, since the clients were added with the same timestamp
require.Equal(t, monthRecord.Timestamp, timeutil.StartOfMonth(now).UTC().Unix())
require.Equal(t, 1, monthRecord.Counts.NonEntityClients)
require.Equal(t, 1, monthRecord.Counts.EntityClients)
- require.Equal(t, 1, monthRecord.Counts.SecretSyncAssociations)
+ require.Equal(t, 1, monthRecord.Counts.SecretSyncs)
require.Len(t, monthRecord.Namespaces, 3)
require.Len(t, monthRecord.NewClients.Namespaces, 3)
require.Equal(t, 1, monthRecord.NewClients.Counts.EntityClients)
require.Equal(t, 1, monthRecord.NewClients.Counts.NonEntityClients)
- require.Equal(t, 1, monthRecord.NewClients.Counts.SecretSyncAssociations)
+ require.Equal(t, 1, monthRecord.NewClients.Counts.SecretSyncs)
}
type mockTimeNowClock struct {
diff --git a/vault/activity_log_util_common.go b/vault/activity_log_util_common.go
index bbce9b73c34d..f0f413e9aadd 100644
--- a/vault/activity_log_util_common.go
+++ b/vault/activity_log_util_common.go
@@ -182,11 +182,11 @@ func (a *ActivityLog) transformALNamespaceBreakdowns(nsData map[string]*processB
for nsID, ns := range nsData {
nsRecord := activity.NamespaceRecord{
- NamespaceID: nsID,
- Entities: uint64(ns.Counts.countByType(entityActivityType)),
- NonEntityTokens: uint64(ns.Counts.countByType(nonEntityTokenActivityType)),
- SecretSyncAssociations: uint64(ns.Counts.countByType(secretSyncAssociationActivityType)),
- Mounts: a.transformActivityLogMounts(ns.Mounts),
+ NamespaceID: nsID,
+ Entities: uint64(ns.Counts.countByType(entityActivityType)),
+ NonEntityTokens: uint64(ns.Counts.countByType(nonEntityTokenActivityType)),
+ SecretSyncs: uint64(ns.Counts.countByType(secretSyncActivityType)),
+ Mounts: a.transformActivityLogMounts(ns.Mounts),
}
byNamespace = append(byNamespace, &nsRecord)
}
From 5aea0dac1c68b5922964230bd69e08a63975d9cf Mon Sep 17 00:00:00 2001
From: miagilepner
Date: Fri, 5 Jan 2024 14:11:23 +0100
Subject: [PATCH 26/39] [VAULT-22641] Include secret sync associations with
hyperloglog estimations (#24586)
* include secret sync associations with hlls
* add test comment
* secret sync associations -> secret syncs
---
vault/activity_log_util_common.go | 78 ++++++------
vault/activity_log_util_common_test.go | 169 +++++++++++++------------
2 files changed, 127 insertions(+), 120 deletions(-)
diff --git a/vault/activity_log_util_common.go b/vault/activity_log_util_common.go
index f0f413e9aadd..eca59e3c0a55 100644
--- a/vault/activity_log_util_common.go
+++ b/vault/activity_log_util_common.go
@@ -110,59 +110,61 @@ func (a *ActivityLog) computeCurrentMonthForBillingPeriodInternal(ctx context.Co
}
hllMonthlyTimestamp = timeutil.StartOfNextMonth(hllMonthlyTimestamp)
}
-
- // Now we will add the clients for the current month to a copy of the billing period's hll to
- // see how the cardinality grows.
- billingPeriodHLLWithCurrentMonthEntityClients := billingPeriodHLL.Clone()
- billingPeriodHLLWithCurrentMonthNonEntityClients := billingPeriodHLL.Clone()
-
// There's at most one month of data here. We should validate this assumption explicitly
if len(byMonth) > 1 {
return nil, errors.New(fmt.Sprintf("multiple months of data found in partial month's client count breakdowns: %+v\n", byMonth))
}
- totalEntities := 0
- totalNonEntities := 0
- for _, month := range byMonth {
+ activityTypes := []string{entityActivityType, nonEntityTokenActivityType, secretSyncActivityType}
+
+ // Now we will add the clients for the current month to a copy of the billing period's hll to
+ // see how the cardinality grows.
+ hllByType := make(map[string]*hyperloglog.Sketch, len(activityTypes))
+ totalByType := make(map[string]int, len(activityTypes))
+ for _, typ := range activityTypes {
+ hllByType[typ] = billingPeriodHLL.Clone()
+ }
+ for _, month := range byMonth {
if month.NewClients == nil || month.NewClients.Counts == nil || month.Counts == nil {
return nil, errors.New("malformed current month used to calculate current month's activity")
}
- // Note that the following calculations assume that all clients seen are currently in
- // the NewClients section of byMonth. It is best to explicitly check this, just verify
- // our assumptions about the passed in byMonth argument.
- if month.Counts.countByType(entityActivityType) != month.NewClients.Counts.countByType(entityActivityType) ||
- month.Counts.countByType(nonEntityTokenActivityType) != month.NewClients.Counts.countByType(nonEntityTokenActivityType) {
- return nil, errors.New("current month clients cache assumes billing period")
- }
-
- // All the clients for the current month are in the newClients section, initially.
- // We need to deduplicate these clients across the billing period by adding them
- // into the billing period hyperloglogs.
- entities := month.NewClients.Counts.clientsByType(entityActivityType)
- nonEntities := month.NewClients.Counts.clientsByType(nonEntityTokenActivityType)
- if entities != nil {
- for entityID := range entities {
- billingPeriodHLLWithCurrentMonthEntityClients.Insert([]byte(entityID))
- totalEntities += 1
+ for _, typ := range activityTypes {
+ // Note that the following calculations assume that all clients seen are currently in
+ // the NewClients section of byMonth. It is best to explicitly check this, just verify
+ // our assumptions about the passed in byMonth argument.
+ if month.Counts.countByType(typ) != month.NewClients.Counts.countByType(typ) {
+ return nil, errors.New("current month clients cache assumes billing period")
}
- }
- if nonEntities != nil {
- for nonEntityID := range nonEntities {
- billingPeriodHLLWithCurrentMonthNonEntityClients.Insert([]byte(nonEntityID))
- totalNonEntities += 1
+ for clientID := range month.NewClients.Counts.clientsByType(typ) {
+ // All the clients for the current month are in the newClients section, initially.
+ // We need to deduplicate these clients across the billing period by adding them
+ // into the billing period hyperloglogs.
+ hllByType[typ].Insert([]byte(clientID))
+ totalByType[typ] += 1
}
}
}
- // The number of new entities for the current month is approximately the size of the hll with
- // the current month's entities minus the size of the initial billing period hll.
- currentMonthNewEntities := billingPeriodHLLWithCurrentMonthEntityClients.Estimate() - billingPeriodHLL.Estimate()
- currentMonthNewNonEntities := billingPeriodHLLWithCurrentMonthNonEntityClients.Estimate() - billingPeriodHLL.Estimate()
+ currentMonthNewByType := make(map[string]int, len(activityTypes))
+ for _, typ := range activityTypes {
+ // The number of new entities for the current month is approximately the size of the hll with
+ // the current month's entities minus the size of the initial billing period hll.
+ currentMonthNewByType[typ] = int(hllByType[typ].Estimate() - billingPeriodHLL.Estimate())
+ }
+
return &activity.MonthRecord{
- Timestamp: timeutil.StartOfMonth(endTime).UTC().Unix(),
- NewClients: &activity.NewClientRecord{Counts: &activity.CountsRecord{EntityClients: int(currentMonthNewEntities), NonEntityClients: int(currentMonthNewNonEntities)}},
- Counts: &activity.CountsRecord{EntityClients: totalEntities, NonEntityClients: totalNonEntities},
+ Timestamp: timeutil.StartOfMonth(endTime).UTC().Unix(),
+ NewClients: &activity.NewClientRecord{Counts: &activity.CountsRecord{
+ EntityClients: currentMonthNewByType[entityActivityType],
+ NonEntityClients: currentMonthNewByType[nonEntityTokenActivityType],
+ SecretSyncs: currentMonthNewByType[secretSyncActivityType],
+ }},
+ Counts: &activity.CountsRecord{
+ EntityClients: totalByType[entityActivityType],
+ NonEntityClients: totalByType[nonEntityTokenActivityType],
+ SecretSyncs: totalByType[secretSyncActivityType],
+ },
}, nil
}
diff --git a/vault/activity_log_util_common_test.go b/vault/activity_log_util_common_test.go
index af8d8b49951b..91b064aa4c35 100644
--- a/vault/activity_log_util_common_test.go
+++ b/vault/activity_log_util_common_test.go
@@ -18,28 +18,31 @@ import (
"google.golang.org/protobuf/proto"
)
-// Test_ActivityLog_ComputeCurrentMonthForBillingPeriodInternal creates 3 months of hyperloglogs and fills them with
-// overlapping clients. The test calls computeCurrentMonthForBillingPeriodInternal with the current month map having
-// some overlap with the previous months. The test then verifies that the results have the correct number of entity and
-// non-entity clients. The test also calls computeCurrentMonthForBillingPeriodInternal with an empty current month map,
+// Test_ActivityLog_ComputeCurrentMonthForBillingPeriodInternal creates 3 months
+// of hyperloglogs and fills them with overlapping clients. The test calls
+// computeCurrentMonthForBillingPeriodInternal with the current month map having
+// some overlap with the previous months. The test then verifies that the
+// results have the correct number of entity, non-entity, and secret sync
+// association clients. The test also calls
+// computeCurrentMonthForBillingPeriodInternal with an empty current month map,
// and verifies that the results are all 0.
func Test_ActivityLog_ComputeCurrentMonthForBillingPeriodInternal(t *testing.T) {
- // populate the first month with clients 1-10
+ // populate the first month with clients 1-20
monthOneHLL := hyperloglog.New()
- // populate the second month with clients 5-15
+ // populate the second month with clients 10-30
monthTwoHLL := hyperloglog.New()
- // populate the third month with clients 10-20
+ // populate the third month with clients 20-40
monthThreeHLL := hyperloglog.New()
- for i := 0; i < 20; i++ {
+ for i := 0; i < 40; i++ {
clientID := []byte(fmt.Sprintf("client_%d", i))
- if i < 10 {
+ if i < 20 {
monthOneHLL.Insert(clientID)
}
- if 5 <= i && i < 15 {
+ if 10 <= i && i < 20 {
monthTwoHLL.Insert(clientID)
}
- if 10 <= i && i < 20 {
+ if 20 <= i && i < 40 {
monthThreeHLL.Insert(clientID)
}
}
@@ -57,51 +60,72 @@ func Test_ActivityLog_ComputeCurrentMonthForBillingPeriodInternal(t *testing.T)
return nil, fmt.Errorf("bad start time")
}
+ // Below we register the entity, non-entity, and secret sync clients that
+ // are seen in the current month
+
// Let's add 2 entities exclusive to month 1 (clients 0,1),
- // 2 entities shared by month 1 and 2 (clients 5,6),
- // 2 entities shared by month 2 and 3 (clients 10,11), and
- // 2 entities exclusive to month 3 (15,16). Furthermore, we can add
- // 3 new entities (clients 20,21, and 22).
- entitiesStruct := make(map[string]struct{}, 0)
- entitiesStruct["client_0"] = struct{}{}
- entitiesStruct["client_1"] = struct{}{}
- entitiesStruct["client_5"] = struct{}{}
- entitiesStruct["client_6"] = struct{}{}
- entitiesStruct["client_10"] = struct{}{}
- entitiesStruct["client_11"] = struct{}{}
- entitiesStruct["client_15"] = struct{}{}
- entitiesStruct["client_16"] = struct{}{}
- entitiesStruct["client_20"] = struct{}{}
- entitiesStruct["client_21"] = struct{}{}
- entitiesStruct["client_22"] = struct{}{}
+ // 2 entities shared by month 1 and 2 (clients 10,11),
+ // 2 entities shared by month 2 and 3 (clients 20,21), and
+ // 2 entities exclusive to month 3 (30,31). Furthermore, we can add
+ // 3 new entities (clients 40,41,42).
+ entitiesStruct := map[string]struct{}{
+ "client_0": {},
+ "client_1": {},
+ "client_10": {},
+ "client_11": {},
+ "client_20": {},
+ "client_21": {},
+ "client_30": {},
+ "client_31": {},
+ "client_40": {},
+ "client_41": {},
+ "client_42": {},
+ }
// We will add 3 nonentity clients from month 1 (clients 2,3,4),
- // 3 shared by months 1 and 2 (7,8,9),
- // 3 shared by months 2 and 3 (12,13,14), and
- // 3 exclusive to month 3 (17,18,19). We will also
- // add 4 new nonentity clients.
- nonEntitiesStruct := make(map[string]struct{}, 0)
- nonEntitiesStruct["client_2"] = struct{}{}
- nonEntitiesStruct["client_3"] = struct{}{}
- nonEntitiesStruct["client_4"] = struct{}{}
- nonEntitiesStruct["client_7"] = struct{}{}
- nonEntitiesStruct["client_8"] = struct{}{}
- nonEntitiesStruct["client_9"] = struct{}{}
- nonEntitiesStruct["client_12"] = struct{}{}
- nonEntitiesStruct["client_13"] = struct{}{}
- nonEntitiesStruct["client_14"] = struct{}{}
- nonEntitiesStruct["client_17"] = struct{}{}
- nonEntitiesStruct["client_18"] = struct{}{}
- nonEntitiesStruct["client_19"] = struct{}{}
- nonEntitiesStruct["client_23"] = struct{}{}
- nonEntitiesStruct["client_24"] = struct{}{}
- nonEntitiesStruct["client_25"] = struct{}{}
- nonEntitiesStruct["client_26"] = struct{}{}
+ // 3 shared by months 1 and 2 (12,13,14),
+ // 3 shared by months 2 and 3 (22,23,24), and
+ // 3 exclusive to month 3 (32,33,34). We will also
+ // add 4 new nonentity clients (43,44,45,46)
+ nonEntitiesStruct := map[string]struct{}{
+ "client_2": {},
+ "client_3": {},
+ "client_4": {},
+ "client_12": {},
+ "client_13": {},
+ "client_14": {},
+ "client_22": {},
+ "client_23": {},
+ "client_24": {},
+ "client_32": {},
+ "client_33": {},
+ "client_34": {},
+ "client_43": {},
+ "client_44": {},
+ "client_45": {},
+ "client_46": {},
+ }
+
+ // secret syncs have 1 client from month 1 (5)
+ // 1 shared by months 1 and 2 (15)
+ // 1 shared by months 2 and 3 (25)
+ // 2 exclusive to month 3 (35,36)
+ // and 2 new clients (47,48)
+ secretSyncStruct := map[string]struct{}{
+ "client_5": {},
+ "client_15": {},
+ "client_25": {},
+ "client_35": {},
+ "client_36": {},
+ "client_47": {},
+ "client_48": {},
+ }
counts := &processCounts{
ClientsByType: map[string]clientIDSet{
entityActivityType: entitiesStruct,
nonEntityTokenActivityType: nonEntitiesStruct,
+ secretSyncActivityType: secretSyncStruct,
},
}
@@ -122,48 +146,29 @@ func Test_ActivityLog_ComputeCurrentMonthForBillingPeriodInternal(t *testing.T)
startTime := timeutil.MonthsPreviousTo(3, endTime)
monthRecord, err := a.computeCurrentMonthForBillingPeriodInternal(context.Background(), currentMonthClientsMap, mockHLLGetFunc, startTime, endTime)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
- // We should have 11 entity clients and 16 nonentity clients, and 3 new entity clients
- // and 4 new nonentity clients
- if monthRecord.Counts.EntityClients != 11 {
- t.Fatalf("wrong number of entity clients. Expected 11, got %d", monthRecord.Counts.EntityClients)
- }
- if monthRecord.Counts.NonEntityClients != 16 {
- t.Fatalf("wrong number of non entity clients. Expected 16, got %d", monthRecord.Counts.NonEntityClients)
- }
- if monthRecord.NewClients.Counts.EntityClients != 3 {
- t.Fatalf("wrong number of new entity clients. Expected 3, got %d", monthRecord.NewClients.Counts.EntityClients)
- }
- if monthRecord.NewClients.Counts.NonEntityClients != 4 {
- t.Fatalf("wrong number of new non entity clients. Expected 4, got %d", monthRecord.NewClients.Counts.NonEntityClients)
- }
+ require.Equal(t, &activity.CountsRecord{
+ EntityClients: 11,
+ NonEntityClients: 16,
+ SecretSyncs: 7,
+ }, monthRecord.Counts)
+
+ require.Equal(t, &activity.CountsRecord{
+ EntityClients: 3,
+ NonEntityClients: 4,
+ SecretSyncs: 2,
+ }, monthRecord.NewClients.Counts)
// Attempt to compute current month when no records exist
endTime = time.Now().UTC()
startTime = timeutil.StartOfMonth(endTime)
emptyClientsMap := make(map[int64]*processMonth, 0)
monthRecord, err = a.computeCurrentMonthForBillingPeriodInternal(context.Background(), emptyClientsMap, mockHLLGetFunc, startTime, endTime)
- if err != nil {
- t.Fatalf("failed to compute empty current month, err: %v", err)
- }
+ require.NoError(t, err)
- // We should have 0 entity clients, nonentity clients,new entity clients
- // and new nonentity clients
- if monthRecord.Counts.EntityClients != 0 {
- t.Fatalf("wrong number of entity clients. Expected 0, got %d", monthRecord.Counts.EntityClients)
- }
- if monthRecord.Counts.NonEntityClients != 0 {
- t.Fatalf("wrong number of non entity clients. Expected 0, got %d", monthRecord.Counts.NonEntityClients)
- }
- if monthRecord.NewClients.Counts.EntityClients != 0 {
- t.Fatalf("wrong number of new entity clients. Expected 0, got %d", monthRecord.NewClients.Counts.EntityClients)
- }
- if monthRecord.NewClients.Counts.NonEntityClients != 0 {
- t.Fatalf("wrong number of new non entity clients. Expected 0, got %d", monthRecord.NewClients.Counts.NonEntityClients)
- }
+ require.Equal(t, &activity.CountsRecord{}, monthRecord.Counts)
+ require.Equal(t, &activity.CountsRecord{}, monthRecord.NewClients.Counts)
}
// writeEntitySegment writes a single segment file with the given time and index for an entity
From fbb70eb0c6af58485a47a1e6735e47b9076f2969 Mon Sep 17 00:00:00 2001
From: Steven Clark
Date: Fri, 5 Jan 2024 10:52:33 -0500
Subject: [PATCH 27/39] Fix pluralization typo in ACME entity assignment doc
(#24676)
---
website/content/docs/concepts/client-count/index.mdx | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/website/content/docs/concepts/client-count/index.mdx b/website/content/docs/concepts/client-count/index.mdx
index d698884b96c0..109d10a229c2 100644
--- a/website/content/docs/concepts/client-count/index.mdx
+++ b/website/content/docs/concepts/client-count/index.mdx
@@ -71,7 +71,7 @@ For example:
- ACME client requests (from the same server or separate servers) for the same
certificate identifier (a unique combination of CN,DNS, SANS and IP SANS)
- are treated as the same entities.
+ are treated as the same entity.
- If an ACME client makes a request for `a.test.com`, and subsequently makes a new
request for `b.test.com` and `*.test.com` then two distinct entities will be created,
one for `a.test.com` and another for the combination of `b.test.com` and `*.test.com`.
From 0e23ae96ab985a14fad14c71ac2eb6f774fa81a4 Mon Sep 17 00:00:00 2001
From: claire bontempo <68122737+hellobontempo@users.noreply.github.com>
Date: Fri, 5 Jan 2024 10:49:38 -0800
Subject: [PATCH 28/39] UI: remove keyvauluri from credentials section (#24679)
* remove keyvauluri from credentials section
* move comment
---
ui/app/models/sync/destinations/azure-kv.js | 4 ++--
.../secrets/page/destinations/create-and-edit-test.js | 8 ++++----
2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/ui/app/models/sync/destinations/azure-kv.js b/ui/app/models/sync/destinations/azure-kv.js
index 5c7dbcec894f..f1fb4c66f4d3 100644
--- a/ui/app/models/sync/destinations/azure-kv.js
+++ b/ui/app/models/sync/destinations/azure-kv.js
@@ -8,8 +8,8 @@ import { attr } from '@ember-data/model';
import { withFormFields } from 'vault/decorators/model-form-fields';
const displayFields = ['name', 'keyVaultUri', 'tenantId', 'cloud', 'clientId', 'clientSecret'];
const formFieldGroups = [
- { default: ['name', 'tenantId', 'cloud', 'clientId'] },
- { Credentials: ['keyVaultUri', 'clientSecret'] },
+ { default: ['name', 'keyVaultUri', 'tenantId', 'cloud', 'clientId'] },
+ { Credentials: ['clientSecret'] },
];
@withFormFields(displayFields, formFieldGroups)
export default class SyncDestinationsAzureKeyVaultModel extends SyncDestinationModel {
diff --git a/ui/tests/integration/components/sync/secrets/page/destinations/create-and-edit-test.js b/ui/tests/integration/components/sync/secrets/page/destinations/create-and-edit-test.js
index aa2ff5bd281b..600fee2c3ded 100644
--- a/ui/tests/integration/components/sync/secrets/page/destinations/create-and-edit-test.js
+++ b/ui/tests/integration/components/sync/secrets/page/destinations/create-and-edit-test.js
@@ -256,11 +256,11 @@ module('Integration | Component | sync | Secrets::Page::Destinations::CreateAndE
assert.dom(PAGE.title).hasTextContaining(`Edit ${this.model.name}`);
for (const attr of this.model.formFields) {
- // Enable inputs with sensitive values
- if (maskedParams.includes(attr.name)) {
- await click(PAGE.form.enableInput(attr.name));
- }
if (editable.includes(attr.name)) {
+ if (maskedParams.includes(attr.name)) {
+ // Enable inputs with sensitive values
+ await click(PAGE.form.enableInput(attr.name));
+ }
await PAGE.form.fillInByAttr(attr.name, `new-${decamelize(attr.name)}-value`);
} else {
assert.dom(PAGE.inputByAttr(attr.name)).isDisabled(`${attr.name} is disabled`);
From 3aee6ec464bb44d684c24d1767a09b50366e2e37 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?R=C3=A9mi=20Lapeyre?=
Date: Fri, 5 Jan 2024 20:11:33 +0100
Subject: [PATCH 29/39] Fix UI when editing database roles (#24660)
* Fix UI when editing database roles
When using a database role the UI will try to update the database connection
associated to the role. This is to make sure that the role is allowed to
use this connection:
async _updateAllowedRoles(store, { role, backend, db, type = 'add' }) {
const connection = await store.queryRecord('database/connection', { backend, id: db });
const roles = [...connection.allowed_roles];
const allowedRoles = type === 'add' ? addToArray([roles, role]) : removeFromArray([roles, role]);
connection.allowed_roles = allowedRoles;
return connection.save();
},
async createRecord(store, type, snapshot) {
const serializer = store.serializerFor(type.modelName);
const data = serializer.serialize(snapshot);
const roleType = snapshot.attr('type');
const backend = snapshot.attr('backend');
const id = snapshot.attr('name');
const db = snapshot.attr('database');
try {
await this._updateAllowedRoles(store, {
role: id,
backend,
db: db[0],
});
} catch (e) {
throw new Error('Could not update allowed roles for selected database. Check Vault logs for details');
}
return this.ajax(this.urlFor(backend, id, roleType), 'POST', { data }).then(() => {
// ember data doesn't like 204s if it's not a DELETE
return {
data: assign({}, data, { id }),
};
});
},
This is intended to help the administrator as the role will only work if
it is allowed by the database connection.
This is however an issue if the person doing the update does not have
the permission to update the connection: they will not be able to use
the UI to update the role even though they have the appropriate permissions
to do so (using the CLI or the API will work for example).
This is often the case when the database connections are created by a
centralized system but a human operator needs to create the roles.
You can try this with the following test case:
$ cat main.tf
resource "vault_auth_backend" "userpass" {
type = "userpass"
}
resource "vault_generic_endpoint" "alice" {
depends_on = [vault_auth_backend.userpass]
path = "auth/userpass/users/alice"
ignore_absent_fields = true
data_json = jsonencode({
"policies" : ["root"],
"password" : "alice"
})
}
data "vault_policy_document" "db_admin" {
rule {
path = "database/roles/*"
capabilities = ["create", "read", "update", "delete", "list"]
}
}
resource "vault_policy" "db_admin" {
name = "db-admin"
policy = data.vault_policy_document.db_admin.hcl
}
resource "vault_generic_endpoint" "bob" {
depends_on = [vault_auth_backend.userpass]
path = "auth/userpass/users/bob"
ignore_absent_fields = true
data_json = jsonencode({
"policies" : [vault_policy.db_admin.name],
"password" : "bob"
})
}
resource "vault_mount" "db" {
path = "database"
type = "database"
}
resource "vault_database_secret_backend_connection" "postgres" {
backend = vault_mount.db.path
name = "postgres"
allowed_roles = ["*"]
verify_connection = false
postgresql {
connection_url = "postgres://username:password@localhost/database"
}
}
$ terraform apply --auto-approve
then using bob to create a role associated to the `postgres` connection.
This patch changes the way the UI does the update: it still tries to
update the database connection but if it fails to do so because it does not
have the permission it just silently skip this part and updates the role.
This also update the error message returned to the user in case of issues
to include the actual errors.
* Add changelog
* Also ignore error when deleting a role
* Address code review comments
---------
Co-authored-by: Chelsea Shaw <82459713+hashishaw@users.noreply.github.com>
---
changelog/24660.txt | 3 +++
ui/app/adapters/database/role.js | 28 ++++++++++++++++++-------
ui/app/components/database-role-edit.js | 3 ---
3 files changed, 24 insertions(+), 10 deletions(-)
create mode 100644 changelog/24660.txt
diff --git a/changelog/24660.txt b/changelog/24660.txt
new file mode 100644
index 000000000000..415944299e1a
--- /dev/null
+++ b/changelog/24660.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: The UI can now be used to create or update database roles by operator without permission on the database connection.
+```
diff --git a/ui/app/adapters/database/role.js b/ui/app/adapters/database/role.js
index 848719e37a7f..2a3002c1d1fc 100644
--- a/ui/app/adapters/database/role.js
+++ b/ui/app/adapters/database/role.js
@@ -164,7 +164,7 @@ export default ApplicationAdapter.extend({
db: db[0],
});
} catch (e) {
- throw new Error('Could not update allowed roles for selected database. Check Vault logs for details');
+ this.checkError(e);
}
return this.ajax(this.urlFor(backend, id, roleType), 'POST', { data }).then(() => {
@@ -180,12 +180,16 @@ export default ApplicationAdapter.extend({
const backend = snapshot.attr('backend');
const id = snapshot.attr('name');
const db = snapshot.attr('database');
- await this._updateAllowedRoles(store, {
- role: id,
- backend,
- db: db[0],
- type: 'remove',
- });
+ try {
+ await this._updateAllowedRoles(store, {
+ role: id,
+ backend,
+ db: db[0],
+ type: 'remove',
+ });
+ } catch (e) {
+ this.checkError(e);
+ }
return this.ajax(this.urlFor(backend, id, roleType), 'DELETE');
},
@@ -199,4 +203,14 @@ export default ApplicationAdapter.extend({
return this.ajax(this.urlFor(backend, id, roleType), 'POST', { data }).then(() => data);
},
+
+ checkError(e) {
+ if (e.httpStatus === 403) {
+ // The user does not have the permission to update the connection. This
+ // can happen if their permissions are limited to the role. In that case
+ // we ignore the error and continue updating the role.
+ return;
+ }
+ throw new Error(`Could not update allowed roles for selected database: ${e.errors.join(', ')}`);
+ },
});
diff --git a/ui/app/components/database-role-edit.js b/ui/app/components/database-role-edit.js
index 3379650e6e52..9672fe676034 100644
--- a/ui/app/components/database-role-edit.js
+++ b/ui/app/components/database-role-edit.js
@@ -27,9 +27,6 @@ export default class DatabaseRoleEdit extends Component {
get warningMessages() {
const warnings = {};
- if (this.args.model.canUpdateDb === false) {
- warnings.database = `You don’t have permissions to update this database connection, so this role cannot be created.`;
- }
if (
(this.args.model.type === 'dynamic' && this.args.model.canCreateDynamic === false) ||
(this.args.model.type === 'static' && this.args.model.canCreateStatic === false)
From 8caaa131329adeb402ab457f758abae626dd63dd Mon Sep 17 00:00:00 2001
From: Marc Boudreau
Date: Fri, 5 Jan 2024 15:27:56 -0500
Subject: [PATCH 30/39] VAULT-21608: Endpoints to Retrieve Active Pre- and
Post- Login Messages (#24626)
* add foundation to allow enterprise edition to walk up from current namespace to root
* add sys/internal/ui/*-messages paths
* add tests for consume custom messages endpoints
* more tests and change structure of link parameter
* add error when multiple links are provided for a custom message
---
vault/core.go | 2 +-
vault/custom_messages_manager.go | 20 +++
vault/logical_system.go | 86 +++++++++-
vault/logical_system_custom_messages.go | 65 ++++----
vault/logical_system_custom_messages_test.go | 72 ++++++--
vault/logical_system_paths.go | 62 +++++++
vault/logical_system_test.go | 165 +++++++++++++++++++
vault/ui_custom_messages/manager.go | 17 +-
vault/ui_custom_messages/manager_test.go | 57 ++++++-
vault/ui_custom_messages/namespace.go | 24 +++
vault/ui_custom_messages/namespace_test.go | 31 ++++
11 files changed, 549 insertions(+), 52 deletions(-)
create mode 100644 vault/custom_messages_manager.go
create mode 100644 vault/ui_custom_messages/namespace.go
create mode 100644 vault/ui_custom_messages/namespace_test.go
diff --git a/vault/core.go b/vault/core.go
index e9239f387cbd..542a62b249a4 100644
--- a/vault/core.go
+++ b/vault/core.go
@@ -526,7 +526,7 @@ type Core struct {
// uiConfig contains UI configuration
uiConfig *UIConfig
- customMessageManager *uicustommessages.Manager
+ customMessageManager CustomMessagesManager
// rawEnabled indicates whether the Raw endpoint is enabled
rawEnabled bool
diff --git a/vault/custom_messages_manager.go b/vault/custom_messages_manager.go
new file mode 100644
index 000000000000..ca7c7cd3a806
--- /dev/null
+++ b/vault/custom_messages_manager.go
@@ -0,0 +1,20 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package vault
+
+import (
+ "context"
+
+ uicustommessages "github.com/hashicorp/vault/vault/ui_custom_messages"
+)
+
+// CustomMessagesManager is the interface used by the vault package when
+// interacting with a uicustommessages.Manager instance.
+type CustomMessagesManager interface {
+ FindMessages(context.Context, uicustommessages.FindFilter) ([]uicustommessages.Message, error)
+ AddMessage(context.Context, uicustommessages.Message) (*uicustommessages.Message, error)
+ ReadMessage(context.Context, string) (*uicustommessages.Message, error)
+ UpdateMessage(context.Context, uicustommessages.Message) (*uicustommessages.Message, error)
+ DeleteMessage(context.Context, string) error
+}
diff --git a/vault/logical_system.go b/vault/logical_system.go
index dcf5f0b97f0e..1ae623546e60 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -50,6 +50,7 @@ import (
"github.com/hashicorp/vault/sdk/helper/wrapping"
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault/plugincatalog"
+ uicustommessages "github.com/hashicorp/vault/vault/ui_custom_messages"
"github.com/hashicorp/vault/version"
"github.com/mitchellh/mapstructure"
"golang.org/x/crypto/sha3"
@@ -142,8 +143,8 @@ func NewSystemBackend(core *Core, logger log.Logger, config *logical.BackendConf
"wrapping/pubkey",
"replication/status",
"internal/specs/openapi",
- "internal/ui/custom-messages",
- "internal/ui/custom-messages/*",
+ "internal/ui/authenticated-messages",
+ "internal/ui/unauthenticated-messages",
"internal/ui/mounts",
"internal/ui/mounts/*",
"internal/ui/namespaces",
@@ -4428,6 +4429,87 @@ func hasMountAccess(ctx context.Context, acl *ACL, path string) bool {
return aclCapabilitiesGiven
}
+// pathInternalUIAuthenticatedMessages finds all of the active messages whose
+// Authenticated property is set to true in the current namespace (based on the
+// provided context.Context) or in any ancestor namespace all the way up to the
+// root namespace.
+func (b *SystemBackend) pathInternalUIAuthenticatedMessages(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ // Make sure that the request includes a Vault token.
+ var tokenEntry *logical.TokenEntry
+ if token := req.ClientToken; token != "" {
+ tokenEntry, _ = b.Core.LookupToken(ctx, token)
+ }
+
+ if tokenEntry == nil {
+ return logical.ListResponseWithInfo([]string{}, map[string]any{}), nil
+ }
+
+ filter := uicustommessages.FindFilter{
+ IncludeAncestors: true,
+ }
+ filter.Active(true)
+ filter.Authenticated(true)
+
+ return b.pathInternalUICustomMessagesCommon(ctx, filter)
+}
+
+// pathInternalUIUnauthenticatedMessages finds all of the active messages whose
+// Authenticated property is set to false in the current namespace (based on the
+// provided context.Context) or in any ancestor namespace all the way up to the
+// root namespace.
+func (b *SystemBackend) pathInternalUIUnauthenticatedMessages(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
+ filter := uicustommessages.FindFilter{
+ IncludeAncestors: true,
+ }
+ filter.Active(true)
+ filter.Authenticated(false)
+
+ return b.pathInternalUICustomMessagesCommon(ctx, filter)
+}
+
+// pathInternalUICustomMessagesCommon takes care of finding the custom messages
+// that meet the criteria set in the provided uicustommessages.FindFilter.
+func (b *SystemBackend) pathInternalUICustomMessagesCommon(ctx context.Context, filter uicustommessages.FindFilter) (*logical.Response, error) {
+ messages, err := b.Core.customMessageManager.FindMessages(ctx, filter)
+ if err != nil {
+ return logical.ErrorResponse("failed to retrieve custom messages: %w", err), nil
+ }
+
+ keys := []string{}
+ keyInfo := map[string]any{}
+
+ for _, message := range messages {
+ keys = append(keys, message.ID)
+
+ var endTimeFormatted any
+
+ if message.EndTime != nil {
+ endTimeFormatted = message.EndTime.Format(time.RFC3339Nano)
+ }
+
+ var linkFormatted map[string]string = nil
+
+ if message.Link != nil {
+ linkFormatted = make(map[string]string)
+
+ linkFormatted[message.Link.Title] = message.Link.Href
+ }
+
+ keyInfo[message.ID] = map[string]any{
+ "title": message.Title,
+ "message": message.Message,
+ "authenticated": message.Authenticated,
+ "type": message.Type,
+ "start_time": message.StartTime.Format(time.RFC3339Nano),
+ "end_time": endTimeFormatted,
+ "link": linkFormatted,
+ "options": message.Options,
+ }
+ }
+
+ return logical.ListResponseWithInfo(keys, keyInfo), nil
+}
+
func (b *SystemBackend) pathInternalUIMountsRead(ctx context.Context, req *logical.Request, d *framework.FieldData) (*logical.Response, error) {
ns, err := namespace.FromContext(ctx)
if err != nil {
diff --git a/vault/logical_system_custom_messages.go b/vault/logical_system_custom_messages.go
index 6bba7a0db802..cddf33e5735a 100644
--- a/vault/logical_system_custom_messages.go
+++ b/vault/logical_system_custom_messages.go
@@ -422,28 +422,24 @@ func (b *SystemBackend) handleCreateCustomMessages(ctx context.Context, req *log
return logical.ErrorResponse(err.Error()), nil
}
+ if len(linkMap) > 1 {
+ return logical.ErrorResponse("invalid number of elements in link parameter value; only a single element can be provided"), nil
+ }
+
var link *uicustommessages.MessageLink
if linkMap != nil {
link = &uicustommessages.MessageLink{}
- linkTitle, ok := linkMap["title"]
- if !ok {
- return logical.ErrorResponse("missing title in link parameter value"), nil
- }
-
- link.Title, ok = linkTitle.(string)
- if !ok {
- return logical.ErrorResponse("invalid title value in link parameter value"), nil
- }
+ for k, v := range linkMap {
+ href, ok := v.(string)
+ if !ok {
+ return logical.ErrorResponse(fmt.Sprintf("invalid url for %q key in link parameter value", k)), nil
+ }
- linkHref, ok := linkMap["href"]
- if !ok {
- return logical.ErrorResponse("missing href in link parameter value"), nil
- }
+ link.Title = k
+ link.Href = href
- link.Href, ok = linkHref.(string)
- if !ok {
- return logical.ErrorResponse("invalid href value in link parameter value"), nil
+ break
}
}
@@ -509,6 +505,13 @@ func (b *SystemBackend) handleReadCustomMessage(ctx context.Context, req *logica
endTimeResponse = message.EndTime.Format(time.RFC3339Nano)
}
+ var linkResponse map[string]string = nil
+ if message.Link != nil {
+ linkResponse = make(map[string]string)
+
+ linkResponse[message.Link.Title] = message.Link.Href
+ }
+
return &logical.Response{
Data: map[string]any{
"id": id,
@@ -517,7 +520,7 @@ func (b *SystemBackend) handleReadCustomMessage(ctx context.Context, req *logica
"message": message.Message,
"start_time": message.StartTime.Format(time.RFC3339Nano),
"end_time": endTimeResponse,
- "link": message.Link,
+ "link": linkResponse,
"options": message.Options,
"active": message.Active(),
"title": message.Title,
@@ -558,28 +561,24 @@ func (b *SystemBackend) handleUpdateCustomMessage(ctx context.Context, req *logi
return logical.ErrorResponse(err.Error()), nil
}
+ if len(linkMap) > 1 {
+ return logical.ErrorResponse("invalid number of elements in link parameter value; only a single element can be provided"), nil
+ }
+
var link *uicustommessages.MessageLink
if linkMap != nil {
link = &uicustommessages.MessageLink{}
- linkTitle, ok := linkMap["title"]
- if !ok {
- return logical.ErrorResponse("missing title in link parameter value"), nil
- }
-
- link.Title, ok = linkTitle.(string)
- if !ok {
- return logical.ErrorResponse("invalid title value in link parameter value"), nil
- }
+ for k, v := range linkMap {
+ href, ok := v.(string)
+ if !ok {
+ return logical.ErrorResponse("invalid url for %q key link parameter value", k), nil
+ }
- linkHref, ok := linkMap["href"]
- if !ok {
- return logical.ErrorResponse("missing href in link parameter value"), nil
- }
+ link.Title = k
+ link.Href = href
- link.Href, ok = linkHref.(string)
- if !ok {
- return logical.ErrorResponse("invalid href value in link parameter value"), nil
+ break
}
}
diff --git a/vault/logical_system_custom_messages_test.go b/vault/logical_system_custom_messages_test.go
index 234c0e8172c1..d3dadc57ed2e 100644
--- a/vault/logical_system_custom_messages_test.go
+++ b/vault/logical_system_custom_messages_test.go
@@ -302,6 +302,25 @@ func TestHandleCreateCustomMessage(t *testing.T) {
},
errorExpected: true,
},
+ {
+ name: "link-parameter-href-invalid",
+ fieldRawUpdate: map[string]any{
+ "link": map[string]any{
+ "click here": []int{},
+ },
+ },
+ errorExpected: true,
+ },
+ {
+ name: "link-parameter-multiple-links",
+ fieldRawUpdate: map[string]any{
+ "link": map[string]any{
+ "click here": "http://example.org",
+ "click here 2": "http://ping.net",
+ },
+ },
+ errorExpected: true,
+ },
{
name: "options-parameter-invalid",
fieldRawUpdate: map[string]any{
@@ -321,9 +340,8 @@ func TestHandleCreateCustomMessage(t *testing.T) {
"options": map[string]any{
"color": "red",
},
- "link": map[string]any{
- "title": "Details",
- "href": "https://server.com/details",
+ "link": map[string]string{
+ "Details": "https://server.com/details",
},
},
},
@@ -373,14 +391,24 @@ func TestHandleCreateCustomMessage(t *testing.T) {
assert.Contains(t, resp.Data, "start_time", testcase.name)
assert.Contains(t, resp.Data, "end_time", testcase.name)
assert.Contains(t, resp.Data, "id", testcase.name)
- if _, ok := testcase.fieldRawUpdate["authenticated"]; !ok {
- assert.True(t, resp.Data["authenticated"].(bool), testcase.name)
- }
+ assert.Contains(t, resp.Data, "options", testcase.name)
+ assert.Contains(t, resp.Data, "link", testcase.name)
+ _, ok := testcase.fieldRawUpdate["authenticated"]
+ assert.Equal(t, !ok, resp.Data["authenticated"].(bool), testcase.name)
if _, ok := testcase.fieldRawUpdate["type"]; !ok {
assert.Equal(t, resp.Data["type"], uicustommessages.BannerMessageType, testcase.name)
+ } else {
+ assert.Equal(t, resp.Data["type"], uicustommessages.ModalMessageType, testcase.name)
}
if _, ok := testcase.fieldRawUpdate["end_time"]; !ok {
assert.Nil(t, resp.Data["end_time"], testcase.name)
+ } else {
+ assert.NotNil(t, resp.Data["end_time"], testcase.name)
+ }
+ if _, ok := testcase.fieldRawUpdate["link"]; !ok {
+ assert.Nil(t, resp.Data["link"], testcase.name)
+ } else {
+ assert.NotNil(t, resp.Data["link"], testcase.name)
}
}
}
@@ -428,7 +456,10 @@ func TestHandleReadCustomMessage(t *testing.T) {
StartTime: earlier,
EndTime: &later,
Options: make(map[string]any),
- Link: nil,
+ Link: &uicustommessages.MessageLink{
+ Title: "Click Here",
+ Href: "www.example.com",
+ },
}
message, err := backend.Core.customMessageManager.AddMessage(nsCtx, *message)
@@ -457,9 +488,12 @@ func TestHandleReadCustomMessage(t *testing.T) {
assert.Equal(t, resp.Data["active"], true)
assert.Contains(t, resp.Data, "end_time")
assert.NotNil(t, resp.Data["end_time"])
+ assert.Contains(t, resp.Data, "link")
+ assert.Equal(t, 1, len(resp.Data["link"].(map[string]string)))
// Change the message so that it doesn't have an end time.
message.EndTime = nil
+ message.Link = nil
message, err = backend.Core.customMessageManager.UpdateMessage(nsCtx, *message)
require.NoError(t, err)
require.NotNil(t, message)
@@ -474,6 +508,8 @@ func TestHandleReadCustomMessage(t *testing.T) {
assert.Equal(t, resp.Data["active"], true)
assert.Contains(t, resp.Data, "end_time")
assert.Nil(t, resp.Data["end_time"])
+ assert.Contains(t, resp.Data, "link")
+ assert.Nil(t, resp.Data["link"])
// Check that there's an error when trying to read a non-existant custom
// message.
@@ -538,7 +574,7 @@ func TestHandleUpdateCustomMessage(t *testing.T) {
endTime := now.Add(time.Hour).Format(time.RFC3339Nano)
startTime2 := now.UTC().Add(-2 * time.Hour).Format(time.RFC3339Nano)
- storageEntryValue := fmt.Sprintf(`{"messages":{"xyz":{"id":"xyz","title":"title","message":"message","authenticated":true,"type":"%s","start_time":"%s","end_time":"%s","link":{},"options":{}}}}`, uicustommessages.ModalMessageType, startTime, endTime)
+ storageEntryValue := fmt.Sprintf(`{"messages":{"xyz":{"id":"xyz","title":"title","message":"message","authenticated":true,"type":"%s","start_time":"%s","end_time":"%s","link":null,"options":null}}}`, uicustommessages.ModalMessageType, startTime, endTime)
storageEntry := &logical.StorageEntry{
Key: "sys/config/ui/custom-messages",
@@ -595,8 +631,7 @@ func TestHandleUpdateCustomMessage(t *testing.T) {
"start_time": startTime,
"end_time": endTime,
"link": map[string]any{
- "title": "link-title",
- "href": "http://link.url.com",
+ "link-title": "http://link.url.com",
},
"options": map[string]any{},
},
@@ -704,6 +739,23 @@ func TestHandleUpdateCustomMessage(t *testing.T) {
"link": "link",
},
},
+ {
+ name: "link-parameter-url-invalid",
+ fieldRawUpdate: map[string]any{
+ "link": map[string]any{
+ "my-link": []int{},
+ },
+ },
+ },
+ {
+ name: "link-parameter-multiple-links",
+ fieldRawUpdate: map[string]any{
+ "link": map[string]any{
+ "click here": "http://example.org",
+ "click here 2": "http://ping.net",
+ },
+ },
+ },
{
name: "options-parameter-invalid",
fieldRawUpdate: map[string]any{
diff --git a/vault/logical_system_paths.go b/vault/logical_system_paths.go
index e88369d647fa..c12b83e6ecaf 100644
--- a/vault/logical_system_paths.go
+++ b/vault/logical_system_paths.go
@@ -2435,6 +2435,37 @@ func (b *SystemBackend) internalPaths() []*framework.Path {
HelpSynopsis: "Generate an OpenAPI 3 document of all mounted paths.",
},
+ {
+ Pattern: "internal/ui/authenticated-messages",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: "internal-ui",
+ OperationVerb: "read",
+ OperationSuffix: "authenticated-active-custom-messages",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathInternalUIAuthenticatedMessages,
+ Summary: "Retrieves Active post-login Custom Messages",
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "keys": {
+ Type: framework.TypeStringSlice,
+ Required: true,
+ },
+ "key_info": {
+ Type: framework.TypeMap,
+ Required: true,
+ },
+ },
+ }},
+ },
+ },
+ },
+ },
{
Pattern: "internal/ui/feature-flags",
@@ -2653,6 +2684,37 @@ func (b *SystemBackend) internalPaths() []*framework.Path {
HelpSynopsis: strings.TrimSpace(sysHelp["internal-ui-resultant-acl"][0]),
HelpDescription: strings.TrimSpace(sysHelp["internal-ui-resultant-acl"][1]),
},
+ {
+ Pattern: "internal/ui/unauthenticated-messages",
+
+ DisplayAttrs: &framework.DisplayAttributes{
+ OperationPrefix: "internal-ui",
+ OperationVerb: "read",
+ OperationSuffix: "unauthenticated-active-custom-messages",
+ },
+
+ Operations: map[logical.Operation]framework.OperationHandler{
+ logical.ReadOperation: &framework.PathOperation{
+ Callback: b.pathInternalUIUnauthenticatedMessages,
+ Summary: "Retrieves Active pre-login Custom Messages",
+ Responses: map[int][]framework.Response{
+ http.StatusOK: {{
+ Description: "OK",
+ Fields: map[string]*framework.FieldSchema{
+ "keys": {
+ Type: framework.TypeStringSlice,
+ Required: true,
+ },
+ "key_info": {
+ Type: framework.TypeMap,
+ Required: true,
+ },
+ },
+ }},
+ },
+ },
+ },
+ },
{
Pattern: "internal/ui/version",
DisplayAttrs: &framework.DisplayAttributes{
diff --git a/vault/logical_system_test.go b/vault/logical_system_test.go
index 9b28f1bfc301..3f9ead9a9533 100644
--- a/vault/logical_system_test.go
+++ b/vault/logical_system_test.go
@@ -44,8 +44,10 @@ import (
"github.com/hashicorp/vault/sdk/logical"
"github.com/hashicorp/vault/vault/plugincatalog"
"github.com/hashicorp/vault/vault/seal"
+ uicustommessages "github.com/hashicorp/vault/vault/ui_custom_messages"
"github.com/hashicorp/vault/version"
"github.com/mitchellh/mapstructure"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -6383,3 +6385,166 @@ func TestSystemBackend_pluginRuntime_CannotDeleteRuntimeWithReferencingPlugins(t
t.Fatalf("err: %v %v", err, resp.Error())
}
}
+
+type testingCustomMessageManager struct {
+ findFilters []uicustommessages.FindFilter
+}
+
+func (m *testingCustomMessageManager) FindMessages(_ context.Context, filter uicustommessages.FindFilter) ([]uicustommessages.Message, error) {
+ m.findFilters = append(m.findFilters, filter)
+
+ return []uicustommessages.Message{}, nil
+}
+
+func (m *testingCustomMessageManager) AddMessage(_ context.Context, _ uicustommessages.Message) (*uicustommessages.Message, error) {
+ return nil, nil
+}
+
+func (m *testingCustomMessageManager) ReadMessage(_ context.Context, _ string) (*uicustommessages.Message, error) {
+ return nil, nil
+}
+
+func (m *testingCustomMessageManager) UpdateMessage(_ context.Context, _ uicustommessages.Message) (*uicustommessages.Message, error) {
+ return nil, nil
+}
+
+func (m *testingCustomMessageManager) DeleteMessage(_ context.Context, _ string) error {
+ return nil
+}
+
+// TestPathInternalUIUnauthenticatedMessages verifies the correct behaviour of
+// the pathInternalUIUnauthenticatedMessages method, which is to call the
+// FindMessages method of the Core.customMessagesManager field with a FindFilter
+// that has the IncludeAncestors field set to true, the active field pointing to
+// a true value, and the authenticated field pointing to a false value.
+func TestPathInternalUIUnauthenticatedMessages(t *testing.T) {
+ testingCMM := &testingCustomMessageManager{}
+ backend := &SystemBackend{
+ Core: &Core{
+ customMessageManager: testingCMM,
+ },
+ }
+
+ resp, err := backend.pathInternalUIUnauthenticatedMessages(context.Background(), &logical.Request{}, &framework.FieldData{})
+ assert.NoError(t, err)
+ assert.NotNil(t, resp)
+
+ expectedFilter := uicustommessages.FindFilter{IncludeAncestors: true}
+ expectedFilter.Active(true)
+ expectedFilter.Authenticated(false)
+
+ assert.ElementsMatch(t, testingCMM.findFilters, []uicustommessages.FindFilter{expectedFilter})
+}
+
+// TestPathInternalUIAuthenticatedMessages verifies the correct behaviour of the
+// pathInternalUIAuthenticatedMessages method, which is to first check if the
+// request has a valid token included, then call the FindMessages method of the
+// Core.customMessagesManager field with a FindFilter that has the
+// IncludeAncestors field set to true, the active field pointing to a true
+// value, and the authenticated field pointing to a true value. If the request
+// does not have a valid token, the method behaves as if no messages meet the
+// criteria.
+func TestPathInternalUIAuthenticatedMessages(t *testing.T) {
+ testingCMM := &testingCustomMessageManager{}
+ testCore := TestCoreRaw(t)
+ _, _, token := testCoreUnsealed(t, testCore)
+ testCore.customMessageManager = testingCMM
+
+ backend := &SystemBackend{
+ Core: testCore,
+ }
+
+ nsCtx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
+
+ // Check with a request that includes a valid token
+ resp, err := backend.pathInternalUIAuthenticatedMessages(nsCtx, &logical.Request{
+ ClientToken: token,
+ }, &framework.FieldData{})
+ assert.NoError(t, err)
+ assert.NotNil(t, resp)
+
+ expectedFilter := uicustommessages.FindFilter{
+ IncludeAncestors: true,
+ }
+ expectedFilter.Active(true)
+ expectedFilter.Authenticated(true)
+
+ assert.ElementsMatch(t, testingCMM.findFilters, []uicustommessages.FindFilter{expectedFilter})
+
+ // Now, check with a request that has no token: expecting no new filter
+ // in the testingCMM.
+ resp, err = backend.pathInternalUIAuthenticatedMessages(nsCtx, &logical.Request{}, &framework.FieldData{})
+ assert.NoError(t, err)
+ assert.NotNil(t, resp)
+ assert.NotContains(t, resp.Data, "keys")
+ assert.NotContains(t, resp.Data, "key_info")
+
+ assert.ElementsMatch(t, testingCMM.findFilters, []uicustommessages.FindFilter{expectedFilter})
+
+ // Finally, check with an invalid token in the request: again, expecting no
+ // new filter in the testingCMM.
+ resp, err = backend.pathInternalUIAuthenticatedMessages(nsCtx, &logical.Request{ClientToken: "invalid"}, &framework.FieldData{})
+ assert.NoError(t, err)
+ assert.NotNil(t, resp)
+ assert.NotContains(t, resp.Data, "keys")
+ assert.NotContains(t, resp.Data, "key_info")
+
+ assert.ElementsMatch(t, testingCMM.findFilters, []uicustommessages.FindFilter{expectedFilter})
+}
+
+// TestPathInternalUICustomMessagesCommon verifies the correct behaviour of the
+// (*SystemBackend).pathInternalUICustomMessagesCommon method.
+func TestPathInternalUICustomMessagesCommon(t *testing.T) {
+ var storage logical.Storage = &testingStorage{getFails: true}
+ testingCMM := uicustommessages.NewManager(storage)
+ backend := &SystemBackend{
+ Core: &Core{
+ customMessageManager: testingCMM,
+ },
+ }
+
+ // First, check that when an error occurs in the FindMessages method, it's
+ // handled correctly.
+ filter := uicustommessages.FindFilter{
+ IncludeAncestors: true,
+ }
+ filter.Active(true)
+ filter.Authenticated(false)
+
+ resp, err := backend.pathInternalUICustomMessagesCommon(context.Background(), filter)
+ assert.NoError(t, err)
+ assert.NotNil(t, resp)
+ assert.Contains(t, resp.Data, "error")
+ assert.Contains(t, resp.Data["error"], "failed to retrieve custom messages")
+
+ // Next, check that when no error occur and messages are returned by
+ // FindMessages that they are correctly translated.
+ storage = &logical.InmemStorage{}
+ backend.Core.customMessageManager = uicustommessages.NewManager(storage)
+
+ // Load some messages for the root namespace and a testNS namespace.
+ startTime := time.Now().Add(-1 * time.Hour).Format(time.RFC3339Nano)
+ endTime := time.Now().Add(time.Hour).Format(time.RFC3339Nano)
+
+ messagesTemplate := `{"messages":{"%[1]d01":{"id":"%[1]d01","title":"Title-%[1]d01","message":"Message of Title-%[1]d01","authenticated":false,"type":"banner","start_time":"%[2]s"},"%[1]d02":{"id":"%[1]d02","title":"Title-%[1]d02","message":"Message of Title-%[1]d02","authenticated":false,"type":"modal","start_time":"%[2]s","end_time":"%[3]s"},"%[1]d03":{"id":"%[1]d03","title":"Title-%[1]d03","message":"Message of Title-%[1]d03","authenticated":false,"type":"banner","start_time":"%[2]s","link":{"Link":"www.example.com"}}}}`
+
+ cmStorageEntry := &logical.StorageEntry{
+ Key: "sys/config/ui/custom-messages",
+ Value: []byte(fmt.Sprintf(messagesTemplate, 0, startTime, endTime)),
+ }
+ storage.Put(context.Background(), cmStorageEntry)
+
+ cmStorageEntry = &logical.StorageEntry{
+ Key: "namespaces/testNS/sys/config/ui/custom-messages",
+ Value: []byte(fmt.Sprintf(messagesTemplate, 1, startTime, endTime)),
+ }
+ storage.Put(context.Background(), cmStorageEntry)
+
+ resp, err = backend.pathInternalUICustomMessagesCommon(namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace), filter)
+ assert.NoError(t, err)
+ assert.NotNil(t, resp)
+ assert.Contains(t, resp.Data, "keys")
+ assert.Equal(t, 3, len(resp.Data["keys"].([]string)))
+ assert.Contains(t, resp.Data, "key_info")
+ assert.Equal(t, 3, len(resp.Data["key_info"].(map[string]any)))
+}
diff --git a/vault/ui_custom_messages/manager.go b/vault/ui_custom_messages/manager.go
index 105d41a1ac18..47439e620b47 100644
--- a/vault/ui_custom_messages/manager.go
+++ b/vault/ui_custom_messages/manager.go
@@ -26,6 +26,12 @@ const (
MaximumMessageCountPerNamespace int = 100
)
+// nsManager is the NamespaceManager instance used to determine the set of
+// Namespaces to consider when retrieving active Custom Messages. This
+// variable is re-assigned to point to a real NamespaceManager in the
+// enterprise edition.
+
// Manager is a struct that provides methods to manage messages stored in a
// logical.Storage.
type Manager struct {
@@ -223,10 +229,13 @@ func getNamespacesToSearch(ctx context.Context, filters FindFilter) ([]*namespac
// Add the current namespace based on the context.Context to nsList.
nsList = append(nsList, ns)
- //if filters.IncludeAncestors {
- // Add the parent, grand-parent, etc... namespaces all the way back up
- // to the root namespace to nsList.
- //}
+ if filters.IncludeAncestors {
+ parentNs := nsManager.GetParentNamespace(ns.Path)
+ for ; parentNs.ID != ns.ID; parentNs = nsManager.GetParentNamespace(ns.Path) {
+ ns = parentNs
+ nsList = append(nsList, ns)
+ }
+ }
return nsList, nil
}
diff --git a/vault/ui_custom_messages/manager_test.go b/vault/ui_custom_messages/manager_test.go
index 0774fa7ae0d6..e0d8e43ddf9d 100644
--- a/vault/ui_custom_messages/manager_test.go
+++ b/vault/ui_custom_messages/manager_test.go
@@ -220,9 +220,41 @@ func TestGetNamespacesToSearch(t *testing.T) {
list, err = getNamespacesToSearch(namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace), FindFilter{})
assert.NoError(t, err)
- assert.NotNil(t, list)
- assert.Equal(t, 1, len(list))
+ assert.Len(t, list, 1)
assert.Equal(t, namespace.RootNamespace, list[0])
+
+ // Verify with nsManager set to an instance of testNamespaceManager to
+ // ensure that it is used to calculate the list of namespaces.
+ currentNsManager := nsManager
+ defer func() {
+ nsManager = currentNsManager
+ }()
+
+ nsManager = &testNamespaceManager{
+ results: []namespace.Namespace{
+ {
+ ID: "ccc",
+ Path: "c/",
+ },
+ {
+ ID: "bbb",
+ Path: "b/",
+ },
+ {
+ ID: "aaa",
+ Path: "a/",
+ },
+ },
+ }
+
+ list, err = getNamespacesToSearch(namespace.ContextWithNamespace(context.Background(), &namespace.Namespace{ID: "ddd", Path: "d/"}), FindFilter{IncludeAncestors: true})
+ assert.NoError(t, err)
+ assert.Len(t, list, 5)
+ assert.Equal(t, list[0].Path, "d/")
+ assert.Equal(t, list[1].Path, "c/")
+ assert.Equal(t, list[2].Path, "b/")
+ assert.Equal(t, list[3].Path, "a/")
+ assert.Equal(t, list[4].Path, "")
}
// TestStorageKeyForNamespace verifies that the storageKeyForNamespace function
@@ -633,3 +665,24 @@ func (s *testingStorage) Put(_ context.Context, _ *logical.StorageEntry) error {
return nil
}
+
+// testNamespaceManager is a peculiar type of NamespaceManager in that it can be
+// instantiated with the results that successive calls to its GetParentNamespace
+// method will return.
+type testNamespaceManager struct {
+ results []namespace.Namespace
+}
+
+// GetParentNamespace effectively pops namespaces from the results field in the
+// receiver testNamespaceManager struct and returns them. Once all namespaces
+// have been returned, it returns namespace.RootNamespace.
+func (n *testNamespaceManager) GetParentNamespace(_ string) *namespace.Namespace {
+ if len(n.results) == 0 {
+ return namespace.RootNamespace
+ }
+
+ ns := n.results[0]
+ n.results = n.results[1:]
+
+ return &ns
+}
diff --git a/vault/ui_custom_messages/namespace.go b/vault/ui_custom_messages/namespace.go
new file mode 100644
index 000000000000..57af486dab43
--- /dev/null
+++ b/vault/ui_custom_messages/namespace.go
@@ -0,0 +1,24 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package uicustommessages
+
+import "github.com/hashicorp/vault/helper/namespace"
+
+// NamespaceManager is the interface needed of a NamespaceManager by this
+// package. This interface allows setting a dummy NamespaceManager in the
+// community edition that can be replaced with the real
+// namespace.NamespaceManager in the enterprise edition.
+type NamespaceManager interface {
+ GetParentNamespace(string) *namespace.Namespace
+}
+
+// CommunityEditionNamespaceManager is a struct that implements the
+// NamespaceManager interface. This struct is used as a placeholder in the
+// community edition.
+type CommunityEditionNamespaceManager struct{}
+
+// GetParentNamespace always returns namespace.RootNamespace.
+func (n *CommunityEditionNamespaceManager) GetParentNamespace(_ string) *namespace.Namespace {
+ return namespace.RootNamespace
+}
diff --git a/vault/ui_custom_messages/namespace_test.go b/vault/ui_custom_messages/namespace_test.go
new file mode 100644
index 000000000000..aa0fa680b681
--- /dev/null
+++ b/vault/ui_custom_messages/namespace_test.go
@@ -0,0 +1,31 @@
+// Copyright (c) HashiCorp, Inc.
+// SPDX-License-Identifier: BUSL-1.1
+
+package uicustommessages
+
+import (
+ "testing"
+
+ "github.com/hashicorp/vault/helper/namespace"
+ "github.com/stretchr/testify/assert"
+)
+
+// TestCommunityEditionNamespaceManagerGetParentNamespace verifies that the
+// (*CommunityEditionNamespaceManager).GetParentNamespace behaves as intended,
+// which is to always return namespace.RootNamespace, regardless of the input.
+func TestCommunityEditionNamespaceManagerGetParentNamespace(t *testing.T) {
+ testNsManager := &CommunityEditionNamespaceManager{}
+
+ // Verify root namespace
+ assert.Equal(t, namespace.RootNamespace, testNsManager.GetParentNamespace(namespace.RootNamespace.Path))
+
+ // Verify a different namespace
+ testNamespace := namespace.Namespace{
+ ID: "abc123",
+ Path: "test/",
+ }
+ assert.Equal(t, namespace.RootNamespace, testNsManager.GetParentNamespace(testNamespace.Path))
+
+ // Verify that even a random string results in the root namespace
+ assert.Equal(t, namespace.RootNamespace, testNsManager.GetParentNamespace("blah"))
+}
From d5af0658ef549e43e90d05746e2855876f44d957 Mon Sep 17 00:00:00 2001
From: Chelsea Shaw <82459713+hashishaw@users.noreply.github.com>
Date: Fri, 5 Jan 2024 14:40:55 -0600
Subject: [PATCH 31/39] UI: fix PKI issuer capabilities (#24686)
---
changelog/24686.txt | 3 +
ui/app/models/pki/issuer.js | 13 ++-
.../pki/pki-engine-workflow-test.js | 109 +++++++++++-------
ui/tests/acceptance/pki/pki-overview-test.js | 4 +-
ui/tests/acceptance/policies-acl-old-test.js | 2 +
ui/tests/helpers/pki/pki-run-commands.js | 18 +++
6 files changed, 99 insertions(+), 50 deletions(-)
create mode 100644 changelog/24686.txt
diff --git a/changelog/24686.txt b/changelog/24686.txt
new file mode 100644
index 000000000000..30ef696f491e
--- /dev/null
+++ b/changelog/24686.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: fix incorrectly calculated capabilities on PKI issuer endpoints
+```
diff --git a/ui/app/models/pki/issuer.js b/ui/app/models/pki/issuer.js
index a06ea671690b..6e51d469d5fc 100644
--- a/ui/app/models/pki/issuer.js
+++ b/ui/app/models/pki/issuer.js
@@ -135,13 +135,14 @@ export default class PkiIssuerModel extends Model {
@attr importedKeys;
@attr mapping;
- @lazyCapabilities(apiPath`${'backend'}/issuer/${'issuerId'}`) issuerPath;
- @lazyCapabilities(apiPath`${'backend'}/root/rotate/exported`) rotateExported;
- @lazyCapabilities(apiPath`${'backend'}/root/rotate/internal`) rotateInternal;
- @lazyCapabilities(apiPath`${'backend'}/root/rotate/existing`) rotateExisting;
+ @lazyCapabilities(apiPath`${'backend'}/issuer/${'issuerId'}`, 'backend', 'issuerId') issuerPath;
+ @lazyCapabilities(apiPath`${'backend'}/root/rotate/exported`, 'backend') rotateExported;
+ @lazyCapabilities(apiPath`${'backend'}/root/rotate/internal`, 'backend') rotateInternal;
+ @lazyCapabilities(apiPath`${'backend'}/root/rotate/existing`, 'backend') rotateExisting;
@lazyCapabilities(apiPath`${'backend'}/root`, 'backend') deletePath;
- @lazyCapabilities(apiPath`${'backend'}/intermediate/cross-sign`) crossSignPath;
- @lazyCapabilities(apiPath`${'backend'}/issuer/${'issuerId'}/sign-intermediate`) signIntermediate;
+ @lazyCapabilities(apiPath`${'backend'}/intermediate/cross-sign`, 'backend') crossSignPath;
+ @lazyCapabilities(apiPath`${'backend'}/issuer/${'issuerId'}/sign-intermediate`, 'backend', 'issuerId')
+ signIntermediate;
get canRotateIssuer() {
return (
this.rotateExported.get('canUpdate') !== false ||
diff --git a/ui/tests/acceptance/pki/pki-engine-workflow-test.js b/ui/tests/acceptance/pki/pki-engine-workflow-test.js
index 3afc02b25db9..2e47be583573 100644
--- a/ui/tests/acceptance/pki/pki-engine-workflow-test.js
+++ b/ui/tests/acceptance/pki/pki-engine-workflow-test.js
@@ -13,7 +13,7 @@ import enablePage from 'vault/tests/pages/settings/mount-secret-backend';
import { click, currentURL, fillIn, find, isSettled, visit } from '@ember/test-helpers';
import { SELECTORS } from 'vault/tests/helpers/pki/workflow';
import { adminPolicy, readerPolicy, updatePolicy } from 'vault/tests/helpers/policy-generator/pki';
-import { tokenWithPolicy, runCommands } from 'vault/tests/helpers/pki/pki-run-commands';
+import { tokenWithPolicy, runCommands, clearRecords } from 'vault/tests/helpers/pki/pki-run-commands';
import { unsupportedPem } from 'vault/tests/helpers/pki/values';
/**
@@ -25,12 +25,14 @@ module('Acceptance | pki workflow', function (hooks) {
setupApplicationTest(hooks);
hooks.beforeEach(async function () {
+ this.store = this.owner.lookup('service:store');
await authPage.login();
// Setup PKI engine
const mountPath = `pki-workflow-${uuidv4()}`;
await enablePage.enable('pki', mountPath);
this.mountPath = mountPath;
await logout.visit();
+ clearRecords(this.store);
});
hooks.afterEach(async function () {
@@ -40,40 +42,50 @@ module('Acceptance | pki workflow', function (hooks) {
await runCommands([`delete sys/mounts/${this.mountPath}`]);
});
- test('empty state messages are correct when PKI not configured', async function (assert) {
- assert.expect(21);
- const assertEmptyState = (assert, resource) => {
- assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/${resource}`);
- assert
- .dom(SELECTORS.emptyStateTitle)
- .hasText(
- 'PKI not configured',
- `${resource} index renders correct empty state title when PKI not configured`
- );
- assert.dom(SELECTORS.emptyStateLink).hasText('Configure PKI');
- assert
- .dom(SELECTORS.emptyStateMessage)
- .hasText(
- `This PKI mount hasn't yet been configured with a certificate issuer.`,
- `${resource} index empty state message correct when PKI not configured`
- );
- };
- await authPage.login(this.pkiAdminToken);
- await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
- assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`);
-
- await click(SELECTORS.rolesTab);
- assertEmptyState(assert, 'roles');
-
- await click(SELECTORS.issuersTab);
- assertEmptyState(assert, 'issuers');
-
- await click(SELECTORS.certsTab);
- assertEmptyState(assert, 'certificates');
- await click(SELECTORS.keysTab);
- assertEmptyState(assert, 'keys');
- await click(SELECTORS.tidyTab);
- assertEmptyState(assert, 'tidy');
+ module('not configured', function (hooks) {
+ hooks.beforeEach(async function () {
+ await authPage.login();
+ const pki_admin_policy = adminPolicy(this.mountPath, 'roles');
+ this.pkiAdminToken = await tokenWithPolicy(`pki-admin-${this.mountPath}`, pki_admin_policy);
+ await logout.visit();
+ clearRecords(this.store);
+ });
+
+ test('empty state messages are correct when PKI not configured', async function (assert) {
+ assert.expect(21);
+ const assertEmptyState = (assert, resource) => {
+ assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/${resource}`);
+ assert
+ .dom(SELECTORS.emptyStateTitle)
+ .hasText(
+ 'PKI not configured',
+ `${resource} index renders correct empty state title when PKI not configured`
+ );
+ assert.dom(SELECTORS.emptyStateLink).hasText('Configure PKI');
+ assert
+ .dom(SELECTORS.emptyStateMessage)
+ .hasText(
+ `This PKI mount hasn't yet been configured with a certificate issuer.`,
+ `${resource} index empty state message correct when PKI not configured`
+ );
+ };
+ await authPage.login(this.pkiAdminToken);
+ await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
+ assert.strictEqual(currentURL(), `/vault/secrets/${this.mountPath}/pki/overview`);
+
+ await click(SELECTORS.rolesTab);
+ assertEmptyState(assert, 'roles');
+
+ await click(SELECTORS.issuersTab);
+ assertEmptyState(assert, 'issuers');
+
+ await click(SELECTORS.certsTab);
+ assertEmptyState(assert, 'certificates');
+ await click(SELECTORS.keysTab);
+ assertEmptyState(assert, 'keys');
+ await click(SELECTORS.tidyTab);
+ assertEmptyState(assert, 'tidy');
+ });
});
module('roles', function (hooks) {
@@ -91,10 +103,11 @@ module('Acceptance | pki workflow', function (hooks) {
const pki_admin_policy = adminPolicy(this.mountPath, 'roles');
const pki_reader_policy = readerPolicy(this.mountPath, 'roles');
const pki_editor_policy = updatePolicy(this.mountPath, 'roles');
- this.pkiRoleReader = await tokenWithPolicy('pki-reader', pki_reader_policy);
- this.pkiRoleEditor = await tokenWithPolicy('pki-editor', pki_editor_policy);
- this.pkiAdminToken = await tokenWithPolicy('pki-admin', pki_admin_policy);
+ this.pkiRoleReader = await tokenWithPolicy(`pki-reader-${this.mountPath}`, pki_reader_policy);
+ this.pkiRoleEditor = await tokenWithPolicy(`pki-editor-${this.mountPath}`, pki_editor_policy);
+ this.pkiAdminToken = await tokenWithPolicy(`pki-admin-${this.mountPath}`, pki_admin_policy);
await logout.visit();
+ clearRecords(this.store);
});
test('shows correct items if user has all permissions', async function (assert) {
@@ -222,10 +235,11 @@ module('Acceptance | pki workflow', function (hooks) {
const pki_admin_policy = adminPolicy(this.mountPath);
const pki_reader_policy = readerPolicy(this.mountPath, 'keys', true);
const pki_editor_policy = updatePolicy(this.mountPath, 'keys');
- this.pkiKeyReader = await tokenWithPolicy('pki-reader', pki_reader_policy);
- this.pkiKeyEditor = await tokenWithPolicy('pki-editor', pki_editor_policy);
- this.pkiAdminToken = await tokenWithPolicy('pki-admin', pki_admin_policy);
+ this.pkiKeyReader = await tokenWithPolicy(`pki-reader-${this.mountPath}`, pki_reader_policy);
+ this.pkiKeyEditor = await tokenWithPolicy(`pki-editor-${this.mountPath}`, pki_editor_policy);
+ this.pkiAdminToken = await tokenWithPolicy(`pki-admin-${this.mountPath}`, pki_admin_policy);
await logout.visit();
+ clearRecords(this.store);
});
test('shows correct items if user has all permissions', async function (assert) {
@@ -339,11 +353,14 @@ module('Acceptance | pki workflow', function (hooks) {
module('issuers', function (hooks) {
hooks.beforeEach(async function () {
await authPage.login();
+ const pki_admin_policy = adminPolicy(this.mountPath);
+ this.pkiAdminToken = await tokenWithPolicy(`pki-admin-${this.mountPath}`, pki_admin_policy);
// Configure engine with a default issuer
await runCommands([
`write ${this.mountPath}/root/generate/internal common_name="Hashicorp Test" name="Hashicorp Test"`,
]);
await logout.visit();
+ clearRecords(this.store);
});
test('lists the correct issuer metadata info', async function (assert) {
assert.expect(6);
@@ -373,7 +390,10 @@ module('Acceptance | pki workflow', function (hooks) {
capabilities = ["deny"]
}
`;
- this.token = await tokenWithPolicy('pki-issuer-denied-policy', pki_issuer_denied_policy);
+ this.token = await tokenWithPolicy(
+ `pki-issuer-denied-policy-${this.mountPath}`,
+ pki_issuer_denied_policy
+ );
await logout.visit();
await authPage.login(this.token);
await visit(`/vault/secrets/${this.mountPath}/pki/overview`);
@@ -487,7 +507,10 @@ module('Acceptance | pki workflow', function (hooks) {
${adminPolicy(this.mountPath)}
${readerPolicy(this.mountPath, 'config/cluster')}
`;
- this.mixedConfigCapabilities = await tokenWithPolicy('pki-reader', mixed_config_policy);
+ this.mixedConfigCapabilities = await tokenWithPolicy(
+ `pki-reader-${this.mountPath}`,
+ mixed_config_policy
+ );
await logout.visit();
});
diff --git a/ui/tests/acceptance/pki/pki-overview-test.js b/ui/tests/acceptance/pki/pki-overview-test.js
index a8234e0d31fc..05a09dd87dbb 100644
--- a/ui/tests/acceptance/pki/pki-overview-test.js
+++ b/ui/tests/acceptance/pki/pki-overview-test.js
@@ -10,12 +10,13 @@ import logout from 'vault/tests/pages/logout';
import enablePage from 'vault/tests/pages/settings/mount-secret-backend';
import { click, currentURL, currentRouteName, visit } from '@ember/test-helpers';
import { SELECTORS } from 'vault/tests/helpers/pki/overview';
-import { tokenWithPolicy, runCommands } from 'vault/tests/helpers/pki/pki-run-commands';
+import { tokenWithPolicy, runCommands, clearRecords } from 'vault/tests/helpers/pki/pki-run-commands';
module('Acceptance | pki overview', function (hooks) {
setupApplicationTest(hooks);
hooks.beforeEach(async function () {
+ this.store = this.owner.lookup('service:store');
await authPage.login();
// Setup PKI engine
const mountPath = `pki`;
@@ -42,6 +43,7 @@ module('Acceptance | pki overview', function (hooks) {
this.pkiIssuersList = await tokenWithPolicy('pki-issuers-list', pki_issuers_list_policy);
this.pkiAdminToken = await tokenWithPolicy('pki-admin', pki_admin_policy);
await logout.visit();
+ clearRecords(this.store);
});
hooks.afterEach(async function () {
diff --git a/ui/tests/acceptance/policies-acl-old-test.js b/ui/tests/acceptance/policies-acl-old-test.js
index 3415f0e5d63a..35eccecf4bc6 100644
--- a/ui/tests/acceptance/policies-acl-old-test.js
+++ b/ui/tests/acceptance/policies-acl-old-test.js
@@ -45,6 +45,7 @@ module('Acceptance | policies (old)', function (hooks) {
assert.dom('[data-test-policy-name]').hasText(policyLower, 'displays the policy name on the show page');
assert.dom('[data-test-flash-message].is-info').doesNotExist('no flash message is displayed on save');
await click('[data-test-policy-list-link] a');
+ await fillIn('[data-test-component="navigate-input"]', policyLower);
assert
.dom(`[data-test-policy-link="${policyLower}"]`)
.exists({ count: 1 }, 'new policy shown in the list');
@@ -63,6 +64,7 @@ module('Acceptance | policies (old)', function (hooks) {
`/vault/policies/acl`,
'navigates to policy list on successful deletion'
);
+ await fillIn('[data-test-component="navigate-input"]', policyLower);
assert
.dom(`[data-test-policy-item="${policyLower}"]`)
.doesNotExist('deleted policy is not shown in the list');
diff --git a/ui/tests/helpers/pki/pki-run-commands.js b/ui/tests/helpers/pki/pki-run-commands.js
index ac60ec513efd..291aab176fdd 100644
--- a/ui/tests/helpers/pki/pki-run-commands.js
+++ b/ui/tests/helpers/pki/pki-run-commands.js
@@ -34,3 +34,21 @@ export const runCommands = async function (commands) {
throw error;
}
};
+
+// Clears pki-related data and capabilities so that admin
+// capabilities from setup don't rollover
+export function clearRecords(store) {
+ store.unloadAll('pki/action');
+ store.unloadAll('pki/issuer');
+ store.unloadAll('pki/key');
+ store.unloadAll('pki/role');
+ store.unloadAll('pki/sign-intermediate');
+ store.unloadAll('pki/tidy');
+ store.unloadAll('pki/config/urls');
+ store.unloadAll('pki/config/crl');
+ store.unloadAll('pki/config/cluster');
+ store.unloadAll('pki/config/acme');
+ store.unloadAll('pki/certificate/generate');
+ store.unloadAll('pki/certificate/sign');
+ store.unloadAll('capabilities');
+}
From 8f784a3afb9246dd124afd1922cf03a15b1ea860 Mon Sep 17 00:00:00 2001
From: claire bontempo <68122737+hellobontempo@users.noreply.github.com>
Date: Fri, 5 Jan 2024 14:23:02 -0800
Subject: [PATCH 32/39] Secrets Sync UI: Small UX copy improvements (#24693)
* update secret details alert banner
* small copy changes
* remove empty description block
* update tests
* use pluralize helper
* change banner wording
---
.../addon/components/page/secret/details.hbs | 7 ++-
ui/lib/kv/package.json | 3 +-
.../page/destinations/destination/sync.hbs | 2 +-
.../components/secrets/page/overview.hbs | 2 +-
.../kv/page/kv-page-secret-details-test.js | 56 +++++++++++++++++--
.../sync/secrets/page/overview-test.js | 2 +-
6 files changed, 61 insertions(+), 11 deletions(-)
diff --git a/ui/lib/kv/addon/components/page/secret/details.hbs b/ui/lib/kv/addon/components/page/secret/details.hbs
index 4a5f91ea6609..2eb6bab4e8fa 100644
--- a/ui/lib/kv/addon/components/page/secret/details.hbs
+++ b/ui/lib/kv/addon/components/page/secret/details.hbs
@@ -6,10 +6,11 @@
<:syncDetails>
{{#if this.syncStatus}}
-
+
- This secret has been synced from Vault to other destinations, updates to the secret will get automatically synced
- to destinations.
+ This secret has been synced from Vault to
+ {{pluralize this.syncStatus.length "destination"}}. Updates to this secret will automatically sync to its
+ {{if (eq this.syncStatus.length 1) "destination" "destinations"}}.
{{#each this.syncStatus as |status|}}
diff --git a/ui/lib/kv/package.json b/ui/lib/kv/package.json
index 4942e70fea86..5c76aa678d4a 100644
--- a/ui/lib/kv/package.json
+++ b/ui/lib/kv/package.json
@@ -8,7 +8,8 @@
"ember-cli-htmlbars": "*",
"ember-cli-babel": "*",
"ember-concurrency": "*",
- "@ember/test-waiters": "*"
+ "@ember/test-waiters": "*",
+ "ember-inflector": "*"
},
"ember-addon": {
"paths": [
diff --git a/ui/lib/sync/addon/components/secrets/page/destinations/destination/sync.hbs b/ui/lib/sync/addon/components/secrets/page/destinations/destination/sync.hbs
index be90b8acb5cc..20725c4c7896 100644
--- a/ui/lib/sync/addon/components/secrets/page/destinations/destination/sync.hbs
+++ b/ui/lib/sync/addon/components/secrets/page/destinations/destination/sync.hbs
@@ -37,7 +37,7 @@
Select a KV engine mount and path to sync a secret to the
{{@destination.typeDisplayName}}
- destination.
+ destination. Selecting a previously synced secret will re-sync that secret.
diff --git a/ui/lib/sync/addon/components/secrets/page/overview.hbs b/ui/lib/sync/addon/components/secrets/page/overview.hbs
index 3a8844fb5f1b..e114da4cf849 100644
--- a/ui/lib/sync/addon/components/secrets/page/overview.hbs
+++ b/ui/lib/sync/addon/components/secrets/page/overview.hbs
@@ -104,7 +104,7 @@
diff --git a/ui/tests/integration/components/kv/page/kv-page-secret-details-test.js b/ui/tests/integration/components/kv/page/kv-page-secret-details-test.js
index d01abb56abee..28327e13a699 100644
--- a/ui/tests/integration/components/kv/page/kv-page-secret-details-test.js
+++ b/ui/tests/integration/components/kv/page/kv-page-secret-details-test.js
@@ -240,8 +240,8 @@ module('Integration | Component | kv-v2 | Page::Secret::Details', function (hook
.exists('renders current version icon');
});
- test('it renders sync status page alert', async function (assert) {
- assert.expect(5); // assert count important because confirms request made to fetch sync status twice
+ test('it renders sync status page alert and refreshes', async function (assert) {
+ assert.expect(6); // assert count important because confirms request made to fetch sync status twice
const destinationName = 'my-destination';
this.server.create('sync-association', {
type: 'aws-sm',
@@ -250,7 +250,7 @@ module('Integration | Component | kv-v2 | Page::Secret::Details', function (hook
secret_name: this.path,
});
this.server.get(`sys/sync/associations/destinations`, (schema, req) => {
- // this assertion should be hit twice, once on init and again when the 'Refresh' button is clicked
+ // these assertions should be hit twice, once on init and again when the 'Refresh' button is clicked
assert.ok(true, 'request made to fetch sync status');
assert.propEqual(
req.queryParams,
@@ -281,8 +281,56 @@ module('Integration | Component | kv-v2 | Page::Secret::Details', function (hook
'Synced my-destination - last updated September',
'renders sync status alert banner'
);
-
+ assert
+ .dom(PAGE.detail.syncAlert())
+ .hasTextContaining(
+ 'This secret has been synced from Vault to 1 destination. Updates to this secret will automatically sync to its destination.',
+ 'renders alert header referring to singular destination'
+ );
// sync status refresh button
await click(`${PAGE.detail.syncAlert()} button`);
});
+
+ test('it renders sync status page alert for multiple destinations', async function (assert) {
+ assert.expect(3); // assert count important because confirms request made to fetch sync status twice
+ this.server.create('sync-association', {
+ type: 'aws-sm',
+ name: 'aws-dest',
+ mount: this.backend,
+ secret_name: this.path,
+ });
+ this.server.create('sync-association', {
+ type: 'gh',
+ name: 'gh-dest',
+ mount: this.backend,
+ secret_name: this.path,
+ });
+ this.server.get(`sys/sync/associations/destinations`, (schema, req) => {
+ return syncStatusResponse(schema, req);
+ });
+
+ await render(
+ hbs`
+
+ `,
+ { owner: this.engine }
+ );
+ assert
+ .dom(PAGE.detail.syncAlert('aws-dest'))
+ .hasTextContaining('Synced aws-dest - last updated September', 'renders status for aws destination');
+ assert
+ .dom(PAGE.detail.syncAlert('gh-dest'))
+ .hasTextContaining('Syncing gh-dest - last updated September', 'renders status for gh destination');
+ assert
+ .dom(PAGE.detail.syncAlert())
+ .hasTextContaining(
+ 'This secret has been synced from Vault to 2 destinations. Updates to this secret will automatically sync to its destinations.',
+ 'renders alert title referring to plural destinations'
+ );
+ });
});
diff --git a/ui/tests/integration/components/sync/secrets/page/overview-test.js b/ui/tests/integration/components/sync/secrets/page/overview-test.js
index 33cdff977d04..860a185a1cc2 100644
--- a/ui/tests/integration/components/sync/secrets/page/overview-test.js
+++ b/ui/tests/integration/components/sync/secrets/page/overview-test.js
@@ -105,7 +105,7 @@ module('Integration | Component | sync | Page::Overview', function (hooks) {
await click(actionToggle(0));
assert.dom(action('sync')).hasText('Sync secrets', 'Sync action renders');
- assert.dom(action('details')).hasText('Details', 'Details action renders');
+ assert.dom(action('details')).hasText('View synced secrets', 'View synced secrets action renders');
});
test('it should paginate secrets by destination table', async function (assert) {
From 205680c0772df413ed7972f62f7bfd2874ceb277 Mon Sep 17 00:00:00 2001
From: Kyle Schochenmaier
Date: Fri, 5 Jan 2024 16:55:48 -0600
Subject: [PATCH 33/39] add docs for configuring jwt validation pubkeys for vso
(#24599)
* add docs for configuring jwt validation pubkeys for vso and update jwt auth docs to mention key rotation
Co-authored-by: Tom Proctor
---
.../content/docs/auth/jwt/oidc-providers/kubernetes.mdx | 5 +++++
website/content/docs/platform/k8s/vso/examples.mdx | 7 +++++++
2 files changed, 12 insertions(+)
diff --git a/website/content/docs/auth/jwt/oidc-providers/kubernetes.mdx b/website/content/docs/auth/jwt/oidc-providers/kubernetes.mdx
index 383b12592e9a..55220b7ecd14 100644
--- a/website/content/docs/auth/jwt/oidc-providers/kubernetes.mdx
+++ b/website/content/docs/auth/jwt/oidc-providers/kubernetes.mdx
@@ -81,6 +81,11 @@ This method can be useful if Kubernetes' API is not reachable from Vault or if
you would like a single JWT auth mount to service multiple Kubernetes clusters
by chaining their public signing keys.
+
+ Should the JWT Signing Key used by Kubernetes be rotated,
+ this process should be repeated with the new key.
+
+
Kubernetes cluster requirements:
* [`ServiceAccountIssuerDiscovery`][k8s-sa-issuer-discovery] feature enabled.
diff --git a/website/content/docs/platform/k8s/vso/examples.mdx b/website/content/docs/platform/k8s/vso/examples.mdx
index d2a02c7be081..62a02e3e3a3c 100644
--- a/website/content/docs/platform/k8s/vso/examples.mdx
+++ b/website/content/docs/platform/k8s/vso/examples.mdx
@@ -11,6 +11,13 @@ The Operator project provides the following examples:
- Sample use-cases are documented [here](https://github.com/hashicorp/vault-secrets-operator#samples)
- A Terraform based demo can be found [here](https://github.com/hashicorp/vault-secrets-operator/tree/main/demo)
+## JWT auth for Kubernetes clusters in private networks
+
+Vault Secrets Operator supports using the [JWT auth method](/vault/docs/platform/k8s/vso/api-reference#vaultauthconfigjwt).
+JWT auth [verifies tokens](/vault/docs/auth/jwt#jwt-verification) using the issuer's public signing key.
+Vault supports fetching this public key from the Kubernetes API, but if users can't expose the Kubernetes API to Vault, the public key can be provided directly using [`jwt_validation_pubkeys`](/vault/api-docs/auth/jwt#jwt_validation_pubkeys).
+To configure this, please follow the steps outlined for [Using JWT validation public keys](/vault/docs/auth/jwt/oidc-providers/kubernetes#using-jwt-validation-public-keys).
+
## Using VaultStaticSecrets for imagePullSecrets
Vault Secret Operator supports Kubernetes' templating of Secrets based on their
From 87ab7497fa96c7f113b2e8790d3fdf11120a8e82 Mon Sep 17 00:00:00 2001
From: divyaac
Date: Fri, 5 Jan 2024 15:31:16 -0800
Subject: [PATCH 34/39] Docs changes (#24242)
---
.../content/api-docs/system/lease-count-quotas.mdx | 2 +-
website/content/api-docs/system/quotas-config.mdx | 13 +++++++++++--
.../content/api-docs/system/rate-limit-quotas.mdx | 2 +-
.../content/partials/api/restricted-endpoints.mdx | 6 +++---
4 files changed, 16 insertions(+), 7 deletions(-)
diff --git a/website/content/api-docs/system/lease-count-quotas.mdx b/website/content/api-docs/system/lease-count-quotas.mdx
index 1e95b2d108f2..dc2c513c50eb 100644
--- a/website/content/api-docs/system/lease-count-quotas.mdx
+++ b/website/content/api-docs/system/lease-count-quotas.mdx
@@ -8,7 +8,7 @@ description: The `/sys/quotas/lease-count` endpoint is used to create, edit and
@include 'alerts/enterprise-only.mdx'
-@include 'alerts/restricted-root.mdx'
+@include 'alerts/restricted-admin.mdx'
The `/sys/quotas/lease-count` endpoint is used to create, edit and delete lease count quotas.
diff --git a/website/content/api-docs/system/quotas-config.mdx b/website/content/api-docs/system/quotas-config.mdx
index f1cb2700c214..d0d021dddae9 100644
--- a/website/content/api-docs/system/quotas-config.mdx
+++ b/website/content/api-docs/system/quotas-config.mdx
@@ -6,7 +6,8 @@ description: The `/sys/quotas/config` endpoint is used to configure rate limit q
# `/sys/quotas/config`
-@include 'alerts/restricted-root.mdx'
+@include 'alerts/restricted-admin.mdx'
+
The `/sys/quotas/config` endpoint is used to configure rate limit quotas.
@@ -19,7 +20,15 @@ The `/sys/quotas/config` endpoint is used to configure rate limit quotas.
### Parameters
- `rate_limit_exempt_paths` `([]string: [])` - Specifies the list of exempt paths
- from all rate limit quotas. If empty no paths will be exempt.
+ from all rate limit quotas. Exempt paths are relative and apply to all
+ namespaces. When `rate_limit_exempt_paths` is empty, Vault applies quotas to
+ all relative paths. Access to exemption data is read-only from the admin namespace.
+ **You cannot update `rate_limit_exempt_paths` from the admin namespace**
+- `global_rate_limit_exempt_paths` `([]string: [])` - Specifies the list of
+ exempt paths from all rate limit quotas. Global exempt paths are absolute and
+ do not apply across namespaces. When `global_rate_limit_exempt_paths` is empty,
+ Vault applies quotas to all absolute paths. You can only add, update, or delete
+ global paths within the scope of the calling namespace.
- `enable_rate_limit_audit_logging` `(bool: false)` - If set, starts audit logging
of requests that get rejected due to rate limit quota rule violations.
- `enable_rate_limit_response_headers` `(bool: false)` - If set, additional rate
diff --git a/website/content/api-docs/system/rate-limit-quotas.mdx b/website/content/api-docs/system/rate-limit-quotas.mdx
index a85a18a75724..e479781fea20 100644
--- a/website/content/api-docs/system/rate-limit-quotas.mdx
+++ b/website/content/api-docs/system/rate-limit-quotas.mdx
@@ -6,7 +6,7 @@ description: The `/sys/quotas/rate-limit` endpoint is used to create, edit and d
# `/sys/quotas/rate-limit`
-@include 'alerts/restricted-root.mdx'
+@include 'alerts/restricted-admin.mdx'
The `/sys/quotas/rate-limit` endpoint is used to create, edit and delete rate limit quotas.
diff --git a/website/content/partials/api/restricted-endpoints.mdx b/website/content/partials/api/restricted-endpoints.mdx
index 6d2d63e152cf..1e94d400a8fb 100644
--- a/website/content/partials/api/restricted-endpoints.mdx
+++ b/website/content/partials/api/restricted-endpoints.mdx
@@ -34,9 +34,9 @@ API path | Root | Admin
`sys/mfa/method/*` | YES | NO
`sys/monitor` | YES | YES
`sys/pprof/*` | YES | NO
-`sys/quotas/config` | YES | NO
-`sys/quotas/lease-count` | YES | NO
-`sys/quotas/rate-limit` | YES | NO
+`sys/quotas/config` | YES | YES
+`sys/quotas/lease-count` | YES | YES
+`sys/quotas/rate-limit` | YES | YES
`sys/raw` | YES | NO
`sys/rekey/*` | YES | NO
`sys/rekey-recovery-key` | YES | NO
From 3153673894781bdffb8562435dcc6ac8427d7205 Mon Sep 17 00:00:00 2001
From: Jordan Reimer
Date: Fri, 5 Jan 2024 16:41:57 -0700
Subject: [PATCH 35/39] Sync Destinations List Name Filter Updates (#24695)
* updates destination name filter to use FilterInput component
* simplifies destinations list redirect condition
* fixes issue with sync destination type filter and issue filtering by both name and type
* unsets page query param in sync destination secrets route
---
ui/app/styles/helper-classes/typography.scss | 3 ++
.../components/search-select-placeholder.hbs | 2 +-
.../components/secrets/page/destinations.hbs | 23 +++++++-------
.../components/secrets/page/destinations.ts | 31 +++++++++++++++----
.../page/destinations/create-and-edit.ts | 2 +-
.../destinations/destination/secrets.ts | 20 +++++++++++-
.../routes/secrets/destinations/index.ts | 28 +++++++++++++++--
.../sync/secrets/destinations-test.js | 9 ++++--
ui/tests/helpers/general-selectors.js | 1 +
.../sync/secrets/page/destinations-test.js | 5 ++-
10 files changed, 96 insertions(+), 28 deletions(-)
diff --git a/ui/app/styles/helper-classes/typography.scss b/ui/app/styles/helper-classes/typography.scss
index 4cde9b2d3c16..8466b0c29440 100644
--- a/ui/app/styles/helper-classes/typography.scss
+++ b/ui/app/styles/helper-classes/typography.scss
@@ -110,6 +110,9 @@
}
}
+.opacity-050 {
+ opacity: 0.5;
+}
.opacity-060 {
opacity: 0.6;
}
diff --git a/ui/lib/core/addon/components/search-select-placeholder.hbs b/ui/lib/core/addon/components/search-select-placeholder.hbs
index b168b1c9ab53..32b9adeea1ce 100644
--- a/ui/lib/core/addon/components/search-select-placeholder.hbs
+++ b/ui/lib/core/addon/components/search-select-placeholder.hbs
@@ -6,7 +6,7 @@
- {{or @placeholder "Search"}}
+ {{or @placeholder "Search"}}
diff --git a/ui/lib/sync/addon/components/secrets/page/destinations.hbs b/ui/lib/sync/addon/components/secrets/page/destinations.hbs
index 40eba8842c04..a1cf7ecbea12 100644
--- a/ui/lib/sync/addon/components/secrets/page/destinations.hbs
+++ b/ui/lib/sync/addon/components/secrets/page/destinations.hbs
@@ -28,18 +28,17 @@
class="is-marginless"
data-test-filter="type"
/>
-
+
+
+
diff --git a/ui/lib/sync/addon/components/secrets/page/destinations.ts b/ui/lib/sync/addon/components/secrets/page/destinations.ts
index d4aa6c08b5ca..77ac790f06a0 100644
--- a/ui/lib/sync/addon/components/secrets/page/destinations.ts
+++ b/ui/lib/sync/addon/components/secrets/page/destinations.ts
@@ -9,6 +9,7 @@ import { action } from '@ember/object';
import { getOwner } from '@ember/application';
import errorMessage from 'vault/utils/error-message';
import { findDestination, syncDestinations } from 'core/helpers/sync-destinations';
+import { next } from '@ember/runloop';
import type SyncDestinationModel from 'vault/vault/models/sync/destination';
import type RouterService from '@ember/routing/router-service';
@@ -16,6 +17,7 @@ import type StoreService from 'vault/services/store';
import type FlashMessageService from 'vault/services/flash-messages';
import type { EngineOwner } from 'vault/vault/app-types';
import type { SyncDestinationName, SyncDestinationType } from 'vault/vault/helpers/sync-destinations';
+import type Transition from '@ember/routing/transition';
interface Args {
destinations: Array;
@@ -28,15 +30,31 @@ export default class SyncSecretsDestinationsPageComponent extends Component document.getElementById('name-filter')?.focus());
+ }
+ }
+
// typeFilter arg comes in as destination type but we need to pass the destination display name into the SearchSelect
get typeFilterName() {
return findDestination(this.args.typeFilter)?.name;
}
- get destinationNames() {
- return this.args.destinations.map((destination) => ({ id: destination.name, name: destination.name }));
- }
-
get destinationTypes() {
return syncDestinations().map((d) => ({ id: d.name, name: d.type }));
}
@@ -65,9 +83,10 @@ export default class SyncSecretsDestinationsPageComponent extends Component) {
+ onFilterChange(key: string, value: { id: string; name: string }[] | string | undefined) {
+ const queryValue = Array.isArray(value) ? value[0]?.name : value;
this.router.transitionTo('vault.cluster.sync.secrets.destinations', {
- queryParams: { [key]: selectObject[0]?.name },
+ queryParams: { [key]: queryValue },
});
}
diff --git a/ui/lib/sync/addon/components/secrets/page/destinations/create-and-edit.ts b/ui/lib/sync/addon/components/secrets/page/destinations/create-and-edit.ts
index 04e9331bcfae..6ba75cfe0e39 100644
--- a/ui/lib/sync/addon/components/secrets/page/destinations/create-and-edit.ts
+++ b/ui/lib/sync/addon/components/secrets/page/destinations/create-and-edit.ts
@@ -59,7 +59,7 @@ export default class DestinationsCreateForm extends Component {
@waitFor
*save(event: Event) {
event.preventDefault();
-
+ this.error = '';
// clear out validation warnings
this.modelValidations = null;
const { destination } = this.args;
diff --git a/ui/lib/sync/addon/routes/secrets/destinations/destination/secrets.ts b/ui/lib/sync/addon/routes/secrets/destinations/destination/secrets.ts
index ddf1ca5dd570..9d24413acd00 100644
--- a/ui/lib/sync/addon/routes/secrets/destinations/destination/secrets.ts
+++ b/ui/lib/sync/addon/routes/secrets/destinations/destination/secrets.ts
@@ -8,12 +8,24 @@ import { inject as service } from '@ember/service';
import { hash } from 'rsvp';
import type StoreService from 'vault/services/store';
-import SyncDestinationModel from 'vault/vault/models/sync/destination';
+import type SyncDestinationModel from 'vault/vault/models/sync/destination';
+import type SyncAssociationModel from 'vault/vault/models/sync/association';
+import type Controller from '@ember/controller';
interface SyncDestinationSecretsRouteParams {
page: string;
}
+interface SyncDestinationSecretsRouteModel {
+ destination: SyncDestinationModel;
+ associations: SyncAssociationModel[];
+}
+
+interface SyncDestinationSecretsController extends Controller {
+ model: SyncDestinationSecretsRouteModel;
+ page: number | undefined;
+}
+
export default class SyncDestinationSecretsRoute extends Route {
@service declare readonly store: StoreService;
@@ -35,4 +47,10 @@ export default class SyncDestinationSecretsRoute extends Route {
}),
});
}
+
+ resetController(controller: SyncDestinationSecretsController, isExiting: boolean) {
+ if (isExiting) {
+ controller.set('page', undefined);
+ }
+ }
}
diff --git a/ui/lib/sync/addon/routes/secrets/destinations/index.ts b/ui/lib/sync/addon/routes/secrets/destinations/index.ts
index 132417095f1c..ae8d37cbcca2 100644
--- a/ui/lib/sync/addon/routes/secrets/destinations/index.ts
+++ b/ui/lib/sync/addon/routes/secrets/destinations/index.ts
@@ -11,6 +11,7 @@ import type StoreService from 'vault/services/store';
import type RouterService from '@ember/routing/router-service';
import type { ModelFrom } from 'vault/vault/route';
import type SyncDestinationModel from 'vault/vault/models/sync/destination';
+import type Controller from '@ember/controller';
interface SyncSecretsDestinationsIndexRouteParams {
name: string;
@@ -18,6 +19,19 @@ interface SyncSecretsDestinationsIndexRouteParams {
page: string;
}
+interface SyncSecretsDestinationsRouteModel {
+ destinations: SyncDestinationModel[];
+ nameFilter: string | undefined;
+ typeFilter: string | undefined;
+}
+
+interface SyncSecretsDestinationsController extends Controller {
+ model: SyncSecretsDestinationsRouteModel;
+ page: number | undefined;
+ name: number | undefined;
+ type: number | undefined;
+}
+
export default class SyncSecretsDestinationsIndexRoute extends Route {
@service declare readonly store: StoreService;
@service declare readonly router: RouterService;
@@ -35,7 +49,7 @@ export default class SyncSecretsDestinationsIndexRoute extends Route {
};
redirect(model: ModelFrom) {
- if (model.destinations.length === 0) {
+ if (!model.destinations.meta.total) {
this.router.transitionTo('vault.cluster.sync.secrets.overview');
}
}
@@ -43,7 +57,7 @@ export default class SyncSecretsDestinationsIndexRoute extends Route {
filterData(dataset: Array, name: string, type: string): Array {
let filteredDataset = dataset;
const filter = (key: keyof SyncDestinationModel, value: string) => {
- return dataset.filter((model) => {
+ return filteredDataset.filter((model) => {
return model[key].toLowerCase().includes(value.toLowerCase());
});
};
@@ -68,4 +82,14 @@ export default class SyncSecretsDestinationsIndexRoute extends Route {
typeFilter: params.type,
});
}
+
+ resetController(controller: SyncSecretsDestinationsController, isExiting: boolean) {
+ if (isExiting) {
+ controller.setProperties({
+ page: undefined,
+ name: undefined,
+ type: undefined,
+ });
+ }
+ }
}
diff --git a/ui/tests/acceptance/sync/secrets/destinations-test.js b/ui/tests/acceptance/sync/secrets/destinations-test.js
index 017460621ff1..715d8ab1f5df 100644
--- a/ui/tests/acceptance/sync/secrets/destinations-test.js
+++ b/ui/tests/acceptance/sync/secrets/destinations-test.js
@@ -9,7 +9,7 @@ import { setupMirage } from 'ember-cli-mirage/test-support';
import syncScenario from 'vault/mirage/scenarios/sync';
import syncHandlers from 'vault/mirage/handlers/sync';
import authPage from 'vault/tests/pages/auth';
-import { click, visit } from '@ember/test-helpers';
+import { click, visit, fillIn } from '@ember/test-helpers';
import { PAGE } from 'vault/tests/helpers/sync/sync-selectors';
const { searchSelect, filter, listItem } = PAGE;
@@ -29,6 +29,11 @@ module('Acceptance | sync | destinations', function (hooks) {
assert.dom(listItem).exists({ count: 6 }, 'All destinations render');
await click(`${filter('type')} .ember-basic-dropdown-trigger`);
await click(searchSelect.option());
- assert.dom(listItem).exists({ count: 2 }, 'Filtered destinations render');
+ assert.dom(listItem).exists({ count: 2 }, 'Destinations are filtered by type');
+ await fillIn(filter('name'), 'new');
+ assert.dom(listItem).exists({ count: 1 }, 'Destinations are filtered by type and name');
+ await click(searchSelect.removeSelected);
+ await fillIn(filter('name'), 'gcp');
+ assert.dom(listItem).exists({ count: 1 }, 'Destinations are filtered by name');
});
});
diff --git a/ui/tests/helpers/general-selectors.js b/ui/tests/helpers/general-selectors.js
index 670fda1f4c01..36ee70c5f9bf 100644
--- a/ui/tests/helpers/general-selectors.js
+++ b/ui/tests/helpers/general-selectors.js
@@ -14,6 +14,7 @@ export const SELECTORS = {
icon: (name) => `[data-test-icon="${name}"]`,
tab: (name) => `[data-test-tab="${name}"]`,
filter: (name) => `[data-test-filter="${name}"]`,
+ filterInput: '[data-test-filter-input]',
confirmModalInput: '[data-test-confirmation-modal-input]',
confirmButton: '[data-test-confirm-button]',
emptyStateTitle: '[data-test-empty-state-title]',
diff --git a/ui/tests/integration/components/sync/secrets/page/destinations-test.js b/ui/tests/integration/components/sync/secrets/page/destinations-test.js
index 4366bb379b43..66bf310a79bf 100644
--- a/ui/tests/integration/components/sync/secrets/page/destinations-test.js
+++ b/ui/tests/integration/components/sync/secrets/page/destinations-test.js
@@ -7,7 +7,7 @@ import { module, test } from 'qunit';
import { setupRenderingTest } from 'ember-qunit';
import { setupEngine } from 'ember-engines/test-support';
import { setupMirage } from 'ember-cli-mirage/test-support';
-import { render, click } from '@ember/test-helpers';
+import { render, click, fillIn } from '@ember/test-helpers';
import hbs from 'htmlbars-inline-precompile';
import { allowAllCapabilitiesStub } from 'vault/tests/helpers/stubs';
import sinon from 'sinon';
@@ -97,8 +97,7 @@ module('Integration | Component | sync | Page::Destinations', function (hooks) {
);
// NAME FILTER
- await click(`${filter('name')} .ember-basic-dropdown-trigger`);
- await click(searchSelect.option(searchSelect.optionIndex('destination-aws')));
+ await fillIn(filter('name'), 'destination-aws');
assert.deepEqual(
this.transitionStub.lastCall.args,
['vault.cluster.sync.secrets.destinations', { queryParams: { name: 'destination-aws' } }],
From ee0ccea547ae6a6c3c097e67903d5cc2b9184f2a Mon Sep 17 00:00:00 2001
From: claire bontempo <68122737+hellobontempo@users.noreply.github.com>
Date: Sun, 7 Jan 2024 12:06:03 -0800
Subject: [PATCH 36/39] UI: Changes jwks_ca_pem param to a 'file' edit type
(#24697)
* change jwks_ca_pem to file edit type
* add changelog
---
changelog/24697.txt | 3 +++
ui/app/models/auth-config/jwt.js | 30 +++++++++++++++++++++++++++++-
2 files changed, 32 insertions(+), 1 deletion(-)
create mode 100644 changelog/24697.txt
diff --git a/changelog/24697.txt b/changelog/24697.txt
new file mode 100644
index 000000000000..49492d19b290
--- /dev/null
+++ b/changelog/24697.txt
@@ -0,0 +1,3 @@
+```release-note:bug
+ui: Fixes input for jwks_ca_pem when configuring a JWT auth method
+```
\ No newline at end of file
diff --git a/ui/app/models/auth-config/jwt.js b/ui/app/models/auth-config/jwt.js
index e432800c3021..f675c29c9ffe 100644
--- a/ui/app/models/auth-config/jwt.js
+++ b/ui/app/models/auth-config/jwt.js
@@ -24,12 +24,31 @@ export default AuthConfig.extend({
oidcClientSecret: attr('string', {
label: 'OIDC client secret',
}),
+
oidcDiscoveryCaPem: attr('string', {
label: 'OIDC discovery CA PEM',
editType: 'file',
helpText:
'The CA certificate or chain of certificates, in PEM format, to use to validate connections to the OIDC Discovery URL. If not set, system certificates are used',
}),
+
+ jwksCaPem: attr('string', {
+ label: 'JWKS CA PEM',
+ editType: 'file',
+ }),
+
+ jwksUrl: attr('string', {
+ label: 'JWKS URL',
+ }),
+
+ oidcResponseMode: attr('string', {
+ label: 'OIDC response mode',
+ }),
+
+ oidcResponseTypes: attr('string', {
+ label: 'OIDC response types',
+ }),
+
jwtValidationPubkeys: attr({
label: 'JWT validation public keys',
editType: 'stringArray',
@@ -38,14 +57,23 @@ export default AuthConfig.extend({
jwtSupportedAlgs: attr({
label: 'JWT supported algorithms',
}),
+
boundIssuer: attr('string', {
helpText: 'The value against which to match the iss claim in a JWT',
}),
+
fieldGroups: computed('constructor.modelName', 'newFields', function () {
const type = this.constructor.modelName.split('/')[1].toUpperCase();
let groups = [
{
- default: ['oidcDiscoveryUrl', 'defaultRole'],
+ default: [
+ 'oidcDiscoveryUrl',
+ 'defaultRole',
+ 'jwksCaPem',
+ 'jwksUrl',
+ 'oidcResponseMode',
+ 'oidcResponseTypes',
+ ],
},
{
[`${type} Options`]: [
From 6e537bb376d29e7054ca7eab854f02eb481808e9 Mon Sep 17 00:00:00 2001
From: Tom Proctor
Date: Mon, 8 Jan 2024 12:21:13 +0000
Subject: [PATCH 37/39] Support reloading database plugins across multiple
mounts (#24512)
* Support reloading database plugins across multiple mounts
* Add clarifying comment to MountEntry.Path field
* Tests: Replace non-parallelisable t.Setenv with plugin env settings
---
builtin/logical/database/backend_test.go | 271 +++++++++---------
.../logical/database/versioning_large_test.go | 7 +-
builtin/plugin/backend_test.go | 5 +-
changelog/24512.txt | 6 +
http/plugin_test.go | 6 +-
vault/external_tests/plugin/plugin_test.go | 32 +++
vault/logical_system.go | 25 +-
vault/mount.go | 2 +-
vault/plugin_reload.go | 47 ++-
9 files changed, 233 insertions(+), 168 deletions(-)
create mode 100644 changelog/24512.txt
diff --git a/builtin/logical/database/backend_test.go b/builtin/logical/database/backend_test.go
index 0554365e1c35..f330e5e5c2a6 100644
--- a/builtin/logical/database/backend_test.go
+++ b/builtin/logical/database/backend_test.go
@@ -6,12 +6,15 @@ package database
import (
"context"
"database/sql"
+ "encoding/json"
+ "errors"
"fmt"
"log"
"net/url"
"os"
"reflect"
"strings"
+ "sync"
"testing"
"time"
@@ -35,12 +38,26 @@ import (
"github.com/mitchellh/mapstructure"
)
+func getClusterPostgresDBWithFactory(t *testing.T, factory logical.Factory) (*vault.TestCluster, logical.SystemView) {
+ t.Helper()
+ cluster, sys := getClusterWithFactory(t, factory)
+ vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed",
+ []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)})
+ return cluster, sys
+}
+
func getClusterPostgresDB(t *testing.T) (*vault.TestCluster, logical.SystemView) {
+ t.Helper()
+ cluster, sys := getClusterPostgresDBWithFactory(t, Factory)
+ return cluster, sys
+}
+
+func getClusterWithFactory(t *testing.T, factory logical.Factory) (*vault.TestCluster, logical.SystemView) {
t.Helper()
pluginDir := corehelpers.MakeTestPluginDir(t)
coreConfig := &vault.CoreConfig{
LogicalBackends: map[string]logical.Factory{
- "database": Factory,
+ "database": factory,
},
BuiltinRegistry: builtinplugins.Registry,
PluginDirectory: pluginDir,
@@ -53,36 +70,14 @@ func getClusterPostgresDB(t *testing.T) (*vault.TestCluster, logical.SystemView)
cores := cluster.Cores
vault.TestWaitActive(t, cores[0].Core)
- os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
-
sys := vault.TestDynamicSystemView(cores[0].Core, nil)
- vault.TestAddTestPlugin(t, cores[0].Core, "postgresql-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_PostgresMultiplexed", []string{})
return cluster, sys
}
func getCluster(t *testing.T) (*vault.TestCluster, logical.SystemView) {
t.Helper()
- pluginDir := corehelpers.MakeTestPluginDir(t)
- coreConfig := &vault.CoreConfig{
- LogicalBackends: map[string]logical.Factory{
- "database": Factory,
- },
- BuiltinRegistry: builtinplugins.Registry,
- PluginDirectory: pluginDir,
- }
-
- cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{
- HandlerFunc: vaulthttp.Handler,
- })
- cluster.Start()
- cores := cluster.Cores
- vault.TestWaitActive(t, cores[0].Core)
-
- os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
-
- sys := vault.TestDynamicSystemView(cores[0].Core, nil)
-
+ cluster, sys := getClusterWithFactory(t, Factory)
return cluster, sys
}
@@ -515,7 +510,7 @@ func TestBackend_basic(t *testing.T) {
if credsResp.Secret.TTL != 5*time.Minute {
t.Fatalf("unexpected TTL of %d", credsResp.Secret.TTL)
}
- if !testCredsExist(t, credsResp, connURL) {
+ if !testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should exist")
}
@@ -535,7 +530,7 @@ func TestBackend_basic(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
- if testCredsExist(t, credsResp, connURL) {
+ if testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should not exist")
}
}
@@ -553,7 +548,7 @@ func TestBackend_basic(t *testing.T) {
if err != nil || (credsResp != nil && credsResp.IsError()) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
- if !testCredsExist(t, credsResp, connURL) {
+ if !testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should exist")
}
@@ -586,108 +581,118 @@ func TestBackend_basic(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
- if testCredsExist(t, credsResp, connURL) {
+ if testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should not exist")
}
}
}
-func TestBackend_connectionCrud(t *testing.T) {
- cluster, sys := getClusterPostgresDB(t)
- defer cluster.Cleanup()
+// singletonDBFactory allows us to reach into the internals of a databaseBackend
+// even when it's been created by a call to the sys mount. The factory method
+// satisfies the logical.Factory type, and lazily creates the databaseBackend
+// once the SystemView has been provided because the factory method itself is an
+// input for creating the test cluster and its system view.
+type singletonDBFactory struct {
+ once sync.Once
+ db *databaseBackend
+
+ sys logical.SystemView
+}
+
+// factory satisfies the logical.Factory type.
+func (s *singletonDBFactory) factory(context.Context, *logical.BackendConfig) (logical.Backend, error) {
+ if s.sys == nil {
+ return nil, errors.New("sys is nil")
+ }
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
- config.System = sys
+ config.System = s.sys
- b, err := Factory(context.Background(), config)
+ var err error
+ s.once.Do(func() {
+ var b logical.Backend
+ b, err = Factory(context.Background(), config)
+ s.db = b.(*databaseBackend)
+ })
if err != nil {
- t.Fatal(err)
+ return nil, err
}
- defer b.Cleanup(context.Background())
+ if s.db == nil {
+ return nil, errors.New("db is nil")
+ }
+ return s.db, nil
+}
+
+func TestBackend_connectionCrud(t *testing.T) {
+ dbFactory := &singletonDBFactory{}
+ cluster, sys := getClusterPostgresDBWithFactory(t, dbFactory.factory)
+ defer cluster.Cleanup()
+
+ dbFactory.sys = sys
+ client := cluster.Cores[0].Client.Logical()
cleanup, connURL := postgreshelper.PrepareTestContainer(t, "13.4-buster")
defer cleanup()
+ // Mount the database plugin.
+ resp, err := client.Write("sys/mounts/database", map[string]interface{}{
+ "type": "database",
+ })
+ if err != nil {
+ t.Fatalf("err:%s resp:%#v\n", err, resp)
+ }
+
// Configure a connection
- data := map[string]interface{}{
+ resp, err = client.Write("database/config/plugin-test", map[string]interface{}{
"connection_url": "test",
"plugin_name": "postgresql-database-plugin",
"verify_connection": false,
- }
- req := &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err := b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ })
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Configure a second connection to confirm below it doesn't get restarted.
- data = map[string]interface{}{
+ resp, err = client.Write("database/config/plugin-test-hana", map[string]interface{}{
"connection_url": "test",
"plugin_name": "hana-database-plugin",
"verify_connection": false,
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test-hana",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ })
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Create a role
- data = map[string]interface{}{
+ resp, err = client.Write("database/roles/plugin-role-test", map[string]interface{}{
"db_name": "plugin-test",
"creation_statements": testRole,
"revocation_statements": defaultRevocationSQL,
"default_ttl": "5m",
"max_ttl": "10m",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "roles/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ })
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Update the connection
- data = map[string]interface{}{
+ resp, err = client.Write("database/config/plugin-test", map[string]interface{}{
"connection_url": connURL,
"plugin_name": "postgresql-database-plugin",
"allowed_roles": []string{"plugin-role-test"},
"username": "postgres",
"password": "secret",
"private_key": "PRIVATE_KEY",
- }
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ })
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
if len(resp.Warnings) == 0 {
t.Fatalf("expected warning about password in url %s, resp:%#v\n", connURL, resp)
}
- req.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ resp, err = client.Read("database/config/plugin-test")
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
returnedConnectionDetails := resp.Data["connection_details"].(map[string]interface{})
@@ -703,11 +708,16 @@ func TestBackend_connectionCrud(t *testing.T) {
}
// Replace connection url with templated version
- req.Operation = logical.UpdateOperation
- connURL = strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}")
- data["connection_url"] = connURL
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ templatedConnURL := strings.ReplaceAll(connURL, "postgres:secret", "{{username}}:{{password}}")
+ resp, err = client.Write("database/config/plugin-test", map[string]interface{}{
+ "connection_url": templatedConnURL,
+ "plugin_name": "postgresql-database-plugin",
+ "allowed_roles": []string{"plugin-role-test"},
+ "username": "postgres",
+ "password": "secret",
+ "private_key": "PRIVATE_KEY",
+ })
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@@ -716,36 +726,38 @@ func TestBackend_connectionCrud(t *testing.T) {
"plugin_name": "postgresql-database-plugin",
"connection_details": map[string]interface{}{
"username": "postgres",
- "connection_url": connURL,
+ "connection_url": templatedConnURL,
},
- "allowed_roles": []string{"plugin-role-test"},
- "root_credentials_rotate_statements": []string(nil),
+ "allowed_roles": []any{"plugin-role-test"},
+ "root_credentials_rotate_statements": []any{},
"password_policy": "",
"plugin_version": "",
}
- req.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ resp, err = client.Read("database/config/plugin-test")
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
delete(resp.Data["connection_details"].(map[string]interface{}), "name")
if diff := deep.Equal(resp.Data, expected); diff != nil {
- t.Fatal(diff)
+ t.Fatal(strings.Join(diff, "\n"))
}
// Test endpoints for reloading plugins.
- for _, reloadPath := range []string{
- "reset/plugin-test",
- "reload/postgresql-database-plugin",
+ for _, reload := range []struct {
+ path string
+ data map[string]any
+ checkCount bool
+ }{
+ {"database/reset/plugin-test", nil, false},
+ {"database/reload/postgresql-database-plugin", nil, true},
+ {"sys/plugins/reload/backend", map[string]any{
+ "plugin": "postgresql-database-plugin",
+ }, false},
} {
getConnectionID := func(name string) string {
t.Helper()
- dbBackend, ok := b.(*databaseBackend)
- if !ok {
- t.Fatal("could not convert logical.Backend to databaseBackend")
- }
- dbi := dbBackend.connections.Get(name)
+ dbi := dbFactory.db.connections.Get(name)
if dbi == nil {
t.Fatal("no plugin-test dbi")
}
@@ -753,14 +765,8 @@ func TestBackend_connectionCrud(t *testing.T) {
}
initialID := getConnectionID("plugin-test")
hanaID := getConnectionID("plugin-test-hana")
- req = &logical.Request{
- Operation: logical.UpdateOperation,
- Path: reloadPath,
- Storage: config.StorageView,
- Data: map[string]interface{}{},
- }
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ resp, err = client.Write(reload.path, reload.data)
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
if initialID == getConnectionID("plugin-test") {
@@ -769,54 +775,43 @@ func TestBackend_connectionCrud(t *testing.T) {
if hanaID != getConnectionID("plugin-test-hana") {
t.Fatal("hana plugin got restarted but shouldn't have been")
}
- if strings.HasPrefix(reloadPath, "reload/") {
- if expected := 1; expected != resp.Data["count"] {
- t.Fatalf("expected %d but got %d", expected, resp.Data["count"])
+ if reload.checkCount {
+ actual, err := resp.Data["count"].(json.Number).Int64()
+ if err != nil {
+ t.Fatal(err)
}
- if expected := []string{"plugin-test"}; !reflect.DeepEqual(expected, resp.Data["connections"]) {
+ if expected := 1; expected != int(actual) {
+ t.Fatalf("expected %d but got %d", expected, resp.Data["count"].(int))
+ }
+ if expected := []any{"plugin-test"}; !reflect.DeepEqual(expected, resp.Data["connections"]) {
t.Fatalf("expected %v but got %v", expected, resp.Data["connections"])
}
}
}
// Get creds
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.ReadOperation,
- Path: "creds/plugin-role-test",
- Storage: config.StorageView,
- Data: data,
- }
- credsResp, err := b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (credsResp != nil && credsResp.IsError()) {
+ credsResp, err := client.Read("database/creds/plugin-role-test")
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
- credCheckURL := dbutil.QueryHelper(connURL, map[string]string{
+ credCheckURL := dbutil.QueryHelper(templatedConnURL, map[string]string{
"username": "postgres",
"password": "secret",
})
- if !testCredsExist(t, credsResp, credCheckURL) {
+ if !testCredsExist(t, credsResp.Data, credCheckURL) {
t.Fatalf("Creds should exist")
}
// Delete Connection
- data = map[string]interface{}{}
- req = &logical.Request{
- Operation: logical.DeleteOperation,
- Path: "config/plugin-test",
- Storage: config.StorageView,
- Data: data,
- }
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ resp, err = client.Delete("database/config/plugin-test")
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
// Read connection
- req.Operation = logical.ReadOperation
- resp, err = b.HandleRequest(namespace.RootContext(nil), req)
- if err != nil || (resp != nil && resp.IsError()) {
+ resp, err = client.Read("database/config/plugin-test")
+ if err != nil {
t.Fatalf("err:%s resp:%#v\n", err, resp)
}
@@ -1190,7 +1185,7 @@ func TestBackend_allowedRoles(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
- if !testCredsExist(t, credsResp, connURL) {
+ if !testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should exist")
}
@@ -1224,7 +1219,7 @@ func TestBackend_allowedRoles(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
- if !testCredsExist(t, credsResp, connURL) {
+ if !testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should exist")
}
@@ -1271,7 +1266,7 @@ func TestBackend_allowedRoles(t *testing.T) {
t.Fatalf("err:%s resp:%#v\n", err, credsResp)
}
- if !testCredsExist(t, credsResp, connURL) {
+ if !testCredsExist(t, credsResp.Data, connURL) {
t.Fatalf("Creds should exist")
}
}
@@ -1581,13 +1576,13 @@ func TestNewDatabaseWrapper_IgnoresBuiltinVersion(t *testing.T) {
}
}
-func testCredsExist(t *testing.T, resp *logical.Response, connURL string) bool {
+func testCredsExist(t *testing.T, data map[string]any, connURL string) bool {
t.Helper()
var d struct {
Username string `mapstructure:"username"`
Password string `mapstructure:"password"`
}
- if err := mapstructure.Decode(resp.Data, &d); err != nil {
+ if err := mapstructure.Decode(data, &d); err != nil {
t.Fatal(err)
}
log.Printf("[TRACE] Generated credentials: %v", d)
diff --git a/builtin/logical/database/versioning_large_test.go b/builtin/logical/database/versioning_large_test.go
index bacb4a6a7123..be936c760336 100644
--- a/builtin/logical/database/versioning_large_test.go
+++ b/builtin/logical/database/versioning_large_test.go
@@ -25,9 +25,10 @@ func TestPlugin_lifecycle(t *testing.T) {
cluster, sys := getCluster(t)
defer cluster.Cleanup()
- vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", []string{})
- vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", []string{})
- vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", []string{})
+ env := []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)}
+ vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v4-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV4", env)
+ vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v5-database-plugin", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV5", env)
+ vault.TestAddTestPlugin(t, cluster.Cores[0].Core, "mock-v6-database-plugin-muxed", consts.PluginTypeDatabase, "", "TestBackend_PluginMain_MockV6Multiplexed", env)
config := logical.TestBackendConfig()
config.StorageView = &logical.InmemStorage{}
diff --git a/builtin/plugin/backend_test.go b/builtin/plugin/backend_test.go
index c1b7a83d153a..713444061286 100644
--- a/builtin/plugin/backend_test.go
+++ b/builtin/plugin/backend_test.go
@@ -140,9 +140,8 @@ func testConfig(t *testing.T, pluginCmd string) (*logical.BackendConfig, func())
},
}
- os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
-
- vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd, []string{})
+ vault.TestAddTestPlugin(t, core.Core, "mock-plugin", consts.PluginTypeSecrets, "", pluginCmd,
+ []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)})
return config, func() {
cluster.Cleanup()
diff --git a/changelog/24512.txt b/changelog/24512.txt
new file mode 100644
index 000000000000..efed04a22535
--- /dev/null
+++ b/changelog/24512.txt
@@ -0,0 +1,6 @@
+```release-note:change
+plugins: Add a warning to the response from sys/plugins/reload/backend if no plugins were reloaded.
+```
+```release-note:improvement
+secrets/database: Support reloading named database plugins using the sys/plugins/reload/backend API endpoint.
+```
diff --git a/http/plugin_test.go b/http/plugin_test.go
index fa67187621a8..b215a6b1c6bc 100644
--- a/http/plugin_test.go
+++ b/http/plugin_test.go
@@ -5,6 +5,7 @@ package http
import (
"encoding/json"
+ "fmt"
"io/ioutil"
"os"
"reflect"
@@ -55,10 +56,9 @@ func getPluginClusterAndCore(t *testing.T, logger log.Logger) (*vault.TestCluste
cores := cluster.Cores
core := cores[0]
- os.Setenv(pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)
-
vault.TestWaitActive(benchhelpers.TBtoT(t), core.Core)
- vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain", []string{})
+ vault.TestAddTestPlugin(benchhelpers.TBtoT(t), core.Core, "mock-plugin", consts.PluginTypeSecrets, "", "TestPlugin_PluginMain",
+ []string{fmt.Sprintf("%s=%s", pluginutil.PluginCACertPEMEnv, cluster.CACertPEMFile)})
// Mount the mock plugin
err = core.Client.Sys().Mount("mock", &api.MountInput{
diff --git a/vault/external_tests/plugin/plugin_test.go b/vault/external_tests/plugin/plugin_test.go
index e29340a17ccc..affe8fa6b888 100644
--- a/vault/external_tests/plugin/plugin_test.go
+++ b/vault/external_tests/plugin/plugin_test.go
@@ -560,6 +560,9 @@ func testSystemBackend_PluginReload(t *testing.T, reqData map[string]interface{}
if resp.Data["reload_id"] == nil {
t.Fatal("no reload_id in response")
}
+ if len(resp.Warnings) != 0 {
+ t.Fatal(resp.Warnings)
+ }
for i := 0; i < 2; i++ {
// Ensure internal backed value is reset
@@ -578,6 +581,35 @@ func testSystemBackend_PluginReload(t *testing.T, reqData map[string]interface{}
}
}
+func TestSystemBackend_PluginReload_WarningIfNoneReloaded(t *testing.T) {
+ cluster := testSystemBackendMock(t, 1, 2, logical.TypeLogical, "v5")
+ defer cluster.Cleanup()
+
+ core := cluster.Cores[0]
+ client := core.Client
+
+ for _, backendType := range []logical.BackendType{logical.TypeLogical, logical.TypeCredential} {
+ t.Run(backendType.String(), func(t *testing.T) {
+ // Perform plugin reload
+ resp, err := client.Logical().Write("sys/plugins/reload/backend", map[string]any{
+ "plugin": "does-not-exist",
+ })
+ if err != nil {
+ t.Fatalf("err: %v", err)
+ }
+ if resp == nil {
+ t.Fatalf("bad: %v", resp)
+ }
+ if resp.Data["reload_id"] == nil {
+ t.Fatal("no reload_id in response")
+ }
+ if len(resp.Warnings) == 0 {
+ t.Fatal("expected warning")
+ }
+ })
+ }
+}
+
// testSystemBackendMock returns a systemBackend with the desired number
// of mounted mock plugin backends. numMounts alternates between different
// ways of providing the plugin_name.
diff --git a/vault/logical_system.go b/vault/logical_system.go
index 1ae623546e60..621595485004 100644
--- a/vault/logical_system.go
+++ b/vault/logical_system.go
@@ -738,11 +738,24 @@ func (b *SystemBackend) handlePluginReloadUpdate(ctx context.Context, req *logic
return logical.ErrorResponse("plugin or mounts must be provided"), nil
}
+ resp := logical.Response{
+ Data: map[string]interface{}{
+ "reload_id": req.ID,
+ },
+ }
+
if pluginName != "" {
- err := b.Core.reloadMatchingPlugin(ctx, pluginName)
+ reloaded, err := b.Core.reloadMatchingPlugin(ctx, pluginName)
if err != nil {
return nil, err
}
+ if reloaded == 0 {
+ if scope == globalScope {
+ resp.AddWarning("no plugins were reloaded locally (but they may be reloaded on other nodes)")
+ } else {
+ resp.AddWarning("no plugins were reloaded")
+ }
+ }
} else if len(pluginMounts) > 0 {
err := b.Core.reloadMatchingPluginMounts(ctx, pluginMounts)
if err != nil {
@@ -750,20 +763,14 @@ func (b *SystemBackend) handlePluginReloadUpdate(ctx context.Context, req *logic
}
}
- r := logical.Response{
- Data: map[string]interface{}{
- "reload_id": req.ID,
- },
- }
-
if scope == globalScope {
err := handleGlobalPluginReload(ctx, b.Core, req.ID, pluginName, pluginMounts)
if err != nil {
return nil, err
}
- return logical.RespondWithStatusCode(&r, req, http.StatusAccepted)
+ return logical.RespondWithStatusCode(&resp, req, http.StatusAccepted)
}
- return &r, nil
+ return &resp, nil
}
func (b *SystemBackend) handlePluginRuntimeCatalogUpdate(ctx context.Context, _ *logical.Request, d *framework.FieldData) (*logical.Response, error) {
diff --git a/vault/mount.go b/vault/mount.go
index 158dbd93565f..d70e8193603f 100644
--- a/vault/mount.go
+++ b/vault/mount.go
@@ -322,7 +322,7 @@ const mountStateUnmounting = "unmounting"
// MountEntry is used to represent a mount table entry
type MountEntry struct {
Table string `json:"table"` // The table it belongs to
- Path string `json:"path"` // Mount Path
+ Path string `json:"path"` // Mount Path, as provided in the mount API call but with a trailing slash, i.e. no auth/ or namespace prefix.
Type string `json:"type"` // Logical backend Type. NB: This is the plugin name, e.g. my-vault-plugin, NOT plugin type (e.g. auth).
Description string `json:"description"` // User-provided description
UUID string `json:"uuid"` // Barrier view UUID
diff --git a/vault/plugin_reload.go b/vault/plugin_reload.go
index 7fa6e936e60e..938c47eb34bf 100644
--- a/vault/plugin_reload.go
+++ b/vault/plugin_reload.go
@@ -70,10 +70,10 @@ func (c *Core) reloadMatchingPluginMounts(ctx context.Context, mounts []string)
return errors
}
-// reloadPlugin reloads all mounted backends that are of
-// plugin pluginName (name of the plugin as registered in
-// the plugin catalog).
-func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) error {
+// reloadMatchingPlugin reloads all mounted backends that are named pluginName
+// (name of the plugin as registered in the plugin catalog). It returns the
+// number of plugins that were reloaded and an error if any.
+func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) (reloaded int, err error) {
c.mountsLock.RLock()
defer c.mountsLock.RUnlock()
c.authLock.RLock()
@@ -81,25 +81,49 @@ func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) erro
ns, err := namespace.FromContext(ctx)
if err != nil {
- return err
+ return reloaded, err
}
- // Filter mount entries that only matches the plugin name
for _, entry := range c.mounts.Entries {
// We dont reload mounts that are not in the same namespace
if ns.ID != entry.Namespace().ID {
continue
}
+
if entry.Type == pluginName || (entry.Type == "plugin" && entry.Config.PluginName == pluginName) {
err := c.reloadBackendCommon(ctx, entry, false)
if err != nil {
- return err
+ return reloaded, err
+ }
+ reloaded++
+ c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "namespace", entry.Namespace(), "path", entry.Path, "version", entry.Version)
+ } else if entry.Type == "database" {
+ // The combined database plugin is itself a secrets engine, but
+ // knowledge of whether a database plugin is in use within a particular
+ // mount is internal to the combined database plugin's storage, so
+ // we delegate the reload request with an internally routed request.
+ req := &logical.Request{
+ Operation: logical.UpdateOperation,
+ Path: entry.Path + "reload/" + pluginName,
+ }
+ resp, err := c.router.Route(ctx, req)
+ if err != nil {
+ return reloaded, err
+ }
+ if resp == nil {
+ return reloaded, fmt.Errorf("failed to reload %q database plugin(s) mounted under %s", pluginName, entry.Path)
+ }
+ if resp.IsError() {
+ return reloaded, fmt.Errorf("failed to reload %q database plugin(s) mounted under %s: %s", pluginName, entry.Path, resp.Error())
+ }
+
+ if count, ok := resp.Data["count"].(int); ok && count > 0 {
+ c.logger.Info("successfully reloaded database plugin(s)", "plugin", pluginName, "namespace", entry.Namespace(), "path", entry.Path, "connections", resp.Data["connections"])
+ reloaded += count
}
- c.logger.Info("successfully reloaded plugin", "plugin", pluginName, "path", entry.Path, "version", entry.Version)
}
}
- // Filter auth mount entries that ony matches the plugin name
for _, entry := range c.auth.Entries {
// We dont reload mounts that are not in the same namespace
if ns.ID != entry.Namespace().ID {
@@ -109,13 +133,14 @@ func (c *Core) reloadMatchingPlugin(ctx context.Context, pluginName string) erro
if entry.Type == pluginName || (entry.Type == "plugin" && entry.Config.PluginName == pluginName) {
err := c.reloadBackendCommon(ctx, entry, true)
if err != nil {
- return err
+ return reloaded, err
}
+ reloaded++
c.logger.Info("successfully reloaded plugin", "plugin", entry.Accessor, "path", entry.Path, "version", entry.Version)
}
}
- return nil
+ return reloaded, nil
}
// reloadBackendCommon is a generic method to reload a backend provided a
From 2047ce752724c0b3e2e6629d3ccf5f5ba9e53eb6 Mon Sep 17 00:00:00 2001
From: Kuba Wieczorek
Date: Mon, 8 Jan 2024 13:57:43 +0000
Subject: [PATCH 38/39] [VAULT-22480] Add audit fallback device (#24583)
Co-authored-by: Peter Wilson
---
audit/types.go | 5 +
builtin/audit/file/backend.go | 25 +++
builtin/audit/file/backend_test.go | 126 ++++++++++++
builtin/audit/socket/backend.go | 23 +++
builtin/audit/socket/backend_test.go | 192 ++++++++++++++++++
builtin/audit/syslog/backend.go | 25 +++
builtin/audit/syslog/backend_test.go | 125 ++++++++++++
helper/testhelpers/corehelpers/corehelpers.go | 8 +-
vault/audit.go | 16 +-
vault/audit_broker.go | 184 +++++++++++++++--
vault/audit_broker_test.go | 115 +++++++++++
vault/audit_test.go | 63 ++++++
12 files changed, 888 insertions(+), 19 deletions(-)
diff --git a/audit/types.go b/audit/types.go
index 3434ff84d840..8d8bd158c339 100644
--- a/audit/types.go
+++ b/audit/types.go
@@ -279,6 +279,11 @@ type Backend interface {
// nodes for node and pipeline registration.
event.PipelineReader
+ // IsFallback can be used to determine if this audit backend device is intended to
+ // be used as a fallback to catch all events that are not written when only using
+ // filtered pipelines.
+ IsFallback() bool
+
// LogRequest is used to synchronously log a request. This is done after the
// request is authorized but before the request is executed. The arguments
// MUST not be modified in any way. They should be deep copied if this is
diff --git a/builtin/audit/file/backend.go b/builtin/audit/file/backend.go
index 2681ee244e99..44fd2438e032 100644
--- a/builtin/audit/file/backend.go
+++ b/builtin/audit/file/backend.go
@@ -16,6 +16,7 @@ import (
"sync/atomic"
"github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/internal/observability/event"
"github.com/hashicorp/vault/sdk/helper/salt"
@@ -36,6 +37,7 @@ var _ audit.Backend = (*Backend)(nil)
// or reset the write cursor, this should be done in the future.
type Backend struct {
f *os.File
+ fallback bool
fileLock sync.RWMutex
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
@@ -60,6 +62,21 @@ func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool,
return nil, fmt.Errorf("%s: nil salt view", op)
}
+ // The config options 'fallback' and 'filter' are mutually exclusive: a fallback
+ // device catches everything, so it cannot be allowed to filter.
+ var fallback bool
+ var err error
+ if fallbackRaw, ok := conf.Config["fallback"]; ok {
+ fallback, err = parseutil.ParseBool(fallbackRaw)
+ if err != nil {
+ return nil, fmt.Errorf("%s: unable to parse 'fallback': %w", op, err)
+ }
+ }
+
+ if _, ok := conf.Config["filter"]; ok && fallback {
+ return nil, fmt.Errorf("%s: cannot configure a fallback device with a filter: %w", op, event.ErrInvalidParameter)
+ }
+
// Get file path from config or fall back to the old option name ('path') for compatibility
// (see commit bac4fe0799a372ba1245db642f3f6cd1f1d02669).
var filePath string
@@ -106,6 +123,7 @@ func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool,
}
b := &Backend{
+ fallback: fallback,
filePath: filePath,
formatConfig: cfg,
mode: mode,
@@ -550,3 +568,10 @@ func (b *Backend) EventType() eventlogger.EventType {
func (b *Backend) HasFiltering() bool {
return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
+
+// IsFallback can be used to determine if this audit backend device is intended to
+// be used as a fallback to catch all events that are not written when only using
+// filtered pipelines.
+func (b *Backend) IsFallback() bool {
+ return b.fallback
+}
diff --git a/builtin/audit/file/backend_test.go b/builtin/audit/file/backend_test.go
index 17ea7fd20365..1e47f0f2cd96 100644
--- a/builtin/audit/file/backend_test.go
+++ b/builtin/audit/file/backend_test.go
@@ -576,3 +576,129 @@ func TestBackend_configureFilterFormatterSink(t *testing.T) {
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
}
+
+// TestBackend_Factory_Conf is used to ensure that any configuration which is
+// supplied, is validated and tested.
+func TestBackend_Factory_Conf(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ tests := map[string]struct {
+ backendConfig *audit.BackendConfig
+ isErrorExpected bool
+ expectedErrorMessage string
+ }{
+ "nil-salt-config": {
+ backendConfig: &audit.BackendConfig{
+ SaltConfig: nil,
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "file.Factory: nil salt config",
+ },
+ "nil-salt-view": {
+ backendConfig: &audit.BackendConfig{
+ SaltConfig: &salt.Config{},
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "file.Factory: nil salt view",
+ },
+ "fallback-device-with-filter": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "true",
+ "file_path": discard,
+ "filter": "mount_type == kv",
+ },
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "file.Factory: cannot configure a fallback device with a filter: invalid parameter",
+ },
+ "non-fallback-device-with-filter": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "false",
+ "file_path": discard,
+ "filter": "mount_type == kv",
+ },
+ },
+ isErrorExpected: false,
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ be, err := Factory(ctx, tc.backendConfig, true, nil)
+
+ switch {
+ case tc.isErrorExpected:
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrorMessage)
+ default:
+ require.NoError(t, err)
+ require.NotNil(t, be)
+ }
+ })
+ }
+}
+
+// TestBackend_IsFallback ensures that the 'fallback' config setting is parsed
+// and set correctly, then exposed via the interface method IsFallback().
+func TestBackend_IsFallback(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ tests := map[string]struct {
+ backendConfig *audit.BackendConfig
+ isFallbackExpected bool
+ }{
+ "fallback": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "true",
+ "file_path": discard,
+ },
+ },
+ isFallbackExpected: true,
+ },
+ "no-fallback": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "false",
+ "file_path": discard,
+ },
+ },
+ isFallbackExpected: false,
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ be, err := Factory(ctx, tc.backendConfig, true, nil)
+ require.NoError(t, err)
+ require.NotNil(t, be)
+ require.Equal(t, tc.isFallbackExpected, be.IsFallback())
+ })
+ }
+}
diff --git a/builtin/audit/socket/backend.go b/builtin/audit/socket/backend.go
index 09662c2ab683..84629efe3108 100644
--- a/builtin/audit/socket/backend.go
+++ b/builtin/audit/socket/backend.go
@@ -29,6 +29,7 @@ type Backend struct {
sync.Mutex
address string
connection net.Conn
+ fallback bool
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
name string
@@ -73,12 +74,27 @@ func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool,
return nil, fmt.Errorf("%s: failed to parse 'write_timeout': %w", op, err)
}
+ // The config options 'fallback' and 'filter' are mutually exclusive: a fallback
+ // device catches everything, so it cannot be allowed to filter.
+ var fallback bool
+ if fallbackRaw, ok := conf.Config["fallback"]; ok {
+ fallback, err = parseutil.ParseBool(fallbackRaw)
+ if err != nil {
+ return nil, fmt.Errorf("%s: unable to parse 'fallback': %w", op, err)
+ }
+ }
+
+ if _, ok := conf.Config["filter"]; ok && fallback {
+ return nil, fmt.Errorf("%s: cannot configure a fallback device with a filter: %w", op, event.ErrInvalidParameter)
+ }
+
cfg, err := formatterConfig(conf.Config)
if err != nil {
return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
}
b := &Backend{
+ fallback: fallback,
address: address,
formatConfig: cfg,
name: conf.MountPath,
@@ -443,3 +459,10 @@ func (b *Backend) EventType() eventlogger.EventType {
func (b *Backend) HasFiltering() bool {
return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
+
+// IsFallback can be used to determine if this audit backend device is intended to
+// be used as a fallback to catch all events that are not written when only using
+// filtered pipelines.
+func (b *Backend) IsFallback() bool {
+ return b.fallback
+}
diff --git a/builtin/audit/socket/backend_test.go b/builtin/audit/socket/backend_test.go
index d1dfc384720c..3693f8fdeeb4 100644
--- a/builtin/audit/socket/backend_test.go
+++ b/builtin/audit/socket/backend_test.go
@@ -4,10 +4,13 @@
package socket
import (
+ "context"
"testing"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/sdk/helper/salt"
+ "github.com/hashicorp/vault/sdk/logical"
"github.com/stretchr/testify/require"
)
@@ -329,3 +332,192 @@ func TestBackend_configureFilterFormatterSink(t *testing.T) {
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
}
+
+// TestBackend_Factory_Conf is used to ensure that any configuration which is
+// supplied, is validated and tested.
+func TestBackend_Factory_Conf(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ tests := map[string]struct {
+ backendConfig *audit.BackendConfig
+ isErrorExpected bool
+ expectedErrorMessage string
+ }{
+ "nil-salt-config": {
+ backendConfig: &audit.BackendConfig{
+ SaltConfig: nil,
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: nil salt config",
+ },
+ "nil-salt-view": {
+ backendConfig: &audit.BackendConfig{
+ SaltConfig: &salt.Config{},
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: nil salt view",
+ },
+ "no-address": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{},
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: address is required",
+ },
+ "empty-address": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "address": "",
+ },
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: error configuring sink node: socket.(Backend).configureSinkNode: address is required: invalid parameter",
+ },
+ "whitespace-address": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "address": " ",
+ },
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: error configuring sink node: socket.(Backend).configureSinkNode: address is required: invalid parameter",
+ },
+ "write-duration-valid": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "address": "hashicorp.com",
+ "write_timeout": "5s",
+ },
+ },
+ isErrorExpected: false,
+ },
+ "write-duration-not-valid": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "address": "hashicorp.com",
+ "write_timeout": "qwerty",
+ },
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: failed to parse 'write_timeout': time: invalid duration \"qwerty\"",
+ },
+ "non-fallback-device-with-filter": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "address": "hashicorp.com",
+ "write_timeout": "5s",
+ "fallback": "false",
+ "filter": "mount_type == kv",
+ },
+ },
+ isErrorExpected: false,
+ },
+ "fallback-device-with-filter": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "address": "hashicorp.com",
+ "write_timeout": "2s",
+ "fallback": "true",
+ "filter": "mount_type == kv",
+ },
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "socket.Factory: cannot configure a fallback device with a filter: invalid parameter",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ be, err := Factory(ctx, tc.backendConfig, true, nil)
+
+ switch {
+ case tc.isErrorExpected:
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrorMessage)
+ default:
+ require.NoError(t, err)
+ require.NotNil(t, be)
+ }
+ })
+ }
+}
+
+// TestBackend_IsFallback ensures that the 'fallback' config setting is parsed
+// and set correctly, then exposed via the interface method IsFallback().
+func TestBackend_IsFallback(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ tests := map[string]struct {
+ backendConfig *audit.BackendConfig
+ isFallbackExpected bool
+ }{
+ "fallback": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "qwerty",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "true",
+ "address": "hashicorp.com",
+ "write_timeout": "5s",
+ },
+ },
+ isFallbackExpected: true,
+ },
+ "no-fallback": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "qwerty",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "false",
+ "address": "hashicorp.com",
+ "write_timeout": "5s",
+ },
+ },
+ isFallbackExpected: false,
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ be, err := Factory(ctx, tc.backendConfig, true, nil)
+ require.NoError(t, err)
+ require.NotNil(t, be)
+ require.Equal(t, tc.isFallbackExpected, be.IsFallback())
+ })
+ }
+}
diff --git a/builtin/audit/syslog/backend.go b/builtin/audit/syslog/backend.go
index 45d6e0762daa..dff6b5800382 100644
--- a/builtin/audit/syslog/backend.go
+++ b/builtin/audit/syslog/backend.go
@@ -12,6 +12,7 @@ import (
"sync"
"github.com/hashicorp/eventlogger"
+ "github.com/hashicorp/go-secure-stdlib/parseutil"
gsyslog "github.com/hashicorp/go-syslog"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/internal/observability/event"
@@ -23,6 +24,7 @@ var _ audit.Backend = (*Backend)(nil)
// Backend is the audit backend for the syslog-based audit store.
type Backend struct {
+ fallback bool
formatter *audit.EntryFormatterWriter
formatConfig audit.FormatterConfig
logger gsyslog.Syslogger
@@ -58,6 +60,21 @@ func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool,
tag = "vault"
}
+ // The config options 'fallback' and 'filter' are mutually exclusive: a fallback
+ // device catches everything, so it cannot be allowed to filter.
+ var fallback bool
+ var err error
+ if fallbackRaw, ok := conf.Config["fallback"]; ok {
+ fallback, err = parseutil.ParseBool(fallbackRaw)
+ if err != nil {
+ return nil, fmt.Errorf("%s: unable to parse 'fallback': %w", op, err)
+ }
+ }
+
+ if _, ok := conf.Config["filter"]; ok && fallback {
+ return nil, fmt.Errorf("%s: cannot configure a fallback device with a filter: %w", op, event.ErrInvalidParameter)
+ }
+
cfg, err := formatterConfig(conf.Config)
if err != nil {
return nil, fmt.Errorf("%s: failed to create formatter config: %w", op, err)
@@ -70,6 +87,7 @@ func Factory(_ context.Context, conf *audit.BackendConfig, useEventLogger bool,
}
b := &Backend{
+ fallback: fallback,
formatConfig: cfg,
logger: logger,
name: conf.MountPath,
@@ -347,3 +365,10 @@ func (b *Backend) EventType() eventlogger.EventType {
func (b *Backend) HasFiltering() bool {
return len(b.nodeIDList) > 0 && b.nodeMap[b.nodeIDList[0]].Type() == eventlogger.NodeTypeFilter
}
+
+// IsFallback can be used to determine if this audit backend device is intended to
+// be used as a fallback to catch all events that are not written when only using
+// filtered pipelines.
+func (b *Backend) IsFallback() bool {
+ return b.fallback
+}
diff --git a/builtin/audit/syslog/backend_test.go b/builtin/audit/syslog/backend_test.go
index 4aeaa5d0da5c..ba0b990803ef 100644
--- a/builtin/audit/syslog/backend_test.go
+++ b/builtin/audit/syslog/backend_test.go
@@ -4,10 +4,13 @@
package syslog
import (
+ "context"
"testing"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/vault/audit"
+ "github.com/hashicorp/vault/sdk/helper/salt"
+ "github.com/hashicorp/vault/sdk/logical"
"github.com/stretchr/testify/require"
)
@@ -311,3 +314,125 @@ func TestBackend_configureFilterFormatterSink(t *testing.T) {
node = b.nodeMap[id]
require.Equal(t, eventlogger.NodeTypeSink, node.Type())
}
+
+// TestBackend_Factory_Conf is used to ensure that any configuration which is
+// supplied, is validated and tested.
+func TestBackend_Factory_Conf(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ tests := map[string]struct {
+ backendConfig *audit.BackendConfig
+ isErrorExpected bool
+ expectedErrorMessage string
+ }{
+ "nil-salt-config": {
+ backendConfig: &audit.BackendConfig{
+ SaltConfig: nil,
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "syslog.Factory: nil salt config",
+ },
+ "nil-salt-view": {
+ backendConfig: &audit.BackendConfig{
+ SaltConfig: &salt.Config{},
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "syslog.Factory: nil salt view",
+ },
+ "non-fallback-device-with-filter": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "false",
+ "filter": "mount_type == kv",
+ },
+ },
+ isErrorExpected: false,
+ },
+ "fallback-device-with-filter": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "discard",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "true",
+ "filter": "mount_type == kv",
+ },
+ },
+ isErrorExpected: true,
+ expectedErrorMessage: "syslog.Factory: cannot configure a fallback device with a filter: invalid parameter",
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ be, err := Factory(ctx, tc.backendConfig, true, nil)
+
+ switch {
+ case tc.isErrorExpected:
+ require.Error(t, err)
+ require.EqualError(t, err, tc.expectedErrorMessage)
+ default:
+ require.NoError(t, err)
+ require.NotNil(t, be)
+ }
+ })
+ }
+}
+
+// TestBackend_IsFallback ensures that the 'fallback' config setting is parsed
+// and set correctly, then exposed via the interface method IsFallback().
+func TestBackend_IsFallback(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+
+ tests := map[string]struct {
+ backendConfig *audit.BackendConfig
+ isFallbackExpected bool
+ }{
+ "fallback": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "qwerty",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "true",
+ },
+ },
+ isFallbackExpected: true,
+ },
+ "no-fallback": {
+ backendConfig: &audit.BackendConfig{
+ MountPath: "qwerty",
+ SaltConfig: &salt.Config{},
+ SaltView: &logical.InmemStorage{},
+ Config: map[string]string{
+ "fallback": "false",
+ },
+ },
+ isFallbackExpected: false,
+ },
+ }
+
+ for name, tc := range tests {
+ name := name
+ tc := tc
+ t.Run(name, func(t *testing.T) {
+ t.Parallel()
+
+ be, err := Factory(ctx, tc.backendConfig, true, nil)
+ require.NoError(t, err)
+ require.NotNil(t, be)
+ require.Equal(t, tc.isFallbackExpected, be.IsFallback())
+ })
+ }
+}
diff --git a/helper/testhelpers/corehelpers/corehelpers.go b/helper/testhelpers/corehelpers/corehelpers.go
index c2d6bc8a3ce7..8c3d7cfb966f 100644
--- a/helper/testhelpers/corehelpers/corehelpers.go
+++ b/helper/testhelpers/corehelpers/corehelpers.go
@@ -535,7 +535,7 @@ func (n *NoopAudit) Invalidate(_ context.Context) {
// the audit.Backend interface.
func (n *NoopAudit) RegisterNodesAndPipeline(broker *eventlogger.Broker, name string) error {
for id, node := range n.nodeMap {
- if err := broker.RegisterNode(id, node, eventlogger.WithNodeRegistrationPolicy(eventlogger.DenyOverwrite)); err != nil {
+ if err := broker.RegisterNode(id, node); err != nil {
return err
}
}
@@ -546,7 +546,7 @@ func (n *NoopAudit) RegisterNodesAndPipeline(broker *eventlogger.Broker, name st
NodeIDs: n.nodeIDList,
}
- return broker.RegisterPipeline(pipeline, eventlogger.WithPipelineRegistrationPolicy(eventlogger.DenyOverwrite))
+ return broker.RegisterPipeline(pipeline)
}
type TestLogger struct {
@@ -632,3 +632,7 @@ func (n *NoopAudit) Nodes() map[eventlogger.NodeID]eventlogger.Node {
func (n *NoopAudit) NodeIDs() []eventlogger.NodeID {
return n.nodeIDList
}
+
+func (n *NoopAudit) IsFallback() bool {
+ return false
+}
diff --git a/vault/audit.go b/vault/audit.go
index a3d6fcfab9b8..7f2e5cac47af 100644
--- a/vault/audit.go
+++ b/vault/audit.go
@@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"os"
+ "strconv"
"strings"
"time"
@@ -81,6 +82,17 @@ func (c *Core) enableAudit(ctx context.Context, entry *MountEntry, updateStorage
return fmt.Errorf("backend path must be specified")
}
+ if fallbackRaw, ok := entry.Options["fallback"]; ok {
+ fallback, err := parseutil.ParseBool(fallbackRaw)
+ if err != nil {
+ return fmt.Errorf("unable to enable audit device '%s', cannot parse supplied 'fallback' setting: %w", entry.Path, err)
+ }
+
+ // Reassigning the fallback value means we can ensure that the formatting
+ // of it as a string is consistent for future comparisons.
+ entry.Options["fallback"] = strconv.FormatBool(fallback)
+ }
+
// Update the audit table
c.auditLock.Lock()
defer c.auditLock.Unlock()
@@ -88,6 +100,8 @@ func (c *Core) enableAudit(ctx context.Context, entry *MountEntry, updateStorage
// Look for matching name
for _, ent := range c.audit.Entries {
switch {
+ case entry.Options["fallback"] == "true" && ent.Options["fallback"] == "true":
+ return fmt.Errorf("unable to enable audit device '%s', a fallback device already exists '%s'", entry.Path, ent.Path)
// Existing is sql/mysql/ new is sql/ or
// existing is sql/ and new is sql/mysql/
case strings.HasPrefix(ent.Path, entry.Path):
@@ -531,7 +545,7 @@ func (c *Core) newAuditBackend(ctx context.Context, entry *MountEntry, view logi
!disableEventLogger,
c.auditedHeaders)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("unable to create new audit backend: %w", err)
}
if be == nil {
return nil, fmt.Errorf("nil backend returned from %q factory function", entry.Type)
diff --git a/vault/audit_broker.go b/vault/audit_broker.go
index 514c3b5a77dc..fbfa0d276915 100644
--- a/vault/audit_broker.go
+++ b/vault/audit_broker.go
@@ -12,9 +12,9 @@ import (
"sync"
"time"
- "github.com/armon/go-metrics"
"github.com/hashicorp/eventlogger"
"github.com/hashicorp/go-hclog"
+ "github.com/hashicorp/go-metrics"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/vault/audit"
"github.com/hashicorp/vault/helper/namespace"
@@ -32,32 +32,52 @@ type backendEntry struct {
type AuditBroker struct {
sync.RWMutex
backends map[string]backendEntry
- logger hclog.Logger
+ // broker is used to register pipelines for all devices except a fallback device.
broker *eventlogger.Broker
+
+ // fallbackBroker is used to register a pipeline to be used as a fallback
+ // in situations where we cannot use the eventlogger.Broker to guarantee that
+ // the required number of sinks were successfully written to. This situation
+ // occurs when all the audit devices registered with the broker use filtering.
+ // NOTE: there should only ever be a single device registered on the fallbackBroker.
+ fallbackBroker *eventlogger.Broker
+
+ // fallbackName stores the name (path) of the audit device which has been configured
+ // as the fallback pipeline (its eventlogger.PipelineID).
+ fallbackName string
+ logger hclog.Logger
}
// NewAuditBroker creates a new audit broker
func NewAuditBroker(log hclog.Logger, useEventLogger bool) (*AuditBroker, error) {
var eventBroker *eventlogger.Broker
+ var fallbackBroker *eventlogger.Broker
var err error
// The reason for this check is due to 1.15.x supporting the env var:
// 'VAULT_AUDIT_DISABLE_EVENTLOGGER'
// When NewAuditBroker is called, it is supplied a bool to determine whether
- // we initialize the broker, which are left nil otherwise.
+ // we initialize the broker (and fallback broker), which are left nil otherwise.
// In 1.16.x this check should go away and the env var removed.
if useEventLogger {
eventBroker, err = eventlogger.NewBroker()
if err != nil {
return nil, fmt.Errorf("error creating event broker for audit events: %w", err)
}
+
+ // Set up the broker that will support a single fallback device.
+ fallbackBroker, err = eventlogger.NewBroker()
+ if err != nil {
+ return nil, fmt.Errorf("error creating event fallback broker for audit event: %w", err)
+ }
}
b := &AuditBroker{
- backends: make(map[string]backendEntry),
- logger: log,
- broker: eventBroker,
+ backends: make(map[string]backendEntry),
+ logger: log,
+ broker: eventBroker,
+ fallbackBroker: fallbackBroker,
}
return b, nil
}
@@ -74,19 +94,42 @@ func (a *AuditBroker) Register(name string, b audit.Backend, local bool) error {
return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
}
+ // If the backend is already registered, we cannot re-register it.
+ if a.isRegistered(name) {
+ return fmt.Errorf("%s: backend already registered '%s'", op, name)
+ }
+
+ // Fallback devices are singleton instances, we cannot register more than one or overwrite the existing one.
+ if b.IsFallback() && a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())) {
+ existing, err := a.existingFallbackName()
+ if err != nil {
+ return fmt.Errorf("%s: existing fallback device already registered: %w", op, err)
+ }
+
+ return fmt.Errorf("%s: existing fallback device already registered: %q", op, existing)
+ }
+
// The reason for this check is due to 1.15.x supporting the env var:
// 'VAULT_AUDIT_DISABLE_EVENTLOGGER'
// When NewAuditBroker is called, it is supplied a bool to determine whether
- // we initialize the broker, which are left nil otherwise.
+ // we initialize the broker (and fallback broker), which are left nil otherwise.
// In 1.16.x this check should go away and the env var removed.
if a.broker != nil {
if name != b.Name() {
return fmt.Errorf("%s: audit registration failed due to device name mismatch: %q, %q", op, name, b.Name())
}
- err := a.register(name, b)
- if err != nil {
- return fmt.Errorf("%s: unable to register device for %q: %w", op, name, err)
+ switch {
+ case b.IsFallback():
+ err := a.registerFallback(name, b)
+ if err != nil {
+ return fmt.Errorf("%s: unable to register fallback device for %q: %w", op, name, err)
+ }
+ default:
+ err := a.register(name, b)
+ if err != nil {
+ return fmt.Errorf("%s: unable to register device for %q: %w", op, name, err)
+ }
}
}
@@ -110,6 +153,12 @@ func (a *AuditBroker) Deregister(ctx context.Context, name string) error {
return fmt.Errorf("%s: name is required: %w", op, event.ErrInvalidParameter)
}
+ // If the backend isn't actually registered, then there's nothing to do.
+ // We don't return any error so that Deregister can be idempotent.
+ if !a.isRegistered(name) {
+ return nil
+ }
+
// Remove the Backend from the map first, so that if an error occurs while
// removing the pipeline and nodes, we can quickly exit this method with
// the error.
@@ -118,23 +167,37 @@ func (a *AuditBroker) Deregister(ctx context.Context, name string) error {
// The reason for this check is due to 1.15.x supporting the env var:
// 'VAULT_AUDIT_DISABLE_EVENTLOGGER'
// When NewAuditBroker is called, it is supplied a bool to determine whether
- // we initialize the broker, which are left nil otherwise.
+ // we initialize the broker (and fallback broker), which are left nil otherwise.
// In 1.16.x this check should go away and the env var removed.
if a.broker != nil {
- err := a.deregister(ctx, name)
- if err != nil {
- return fmt.Errorf("%s: deregistration failed for audit device %q: %w", op, name, err)
+ switch {
+ case name == a.fallbackName:
+ err := a.deregisterFallback(ctx, name)
+ if err != nil {
+ return fmt.Errorf("%s: deregistration failed for fallback audit device %q: %w", op, name, err)
+ }
+ default:
+ err := a.deregister(ctx, name)
+ if err != nil {
+ return fmt.Errorf("%s: deregistration failed for audit device %q: %w", op, name, err)
+ }
}
}
return nil
}
-// IsRegistered is used to check if a given audit backend is registered
+// IsRegistered is used to check if a given audit backend is registered.
func (a *AuditBroker) IsRegistered(name string) bool {
a.RLock()
defer a.RUnlock()
+ return a.isRegistered(name)
+}
+
+// isRegistered is used to check if a given audit backend is registered.
+// This method should be used within the AuditBroker to prevent locking issues.
+func (a *AuditBroker) isRegistered(name string) bool {
_, ok := a.backends[name]
return ok
}
@@ -236,6 +299,9 @@ func (a *AuditBroker) LogRequest(ctx context.Context, in *logical.LogInput, head
e.Data = in
+ // There may be cases where only the fallback device was added but no other
+ // normal audit devices, so check if the broker had an audit based pipeline
+ // registered before trying to send to it.
var status eventlogger.Status
if a.broker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())) {
status, err = a.broker.Send(ctx, eventlogger.EventType(event.AuditType.String()), e)
@@ -255,6 +321,15 @@ func (a *AuditBroker) LogRequest(ctx context.Context, in *logical.LogInput, head
retErr = multierror.Append(retErr, multierror.Append(errors.New("error during audit pipeline processing"), status.Warnings...))
return retErr.ErrorOrNil()
}
+
+ // If a fallback device is registered we can rely on that to 'catch all'
+ // and also the broker level guarantee for completed sinks.
+ if a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())) {
+ status, err = a.fallbackBroker.Send(ctx, eventlogger.EventType(event.AuditType.String()), e)
+ if err != nil {
+ retErr = multierror.Append(retErr, multierror.Append(fmt.Errorf("auditing request to fallback device failed: %w", err), status.Warnings...))
+ }
+ }
}
}
@@ -349,6 +424,9 @@ func (a *AuditBroker) LogResponse(ctx context.Context, in *logical.LogInput, hea
defer auditCancel()
auditContext = namespace.ContextWithNamespace(auditContext, ns)
+ // There may be cases where only the fallback device was added but no other
+ // normal audit devices, so check if the broker had an audit based pipeline
+ // registered before trying to send to it.
var status eventlogger.Status
if a.broker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())) {
status, err = a.broker.Send(auditContext, eventlogger.EventType(event.AuditType.String()), e)
@@ -368,6 +446,15 @@ func (a *AuditBroker) LogResponse(ctx context.Context, in *logical.LogInput, hea
retErr = multierror.Append(retErr, multierror.Append(errors.New("error during audit pipeline processing"), status.Warnings...))
return retErr.ErrorOrNil()
}
+
+ // If a fallback device is registered we can rely on that to 'catch all'
+ // and also the broker level guarantee for completed sinks.
+ if a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())) {
+ status, err = a.fallbackBroker.Send(auditContext, eventlogger.EventType(event.AuditType.String()), e)
+ if err != nil {
+ retErr = multierror.Append(retErr, multierror.Append(fmt.Errorf("auditing response to fallback device failed: %w", err), status.Warnings...))
+ }
+ }
}
}
@@ -391,13 +478,19 @@ func (a *AuditBroker) Invalidate(ctx context.Context, key string) {
// guarantee provided by setting the threshold to 1, and must set it to 0.
// If you are registering an audit device, you should first check if that backend
// does not have filtering before querying the backends via requiredSuccessThresholdSinks.
+// backends may also contain a fallback device, which should be ignored as it is
+// handled by the fallbackBroker.
func (a *AuditBroker) requiredSuccessThresholdSinks() int {
threshold := 0
// We might need to check over all the existing backends to discover if any
// don't use filtering.
for _, be := range a.backends {
- if !be.backend.HasFiltering() {
+ switch {
+ case be.backend.IsFallback():
+ // Ignore fallback devices as they're handled by a separate broker.
+ continue
+ case !be.backend.HasFiltering():
threshold = 1
break
}
@@ -432,6 +525,65 @@ func registerNodesAndPipeline(broker *eventlogger.Broker, b audit.Backend) error
return nil
}
+// existingFallbackName returns the name of the fallback device which is registered
+// with the AuditBroker.
+func (a *AuditBroker) existingFallbackName() (string, error) {
+ const op = "vault.(AuditBroker).existingFallbackName"
+
+ for _, be := range a.backends {
+ if be.backend.IsFallback() {
+ return be.backend.Name(), nil
+ }
+ }
+
+ return "", fmt.Errorf("%s: existing fallback device name is missing", op)
+}
+
+// registerFallback can be used to register a fallback device, it will also
+// configure the success threshold required for sinks.
+func (a *AuditBroker) registerFallback(name string, backend audit.Backend) error {
+ const op = "vault.(AuditBroker).registerFallback"
+
+ err := registerNodesAndPipeline(a.fallbackBroker, backend)
+ if err != nil {
+ return fmt.Errorf("%s: fallback device pipeline registration error: %w", op, err)
+ }
+
+ // Store the name of the fallback audit device so that we can check when
+ // deregistering if the device is the single fallback one.
+ a.fallbackName = backend.Name()
+
+ // We need to turn on the threshold for the fallback broker, so we can
+ // guarantee it ends up somewhere
+ err = a.fallbackBroker.SetSuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()), 1)
+ if err != nil {
+ return fmt.Errorf("%s: unable to configure fallback sink success threshold (1) for %q: %w", op, name, err)
+ }
+
+ return nil
+}
+
+// deregisterFallback can be used to deregister a fallback audit device, it will
+// also configure the success threshold required for sinks.
+func (a *AuditBroker) deregisterFallback(ctx context.Context, name string) error {
+ const op = "vault.(AuditBroker).deregisterFallback"
+
+ err := a.fallbackBroker.SetSuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()), 0)
+ if err != nil {
+ return fmt.Errorf("%s: unable to configure fallback sink success threshold (0) for %q: %w", op, name, err)
+ }
+
+ _, err = a.fallbackBroker.RemovePipelineAndNodes(ctx, eventlogger.EventType(event.AuditType.String()), eventlogger.PipelineID(name))
+ if err != nil {
+ return fmt.Errorf("%s: unable to deregister fallback device %q: %w", op, name, err)
+ }
+
+ // Clear the fallback device name now we've deregistered.
+ a.fallbackName = ""
+
+ return nil
+}
+
// register can be used to register a normal audit device, it will also calculate
// and configure the success threshold required for sinks.
func (a *AuditBroker) register(name string, backend audit.Backend) error {
diff --git a/vault/audit_broker_test.go b/vault/audit_broker_test.go
index a7fa891fcfdb..18efaa560102 100644
--- a/vault/audit_broker_test.go
+++ b/vault/audit_broker_test.go
@@ -141,3 +141,118 @@ func TestAuditBroker_Deregister_SuccessThresholdSinks(t *testing.T) {
require.True(t, ok)
require.Equal(t, 1, res)
}
+
+// TestAuditBroker_Register_Fallback ensures we can register a fallback device.
+func TestAuditBroker_Register_Fallback(t *testing.T) {
+ t.Parallel()
+
+ l := corehelpers.NewTestLogger(t)
+ a, err := NewAuditBroker(l, true)
+ require.NoError(t, err)
+ require.NotNil(t, a)
+
+ path := "juan/"
+ fallbackBackend := testAuditBackend(t, path, map[string]string{"fallback": "true"})
+ err = a.Register(path, fallbackBackend, false)
+ require.NoError(t, err)
+ require.True(t, a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())))
+ require.Equal(t, path, a.fallbackName)
+ threshold, found := a.fallbackBroker.SuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()))
+ require.True(t, found)
+ require.Equal(t, 1, threshold)
+}
+
+// TestAuditBroker_Register_FallbackMultiple tests that trying to register more
+// than a single fallback device results in the correct error.
+func TestAuditBroker_Register_FallbackMultiple(t *testing.T) {
+ t.Parallel()
+
+ l := corehelpers.NewTestLogger(t)
+ a, err := NewAuditBroker(l, true)
+ require.NoError(t, err)
+ require.NotNil(t, a)
+
+ path1 := "juan1/"
+ fallbackBackend1 := testAuditBackend(t, path1, map[string]string{"fallback": "true"})
+ err = a.Register(path1, fallbackBackend1, false)
+ require.NoError(t, err)
+ require.True(t, a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())))
+ require.Equal(t, path1, a.fallbackName)
+
+ path2 := "juan2/"
+ fallbackBackend2 := testAuditBackend(t, path2, map[string]string{"fallback": "true"})
+ err = a.Register(path1, fallbackBackend2, false)
+ require.Error(t, err)
+ require.EqualError(t, err, "vault.(AuditBroker).Register: backend already registered 'juan1/'")
+ require.True(t, a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())))
+ require.Equal(t, path1, a.fallbackName)
+}
+
+// TestAuditBroker_Deregister_Fallback ensures that we can deregister a fallback
+// device successfully.
+func TestAuditBroker_Deregister_Fallback(t *testing.T) {
+ t.Parallel()
+
+ l := corehelpers.NewTestLogger(t)
+ a, err := NewAuditBroker(l, true)
+ require.NoError(t, err)
+ require.NotNil(t, a)
+
+ path := "juan/"
+ fallbackBackend := testAuditBackend(t, path, map[string]string{"fallback": "true"})
+ err = a.Register(path, fallbackBackend, false)
+ require.NoError(t, err)
+ require.True(t, a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())))
+ require.Equal(t, path, a.fallbackName)
+
+ threshold, found := a.fallbackBroker.SuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()))
+ require.True(t, found)
+ require.Equal(t, 1, threshold)
+
+ err = a.Deregister(context.Background(), path)
+ require.NoError(t, err)
+ require.False(t, a.fallbackBroker.IsAnyPipelineRegistered(eventlogger.EventType(event.AuditType.String())))
+ require.Equal(t, "", a.fallbackName)
+
+ threshold, found = a.fallbackBroker.SuccessThresholdSinks(eventlogger.EventType(event.AuditType.String()))
+ require.True(t, found)
+ require.Equal(t, 0, threshold)
+}
+
+// TestAuditBroker_Deregister_Multiple ensures that we can call deregister multiple
+// times without issue if is no matching backend registered.
+func TestAuditBroker_Deregister_Multiple(t *testing.T) {
+ t.Parallel()
+
+ l := corehelpers.NewTestLogger(t)
+ a, err := NewAuditBroker(l, true)
+ require.NoError(t, err)
+ require.NotNil(t, a)
+
+ err = a.Deregister(context.Background(), "foo")
+ require.NoError(t, err)
+
+ err = a.Deregister(context.Background(), "foo2")
+ require.NoError(t, err)
+}
+
+// TestAuditBroker_Register_MultipleFails checks for failure when we try to
+// re-register an audit backend.
+func TestAuditBroker_Register_MultipleFails(t *testing.T) {
+ t.Parallel()
+
+ l := corehelpers.NewTestLogger(t)
+ a, err := NewAuditBroker(l, true)
+ require.NoError(t, err)
+ require.NotNil(t, a)
+
+ path := "b2-no-filter"
+ noFilterBackend := testAuditBackend(t, path, map[string]string{})
+
+ err = a.Register(path, noFilterBackend, false)
+ require.NoError(t, err)
+
+ err = a.Register(path, noFilterBackend, false)
+ require.Error(t, err)
+ require.EqualError(t, err, "vault.(AuditBroker).Register: backend already registered 'b2-no-filter'")
+}
diff --git a/vault/audit_test.go b/vault/audit_test.go
index afecafaea245..87f06f4a5731 100644
--- a/vault/audit_test.go
+++ b/vault/audit_test.go
@@ -237,6 +237,69 @@ func TestCore_EnableAudit_Local(t *testing.T) {
}
}
+// TestAudit_enableAudit_fallback_invalid ensures that supplying a bad value for
+// 'fallback' in options gives us the correct error.
+func TestAudit_enableAudit_fallback_invalid(t *testing.T) {
+ entry := &MountEntry{
+ Path: "noop/",
+ Options: map[string]string{
+ "fallback": "juan",
+ },
+ }
+
+ cluster := NewTestCluster(t, nil, nil)
+ cluster.Start()
+ defer cluster.Cleanup()
+ core := cluster.Cores[0]
+ core.auditBackends["noop"] = corehelpers.NoopAuditFactory(nil)
+ err := core.enableAudit(context.Background(), entry, false)
+ require.Error(t, err)
+ require.EqualError(t, err, "unable to enable audit device 'noop/', cannot parse supplied 'fallback' setting: cannot parse '' as bool: strconv.ParseBool: parsing \"juan\": invalid syntax")
+}
+
+// TestAudit_enableAudit_fallback_two ensures trying to enable a second fallback
+// device returns the correct error.
+func TestAudit_enableAudit_fallback_two(t *testing.T) {
+ entry1 := &MountEntry{
+ Table: auditTableType,
+ Path: "noop1/",
+ Type: "noop",
+ UUID: "abcd",
+ Accessor: "noop1-abcd",
+ NamespaceID: namespace.RootNamespaceID,
+ Options: map[string]string{
+ "fallback": "TRUE",
+ },
+ namespace: namespace.RootNamespace,
+ }
+
+ entry2 := &MountEntry{
+ Table: auditTableType,
+ Path: "noop2/",
+ Type: "noop",
+ UUID: "abcd",
+ Accessor: "noop2-abcd",
+ NamespaceID: namespace.RootNamespaceID,
+ Options: map[string]string{
+ "fallback": "1",
+ },
+ namespace: namespace.RootNamespace,
+ }
+
+ cluster := NewTestCluster(t, nil, nil)
+ cluster.Start()
+ defer cluster.Cleanup()
+ core := cluster.Cores[0]
+ core.auditBackends["noop"] = corehelpers.NoopAuditFactory(nil)
+ ctx := namespace.ContextWithNamespace(context.Background(), namespace.RootNamespace)
+ err := core.enableAudit(ctx, entry1, false)
+ require.NoError(t, err)
+
+ err = core.enableAudit(ctx, entry2, false)
+ require.Error(t, err)
+ require.EqualError(t, err, "unable to enable audit device 'noop2/', a fallback device already exists 'noop1/'")
+}
+
func TestCore_DisableAudit(t *testing.T) {
c, keys, _ := TestCoreUnsealed(t)
c.auditBackends["noop"] = corehelpers.NoopAuditFactory(nil)
From d13edc610706724ab6337ea45617a8df49e8a012 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 8 Jan 2024 11:15:57 -0500
Subject: [PATCH 39/39] Bump golang.org/x/crypto from 0.14.0 to 0.17.0 in /sdk
(#24576)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.14.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.17.0)
---
updated-dependencies:
- dependency-name: golang.org/x/crypto
dependency-type: direct:production
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Violet Hynes
---
sdk/go.mod | 8 ++++----
sdk/go.sum | 16 ++++++++--------
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/sdk/go.mod b/sdk/go.mod
index 5bd6a0a98adf..862175f672b6 100644
--- a/sdk/go.mod
+++ b/sdk/go.mod
@@ -46,9 +46,9 @@ require (
github.com/ryanuber/go-glob v1.0.0
github.com/stretchr/testify v1.8.3
go.uber.org/atomic v1.9.0
- golang.org/x/crypto v0.14.0
+ golang.org/x/crypto v0.17.0
golang.org/x/net v0.17.0
- golang.org/x/text v0.13.0
+ golang.org/x/text v0.14.0
google.golang.org/grpc v1.57.2
google.golang.org/protobuf v1.31.0
)
@@ -104,8 +104,8 @@ require (
go.opencensus.io v0.24.0 // indirect
golang.org/x/mod v0.9.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
- golang.org/x/sys v0.13.0 // indirect
- golang.org/x/term v0.13.0 // indirect
+ golang.org/x/sys v0.15.0 // indirect
+ golang.org/x/term v0.15.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.7.0 // indirect
google.golang.org/api v0.134.0 // indirect
diff --git a/sdk/go.sum b/sdk/go.sum
index b71ecfa543b8..74c09ebd6e94 100644
--- a/sdk/go.sum
+++ b/sdk/go.sum
@@ -543,8 +543,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
-golang.org/x/crypto v0.14.0 h1:wBqGXzWJW6m1XrIKlAH0Hs1JJ7+9KBwnIO8v66Q9cHc=
-golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4=
+golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
+golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -683,14 +683,14 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE=
-golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek=
-golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U=
+golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
+golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -702,8 +702,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k=
-golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=