Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

r/aws_backup_plan: Adding resource for managing AWS Backup plans #7350

Merged
merged 17 commits into from
Mar 18, 2019
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions aws/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -324,6 +324,7 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
"aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_backup_plan": resourceAwsBackupPlan(),
"aws_backup_vault": resourceAwsBackupVault(),
"aws_budgets_budget": resourceAwsBudgetsBudget(),
"aws_cloud9_environment_ec2": resourceAwsCloud9EnvironmentEc2(),
Expand Down
360 changes: 360 additions & 0 deletions aws/resource_aws_backup_plan.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,360 @@
package aws

import (
"bytes"
"fmt"
"log"

"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/backup"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/schema"
)

func resourceAwsBackupPlan() *schema.Resource {
return &schema.Resource{
Create: resourceAwsBackupPlanCreate,
Read: resourceAwsBackupPlanRead,
Update: resourceAwsBackupPlanUpdate,
Delete: resourceAwsBackupPlanDelete,

Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"rule": {
Type: schema.TypeSet,
Required: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"rule_name": {
Type: schema.TypeString,
Required: true,
},
"target_vault_name": {
Type: schema.TypeString,
Required: true,
},
"schedule": {
Type: schema.TypeString,
Optional: true,
},
"start_window": {
Type: schema.TypeInt,
Optional: true,
Default: 60,
},
"completion_window": {
Type: schema.TypeInt,
Optional: true,
Default: 180,
},
"lifecycle": {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Can you please add an acceptance test that exercises this configuration block? Thanks! I think line 156 should be a something like m["lifecycle"] = []interface{}{l}

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

One question that's been nagging me as I've been working on these resources... How do I test for attributes in a list where the identifier appears to be a random number instead of an ordered number? For example:

  rule.# = 1
  rule.160206105.completion_window = 180
  rule.160206105.lifecycle.# = 1
  rule.160206105.lifecycle.0.cold_storage_after = 30
  rule.160206105.lifecycle.0.delete_after = 160
  rule.160206105.recovery_point_tags.% = 0
  rule.160206105.rule_name = tf_acc_test_backup_rule_7563947093289689272
  rule.160206105.schedule = cron(0 12 * * ? *)
  rule.160206105.start_window = 60
  rule.160206105.target_vault_name = tf_acc_test_backup_vault_7563947093289689272

How do I test the attributes of rule when 160206105 changes with each test run?

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Unfortunately in these cases, the randomization gets complicated for the acceptance testing. You'll probably want to make the rule names static (unless the randomization there actually matters). While it is technically possible you could try to calculate the hash of the TypeSet, once you're in this situation it is generally easier to take the resultant API object returned by the Exists TestCheckFunc and check the values within it directly, e.g.

func testAccCheckBackupPlanRules(plan *backup.GetBackupPlanOutput, expectedRules []*backup.Rule) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		if a, e := len(plan.Plan.Rules), len(expectedRules); a != e {
			return fmt.Errorf("expected %d Backup Plan Rules, got: %d", e, a)
		}

		for _, expectedRule := range expectedRules {
			for _, rule := range plan.Plan.Rules {
				if aws.StringValue(rule.RuleName) != aws.StringValue(expectedRule.RuleName) {
					continue
				}

				ruleName := aws.StringValue(rule.RuleName)

				if a, e := aws.StringValue(rule.Schedule), aws.StringValue(expectedRule.Schedule); a != e {
					return fmt.Errorf("expected Backup Plan Rule (%s) schedule to match (%s), got: %s", ruleName, e, a)
				}

				// Check other various attributes as appropriate

				break
			}
		}

		return nil
	}
}

// In a TestStep, filling in rule details as necessary
testAccCheckAwsBackupPlanExists("aws_backup_plan.test", &plan),
testAccCheckBackupPlanRules(&plan, []*backup.Rule{
  {
    RuleName: aws.String("test"),
    Schedule: aws.String("cron(0 12 * * ? *)"),
  },
})

As an added assurance to the above, acceptance tests that implement TestSteps with ImportStateVerify: true will automatically ensure that the resource read function is correctly able to re-read the API object and generate the same Terraform state.

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ah ok, that makes sense. There really isn't a requirement for the plan to be unique other than to help me avoid running into dangling resources while testing. For this test, I'm going to opt to not make it use the random int so I can reliably test for the existence of the lifecycle.

Type: schema.TypeList,
Optional: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"cold_storage_after": {
Type: schema.TypeInt,
Optional: true,
},
"delete_after": {
Type: schema.TypeInt,
Optional: true,
},
},
},
},
"recovery_point_tags": {
Type: schema.TypeMap,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
},
},
Set: resourceAwsPlanRuleHash,
},
"arn": {
Type: schema.TypeString,
Computed: true,
},
"version": {
Type: schema.TypeString,
Computed: true,
},
"tags": tagsSchema(),
bflad marked this conversation as resolved.
Show resolved Hide resolved
},
}
}

// resourceAwsBackupPlanCreate creates an AWS Backup plan from the configured
// name, rules and tags, records the returned plan ID as the resource ID, and
// refreshes state via the read function.
func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).backupconn

	plan := &backup.PlanInput{
		BackupPlanName: aws.String(d.Get("name").(string)),
		Rules:          expandBackupPlanRules(d.Get("rule").(*schema.Set).List()),
	}

	input := &backup.CreateBackupPlanInput{
		BackupPlan: plan,
	}

	if v, ok := d.GetOk("tags"); ok {
		input.BackupPlanTags = tagsFromMapGeneric(v.(map[string]interface{}))
	}

	resp, err := conn.CreateBackupPlan(input)
	if err != nil {
		return fmt.Errorf("error creating Backup Plan: %s", err)
	}

	// aws.StringValue avoids a nil-pointer panic should the API ever return
	// a response without a BackupPlanId (the raw dereference would crash).
	d.SetId(aws.StringValue(resp.BackupPlanId))

	return resourceAwsBackupPlanRead(d, meta)
}

func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).backupconn

input := &backup.GetBackupPlanInput{
BackupPlanId: aws.String(d.Id()),
}

resp, err := conn.GetBackupPlan(input)
if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") {
log.Printf("[WARN] Backup Plan (%s) not found, removing from state", d.Id())
d.SetId("")
return nil
}

if err != nil {
slapula marked this conversation as resolved.
Show resolved Hide resolved
return fmt.Errorf("error reading Backup Plan: %s", err)
}

rule := &schema.Set{F: resourceAwsPlanRuleHash}

for _, r := range resp.BackupPlan.Rules {
m := make(map[string]interface{})

m["completion_window"] = aws.Int64Value(r.CompletionWindowMinutes)
m["recovery_point_tags"] = aws.StringValueMap(r.RecoveryPointTags)
m["rule_name"] = aws.StringValue(r.RuleName)
m["schedule"] = aws.StringValue(r.ScheduleExpression)
m["start_window"] = aws.Int64Value(r.StartWindowMinutes)
m["target_vault_name"] = aws.StringValue(r.TargetBackupVaultName)

if r.Lifecycle != nil {
l := make(map[string]interface{})
l["delete_after"] = aws.Int64Value(r.Lifecycle.DeleteAfterDays)
l["cold_storage_after"] = aws.Int64Value(r.Lifecycle.MoveToColdStorageAfterDays)
m["lifecycle"] = []interface{}{l}
}

rule.Add(m)
}
if err := d.Set("rule", rule); err != nil {
return fmt.Errorf("error setting rule: %s", err)
}

tagsOutput, err := conn.ListTags(&backup.ListTagsInput{
ResourceArn: resp.BackupPlanArn,
})
if err != nil {
return fmt.Errorf("error listing tags AWS Backup plan %s: %s", d.Id(), err)
}

if err := d.Set("tags", tagsToMapGeneric(tagsOutput.Tags)); err != nil {
return fmt.Errorf("error setting tags on AWS Backup plan %s: %s", d.Id(), err)
}

d.Set("arn", resp.BackupPlanArn)
d.Set("version", resp.VersionId)

return nil
}

// resourceAwsBackupPlanUpdate pushes the configured name and rules to the
// Backup API, reconciles tag changes against the plan's ARN, and re-reads
// state.
func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error {
	conn := meta.(*AWSClient).backupconn

	_, err := conn.UpdateBackupPlan(&backup.UpdateBackupPlanInput{
		BackupPlanId: aws.String(d.Id()),
		BackupPlan: &backup.PlanInput{
			BackupPlanName: aws.String(d.Get("name").(string)),
			Rules:          expandBackupPlanRules(d.Get("rule").(*schema.Set).List()),
		},
	})
	if err != nil {
		return fmt.Errorf("error updating Backup Plan: %s", err)
	}

	if d.HasChange("tags") {
		arn := d.Get("arn").(string)
		oldTags, newTags := d.GetChange("tags")
		create, remove := diffTagsGeneric(oldTags.(map[string]interface{}), newTags.(map[string]interface{}))

		if len(remove) > 0 {
			log.Printf("[DEBUG] Removing tags: %#v", remove)
			keys := make([]*string, 0, len(remove))
			for k := range remove {
				keys = append(keys, aws.String(k))
			}

			_, err := conn.UntagResource(&backup.UntagResourceInput{
				ResourceArn: aws.String(arn),
				TagKeyList:  keys,
			})
			// A vanished plan is treated as deleted out-of-band.
			if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") {
				log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id())
				d.SetId("")
				return nil
			}
			if err != nil {
				return fmt.Errorf("Error removing tags for (%s): %s", d.Id(), err)
			}
		}

		if len(create) > 0 {
			log.Printf("[DEBUG] Creating tags: %#v", create)
			_, err := conn.TagResource(&backup.TagResourceInput{
				ResourceArn: aws.String(arn),
				Tags:        create,
			})
			if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") {
				log.Printf("[WARN] Backup Plan %s not found, removing from state", d.Id())
				d.SetId("")
				return nil
			}
			if err != nil {
				return fmt.Errorf("Error setting tags for (%s): %s", d.Id(), err)
			}
		}
	}

	return resourceAwsBackupPlanRead(d, meta)
}

func resourceAwsBackupPlanDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).backupconn

input := &backup.DeleteBackupPlanInput{
BackupPlanId: aws.String(d.Id()),
}

_, err := conn.DeleteBackupPlan(input)
if isAWSErr(err, backup.ErrCodeResourceNotFoundException, "") {
return nil
}

if err != nil {
slapula marked this conversation as resolved.
Show resolved Hide resolved
return fmt.Errorf("error deleting Backup Plan: %s", err)
}

return nil
}

// expandBackupPlanRules converts the Terraform "rule" set (a list of
// attribute maps) into the RuleInput slice expected by the Create/Update
// Backup plan APIs. Empty strings and nil values are left unset so the API
// applies its own defaults.
func expandBackupPlanRules(l []interface{}) []*backup.RuleInput {
	rules := []*backup.RuleInput{}

	for _, i := range l {
		item := i.(map[string]interface{})
		rule := &backup.RuleInput{}

		if item["rule_name"] != "" {
			rule.RuleName = aws.String(item["rule_name"].(string))
		}
		if item["target_vault_name"] != "" {
			rule.TargetBackupVaultName = aws.String(item["target_vault_name"].(string))
		}
		if item["schedule"] != "" {
			rule.ScheduleExpression = aws.String(item["schedule"].(string))
		}
		if item["start_window"] != nil {
			rule.StartWindowMinutes = aws.Int64(int64(item["start_window"].(int)))
		}
		if item["completion_window"] != nil {
			rule.CompletionWindowMinutes = aws.Int64(int64(item["completion_window"].(int)))
		}
		if item["recovery_point_tags"] != nil {
			rule.RecoveryPointTags = tagsFromMapGeneric(item["recovery_point_tags"].(map[string]interface{}))
		}

		// At most one lifecycle block per rule (schema MaxItems: 1).
		// Reuse item rather than re-asserting i's type as the original did.
		if item["lifecycle"] != nil {
			if lifecycleRaw := item["lifecycle"].([]interface{}); len(lifecycleRaw) == 1 {
				lifecycle := lifecycleRaw[0].(map[string]interface{})
				lcValues := &backup.Lifecycle{}
				if lifecycle["delete_after"] != nil {
					lcValues.DeleteAfterDays = aws.Int64(int64(lifecycle["delete_after"].(int)))
				}
				if lifecycle["cold_storage_after"] != nil {
					lcValues.MoveToColdStorageAfterDays = aws.Int64(int64(lifecycle["cold_storage_after"].(int)))
				}
				rule.Lifecycle = lcValues
			}
		}

		rules = append(rules, rule)
	}

	return rules
}

// resourceAwsPlanRuleHash computes the set hash for a single backup rule by
// concatenating its attribute values into a buffer and hashing the result.
// The format verbs are kept byte-identical to the original implementation:
// changing them would change existing hashes and cause spurious diffs.
func resourceAwsPlanRuleHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	// Fold in the (at most one) lifecycle block first, matching the
	// original hash ordering. Use m instead of re-asserting v's type, and
	// avoid shadowing the parameter v inside the lookups below.
	if m["lifecycle"] != nil {
		lcRaw := m["lifecycle"].([]interface{})
		if len(lcRaw) == 1 {
			lc := lcRaw[0].(map[string]interface{})
			if w, ok := lc["delete_after"]; ok {
				buf.WriteString(fmt.Sprintf("%v-", w))
			}
			if w, ok := lc["cold_storage_after"]; ok {
				buf.WriteString(fmt.Sprintf("%v-", w))
			}
		}
	}

	// The no-op v.(interface{}) assertions from the original are dropped;
	// fmt formats the interface value identically either way.
	if w, ok := m["completion_window"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", w))
	}
	if w, ok := m["recovery_point_tags"]; ok {
		buf.WriteString(fmt.Sprintf("%v-", w))
	}
	if w, ok := m["rule_name"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", w.(string)))
	}
	if w, ok := m["schedule"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", w.(string)))
	}
	if w, ok := m["start_window"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", w))
	}
	if w, ok := m["target_vault_name"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", w.(string)))
	}

	return hashcode.String(buf.String())
}
Loading