Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

r/aws_backup_plan: Adding resource for managing AWS Backup plans #7350

Merged
merged 17 commits into from
Mar 18, 2019
Merged
Show file tree
Hide file tree
Changes from 13 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions aws/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -323,6 +323,7 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
"aws_autoscaling_schedule": resourceAwsAutoscalingSchedule(),
"aws_backup_plan": resourceAwsBackupPlan(),
"aws_backup_vault": resourceAwsBackupVault(),
"aws_budgets_budget": resourceAwsBudgetsBudget(),
"aws_cloud9_environment_ec2": resourceAwsCloud9EnvironmentEc2(),
Expand Down
311 changes: 311 additions & 0 deletions aws/resource_aws_backup_plan.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,311 @@
package aws

import (
	"bytes"
	"fmt"
	"sort"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/backup"
	"github.com/hashicorp/terraform/helper/hashcode"
	"github.com/hashicorp/terraform/helper/schema"
)

// resourceAwsBackupPlan returns the schema and CRUD handlers for the
// aws_backup_plan resource, which manages an AWS Backup plan.
func resourceAwsBackupPlan() *schema.Resource {
	return &schema.Resource{
		Create: resourceAwsBackupPlanCreate,
		Read:   resourceAwsBackupPlanRead,
		Update: resourceAwsBackupPlanUpdate,
		Delete: resourceAwsBackupPlanDelete,

		Schema: map[string]*schema.Schema{
			// Display name of the backup plan; changing it forces a new plan.
			"name": {
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			// Set of backup rules; element identity is computed by
			// resourceAwsPlanRuleHash (see Set below).
			"rule": {
				Type:     schema.TypeSet,
				Required: true,
				Elem: &schema.Resource{
					Schema: map[string]*schema.Schema{
						"rule_name": {
							Type:     schema.TypeString,
							Required: true,
						},
						// Name of the backup vault that stores recovery points
						// created by this rule.
						"target_vault_name": {
							Type:     schema.TypeString,
							Required: true,
						},
						// CRON expression, e.g. "cron(0 12 * * ? *)".
						"schedule": {
							Type:     schema.TypeString,
							Optional: true,
						},
						// Minutes (default 60) — presumably the window before a
						// scheduled job is canceled if not started; confirm
						// against the Backup API docs.
						"start_window": {
							Type:     schema.TypeInt,
							Optional: true,
							Default:  60,
						},
						// Minutes (default 180) — presumably how long a job may
						// run before being canceled; confirm against API docs.
						"completion_window": {
							Type:     schema.TypeInt,
							Optional: true,
							Default:  180,
						},
						// NOTE(review): the Terraform Provider SDK does not
						// support TypeMap with a *schema.Resource Elem; per the
						// PR review this should likely be TypeList with
						// MaxItems: 1 — changing it requires coordinated
						// updates in the read/gather/hash functions.
						"lifecycle": {
							Type:     schema.TypeMap,
							Optional: true,
							Elem: &schema.Resource{
								Schema: map[string]*schema.Schema{
									// Days before transition to cold storage.
									"cold_storage_after": {
										Type:     schema.TypeInt,
										Optional: true,
									},
									// Days before the recovery point is deleted.
									"delete_after": {
										Type:     schema.TypeInt,
										Optional: true,
									},
								},
							},
						},
						// Tags applied to recovery points created by this rule.
						"recovery_point_tags": {
							Type:     schema.TypeMap,
							Optional: true,
							Elem:     &schema.Schema{Type: schema.TypeString},
						},
					},
				},
				Set: resourceAwsPlanRuleHash,
			},
			// Tags on the plan itself; ForceNew because they are only sent at
			// create time (CreateBackupPlanInput.BackupPlanTags).
			"tags": {
				Type:     schema.TypeMap,
				Optional: true,
				ForceNew: true,
				Elem:     &schema.Schema{Type: schema.TypeString},
			},
			// ARN of the plan, computed from the API response.
			"arn": {
				Type:     schema.TypeString,
				Computed: true,
			},
			// Version ID of the plan, computed from the API response.
			"version": {
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

func resourceAwsBackupPlanCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).backupconn

plan := &backup.PlanInput{
BackupPlanName: aws.String(d.Get("name").(string)),
}

rules := gatherPlanRules(d)

plan.Rules = rules

input := &backup.CreateBackupPlanInput{
BackupPlan: plan,
}

if v, ok := d.GetOk("tags"); ok {
input.BackupPlanTags = v.(map[string]*string)
}

resp, err := conn.CreateBackupPlan(input)
if err != nil {
return err
slapula marked this conversation as resolved.
Show resolved Hide resolved
}

d.SetId(*resp.BackupPlanId)

return resourceAwsBackupPlanRead(d, meta)
}

func resourceAwsBackupPlanRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).backupconn

input := &backup.GetBackupPlanInput{
BackupPlanId: aws.String(d.Id()),
}

resp, err := conn.GetBackupPlan(input)
if err != nil {
slapula marked this conversation as resolved.
Show resolved Hide resolved
return err
slapula marked this conversation as resolved.
Show resolved Hide resolved
}

rule := &schema.Set{F: resourceAwsPlanRuleHash}

for _, r := range resp.BackupPlan.Rules {
m := make(map[string]interface{})

if r.CompletionWindowMinutes != nil {
slapula marked this conversation as resolved.
Show resolved Hide resolved
m["completion_window"] = *r.CompletionWindowMinutes
}
if r.Lifecycle != nil {
l := map[string]int64{}
if r.Lifecycle.DeleteAfterDays != nil {
l["delete_after"] = *r.Lifecycle.DeleteAfterDays
}
if r.Lifecycle.MoveToColdStorageAfterDays != nil {
l["cold_storage_after"] = *r.Lifecycle.MoveToColdStorageAfterDays
}
m["lifecycle"] = l
}
if r.RecoveryPointTags != nil {
m["recovery_point_tags"] = r.RecoveryPointTags
}
m["rule_name"] = *r.RuleName
if r.ScheduleExpression != nil {
m["schedule"] = *r.ScheduleExpression
}
if r.StartWindowMinutes != nil {
m["start_window"] = *r.StartWindowMinutes
}
m["target_vault_name"] = *r.TargetBackupVaultName

rule.Add(m)
}
d.Set("rule", rule)
slapula marked this conversation as resolved.
Show resolved Hide resolved

d.Set("arn", resp.BackupPlanArn)
d.Set("version", resp.VersionId)

return nil
}

func resourceAwsBackupPlanUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).backupconn

plan := &backup.PlanInput{
BackupPlanName: aws.String(d.Get("name").(string)),
}

rules := gatherPlanRules(d)

plan.Rules = rules

input := &backup.UpdateBackupPlanInput{
BackupPlanId: aws.String(d.Id()),
BackupPlan: plan,
}

_, err := conn.UpdateBackupPlan(input)
if err != nil {
return err
slapula marked this conversation as resolved.
Show resolved Hide resolved
}

return resourceAwsBackupPlanRead(d, meta)
}

func resourceAwsBackupPlanDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).backupconn

input := &backup.DeleteBackupPlanInput{
BackupPlanId: aws.String(d.Id()),
}

_, err := conn.DeleteBackupPlan(input)
if err != nil {
slapula marked this conversation as resolved.
Show resolved Hide resolved
return err
slapula marked this conversation as resolved.
Show resolved Hide resolved
}

return nil
}

func gatherPlanRules(d *schema.ResourceData) []*backup.RuleInput {
slapula marked this conversation as resolved.
Show resolved Hide resolved
rules := []*backup.RuleInput{}
planRules := d.Get("rule").(*schema.Set).List()

for _, i := range planRules {
item := i.(map[string]interface{})
lifecycle := i.(map[string]interface{})["lifecycle"].(map[string]interface{})
slapula marked this conversation as resolved.
Show resolved Hide resolved
rule := &backup.RuleInput{}

if item["rule_name"] != "" {
rule.RuleName = aws.String(item["rule_name"].(string))
}
if item["target_vault_name"] != "" {
rule.TargetBackupVaultName = aws.String(item["target_vault_name"].(string))
}
if item["schedule"] != "" {
rule.ScheduleExpression = aws.String(item["schedule"].(string))
}
if item["start_window"] != nil {
rule.StartWindowMinutes = aws.Int64(int64(item["start_window"].(int)))
}
if item["completion_window"] != nil {
rule.CompletionWindowMinutes = aws.Int64(int64(item["completion_window"].(int)))
}
if lifecycle["delete_after"] != nil {
rule.Lifecycle.DeleteAfterDays = aws.Int64(int64(lifecycle["delete_after"].(int)))
}
if lifecycle["cold_storage_after"] != nil {
rule.Lifecycle.MoveToColdStorageAfterDays = aws.Int64(int64(lifecycle["cold_storage_after"].(int)))
}
if item["recovery_point_tags"] != nil {
tagsUnwrapped := make(map[string]*string)
for key, value := range item["recovery_point_tags"].(map[string]interface{}) {
tagsUnwrapped[key] = aws.String(value.(string))
}
rule.RecoveryPointTags = tagsUnwrapped
}

rules = append(rules, rule)
}

return rules
}

// resourceAwsPlanRuleHash computes a stable identity hash for one element of
// the "rule" TypeSet. The serialization must be deterministic: the previous
// implementation formatted a map[string]*string with %v, which prints pointer
// addresses and produced a different hash on every run (perpetual diffs).
func resourceAwsPlanRuleHash(v interface{}) int {
	var buf bytes.Buffer
	m := v.(map[string]interface{})

	// lifecycle may arrive as map[string]interface{} (from config) or as
	// map[string]int64 (legacy read path); handle both without panicking.
	switch l := m["lifecycle"].(type) {
	case map[string]interface{}:
		if w, ok := l["delete_after"]; ok {
			buf.WriteString(fmt.Sprintf("%d-", w))
		}
		if w, ok := l["cold_storage_after"]; ok {
			buf.WriteString(fmt.Sprintf("%d-", w))
		}
	case map[string]int64:
		if w, ok := l["delete_after"]; ok {
			buf.WriteString(fmt.Sprintf("%d-", w))
		}
		if w, ok := l["cold_storage_after"]; ok {
			buf.WriteString(fmt.Sprintf("%d-", w))
		}
	}

	if v, ok := m["completion_window"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v))
	}

	if v, ok := m["recovery_point_tags"]; ok {
		// Normalize both possible tag representations to map[string]string,
		// then serialize in sorted key order with dereferenced values.
		tags := make(map[string]string)
		switch t := v.(type) {
		case map[string]*string:
			for key, value := range t {
				tags[key] = aws.StringValue(value)
			}
		case map[string]interface{}:
			for key, value := range t {
				if s, ok := value.(string); ok {
					tags[key] = s
				}
			}
		default:
			fmt.Println("invalid type: ", t)
		}
		keys := make([]string, 0, len(tags))
		for key := range tags {
			keys = append(keys, key)
		}
		sort.Strings(keys)
		for _, key := range keys {
			buf.WriteString(fmt.Sprintf("%s=%s-", key, tags[key]))
		}
	}

	if v, ok := m["rule_name"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}

	if v, ok := m["schedule"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}

	if v, ok := m["start_window"]; ok {
		buf.WriteString(fmt.Sprintf("%d-", v))
	}

	if v, ok := m["target_vault_name"]; ok {
		buf.WriteString(fmt.Sprintf("%s-", v.(string)))
	}

	return hashcode.String(buf.String())
}
Loading