From 4323b491304540e721d026dbb5627154d4865cf9 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Tue, 19 Jan 2021 15:30:34 -0800
Subject: [PATCH 01/14] enable Packer fmt to read from stdin

---
 command/fmt.go            |  7 +++--
 hcl2template/formatter.go | 60 ++++++++++++++++++++++++++-------------
 2 files changed, 45 insertions(+), 22 deletions(-)

diff --git a/command/fmt.go b/command/fmt.go
index 822d5b3a152..5b15ed772db 100644
--- a/command/fmt.go
+++ b/command/fmt.go
@@ -75,8 +75,11 @@ Usage: packer fmt [options] [TEMPLATE]
   configuration files (.pkr.hcl) and variable files (.pkrvars.hcl) are updated.
   JSON files (.json) are not modified.
 
-  If TEMPATE is "." the current directory will be used. The given content must
-  be in Packer's HCL2 configuration language; JSON is not supported.
+  If TEMPLATE is "." the current directory will be used.
+  If TEMPLATE is "-" then content will be read from STDIN.
+
+  The given content must be in Packer's HCL2 configuration language; JSON is
+  not supported.
 
 Options:
   -check        Check if the input is formatted. Exit status will be 0 if all
diff --git a/hcl2template/formatter.go b/hcl2template/formatter.go
index e6b1c66340c..e83974a154d 100644
--- a/hcl2template/formatter.go
+++ b/hcl2template/formatter.go
@@ -31,25 +31,33 @@ func NewHCL2Formatter() *HCL2Formatter {
 //
 // Path can be a directory or a file.
 func (f *HCL2Formatter) Format(path string) (int, hcl.Diagnostics) {
-	hclFiles, _, diags := GetHCL2Files(path, hcl2FileExt, hcl2JsonFileExt)
-	if diags.HasErrors() {
-		return 0, diags
-	}
-	hclVarFiles, _, diags := GetHCL2Files(path, hcl2VarFileExt, hcl2VarJsonFileExt)
-	if diags.HasErrors() {
-		return 0, diags
-	}
+	var allHclFiles []string
+	var diags []*hcl.Diagnostic
+
+	if path == "-" {
+		allHclFiles = []string{"-"}
+	} else {
+		hclFiles, _, diags := GetHCL2Files(path, hcl2FileExt, hcl2JsonFileExt)
+		if diags.HasErrors() {
+			return 0, diags
+		}
+
+		hclVarFiles, _, diags := GetHCL2Files(path, hcl2VarFileExt, hcl2VarJsonFileExt)
+		if diags.HasErrors() {
+			return 0, diags
+		}
 
-	allHclFiles := append(hclFiles, hclVarFiles...)
+		allHclFiles = append(hclFiles, hclVarFiles...)
-	if len(allHclFiles) == 0 {
-		diags = append(diags, &hcl.Diagnostic{
-			Severity: hcl.DiagError,
-			Summary:  fmt.Sprintf("Cannot tell whether %s contains HCL2 configuration data", path),
-		})
+		if len(allHclFiles) == 0 {
+			diags = append(diags, &hcl.Diagnostic{
+				Severity: hcl.DiagError,
+				Summary:  fmt.Sprintf("Cannot tell whether %s contains HCL2 configuration data", path),
+			})
 
-		return 0, diags
+			return 0, diags
+		}
 	}
 
 	if f.parser == nil {
@@ -80,9 +88,17 @@ func (f *HCL2Formatter) processFile(filename string) ([]byte, error) {
 		f.Output = os.Stdout
 	}
 
-	in, err := os.Open(filename)
-	if err != nil {
-		return nil, fmt.Errorf("failed to open %s: %s", filename, err)
+	var in io.Reader
+	var err error
+
+	if filename == "-" {
+		in = os.Stdin
+		f.ShowDiff = false
+	} else {
+		in, err = os.Open(filename)
+		if err != nil {
+			return nil, fmt.Errorf("failed to open %s: %s", filename, err)
+		}
 	}
 
 	inSrc, err := ioutil.ReadAll(in)
@@ -105,8 +121,12 @@ func (f *HCL2Formatter) processFile(filename string) ([]byte, error) {
 	_, _ = f.Output.Write(s)
 
 	if f.Write {
-		if err := ioutil.WriteFile(filename, outSrc, 0644); err != nil {
-			return nil, err
+		if filename == "-" {
+			f.Output.Write(outSrc)
+		} else {
+			if err := ioutil.WriteFile(filename, outSrc, 0644); err != nil {
+				return nil, err
+			}
 		}
 	}
 

From 40df74e95afca30ba0aa8dbfa50cfd9fdad05d88 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Tue, 19 Jan 2021 15:34:18 -0800
Subject: [PATCH 02/14] linting

---
 hcl2template/formatter.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hcl2template/formatter.go b/hcl2template/formatter.go
index e83974a154d..6a3db089b8c 100644
--- a/hcl2template/formatter.go
+++ b/hcl2template/formatter.go
@@ -122,7 +122,7 @@ func (f *HCL2Formatter) processFile(filename string) ([]byte, error) {
 
 	if f.Write {
 		if filename == "-" {
-			f.Output.Write(outSrc)
+			_, _ = f.Output.Write(outSrc)
 		} else {
 			if err := ioutil.WriteFile(filename, outSrc, 0644); err != nil {
 				return nil, err

From 2b0996daa6acff5e6416857bd934a2f6fbaebb01 Mon Sep 17 00:00:00 2001
From: Megan Marsh
Date: Wed, 20 Jan 2021 11:30:50 -0800
Subject: [PATCH 03/14] don't print filename if it's reading from stdin

---
 hcl2template/formatter.go | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/hcl2template/formatter.go b/hcl2template/formatter.go
index 6a3db089b8c..d02d421a459 100644
--- a/hcl2template/formatter.go
+++ b/hcl2template/formatter.go
@@ -117,8 +117,10 @@ func (f *HCL2Formatter) processFile(filename string) ([]byte, error) {
 		return nil, nil
 	}
 
-	s := []byte(fmt.Sprintf("%s\n", filename))
-	_, _ = f.Output.Write(s)
+	if filename != "-" {
+		s := []byte(fmt.Sprintf("%s\n", filename))
+		_, _ = f.Output.Write(s)
+	}
 
 	if f.Write {
 		if filename == "-" {

From 23b50e5a7a3c2a6a65b371a63601182c8bbb2400 Mon Sep 17 00:00:00 2001
From: Wilken Rivera
Date: Wed, 20 Jan 2021 16:21:07 -0500
Subject: [PATCH 04/14] Update ansible local links

---
 website/content/docs/provisioners/ansible-local.mdx | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/website/content/docs/provisioners/ansible-local.mdx b/website/content/docs/provisioners/ansible-local.mdx
index ffa29878312..c6b49e3c09c 100644
--- a/website/content/docs/provisioners/ansible-local.mdx
+++ b/website/content/docs/provisioners/ansible-local.mdx
@@ -18,7 +18,7 @@ mode on the remote/guest VM using Playbook and Role files that exist on the
 guest VM. This means ansible must be installed on the remote/guest VM.
Playbooks and Roles can be uploaded from your build machine (the one running Packer) to the vm. Ansible is then run on the guest machine in [local -mode](https://docs.ansible.com/ansible/playbooks_delegation.html#local-playbooks) +mode](https://docs.ansible.com/ansible/latest/playbooks_delegation.html#local-playbooks) via the `ansible-playbook` command. -> **Note:** Ansible will _not_ be installed automatically by this @@ -187,7 +187,7 @@ Optional: - `galaxy_file` (string) - A requirements file which provides a way to install roles with the [ansible-galaxy - cli](http://docs.ansible.com/ansible/galaxy.html#the-ansible-galaxy-command-line-tool) + cli](http://docs.ansible.com/ansible/latest/galaxy.html#the-ansible-galaxy-command-line-tool) on the remote machine. By default, this is empty. - `galaxy_command` (string) - The command to invoke ansible-galaxy. By From d1ada744e17e6d1e223568653031e21385063c83 Mon Sep 17 00:00:00 2001 From: Sylvia Moss Date: Fri, 22 Jan 2021 14:49:45 +0100 Subject: [PATCH 05/14] Aws Secrets Manager data sources (#10505) --- cmd/packer-plugin-amazon/main.go | 2 + command/plugin.go | 4 +- datasource/amazon/ami/data.go | 19 +- datasource/amazon/secretsmanager/data.go | 168 ++++++++++++++++ .../amazon/secretsmanager/data.hcl2spec.go | 99 ++++++++++ .../amazon/secretsmanager/data_acc_test.go | 179 ++++++++++++++++++ datasource/amazon/secretsmanager/data_test.go | 39 ++++ go.mod | 2 +- go.sum | 8 + hcl2template/types.datasource.go | 7 + hcl2template/types.hcl_post-processor.go | 8 + hcl2template/types.hcl_provisioner.go | 8 + .../packer-plugin-sdk/acctest/datasources.go | 7 + .../aws/secretsmanager/secretsmanager.go | 18 +- .../packer-plugin-sdk/version/version.go | 4 +- vendor/modules.txt | 2 +- .../{amazon-ami.mdx => amazon/ami.mdx} | 7 +- .../content/docs/datasources/amazon/index.mdx | 42 ++++ .../datasources/amazon/secretsmanager.mdx | 51 +++++ .../ami/DatasourceOutput-not-required.mdx | 13 ++ .../secretsmanager/Config-not-required.mdx | 10 + .../amazon/secretsmanager/Config-required.mdx | 4 + .../DatasourceOutput-not-required.mdx | 12 ++ website/data/docs-navigation.js | 49 +++-- 24 files changed, 723 insertions(+), 39 deletions(-) create mode 100644 datasource/amazon/secretsmanager/data.go create mode 100644 datasource/amazon/secretsmanager/data.hcl2spec.go create mode 100644 datasource/amazon/secretsmanager/data_acc_test.go create mode 100644 datasource/amazon/secretsmanager/data_test.go rename website/content/docs/datasources/{amazon-ami.mdx => amazon/ami.mdx} (81%) create mode 100644 website/content/docs/datasources/amazon/index.mdx create mode 100644 website/content/docs/datasources/amazon/secretsmanager.mdx create mode 100644 website/content/partials/datasource/amazon/ami/DatasourceOutput-not-required.mdx create mode 100644 website/content/partials/datasource/amazon/secretsmanager/Config-not-required.mdx create mode 100644 website/content/partials/datasource/amazon/secretsmanager/Config-required.mdx create mode 100644 website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx diff --git a/cmd/packer-plugin-amazon/main.go b/cmd/packer-plugin-amazon/main.go index 01d77d70ded..e05a799de9b 100644 --- a/cmd/packer-plugin-amazon/main.go +++ b/cmd/packer-plugin-amazon/main.go @@ -10,6 +10,7 @@ import ( "github.com/hashicorp/packer/builder/amazon/ebsvolume" "github.com/hashicorp/packer/builder/osc/chroot" amazonami "github.com/hashicorp/packer/datasource/amazon/ami" + 
"github.com/hashicorp/packer/datasource/amazon/secretsmanager" amazonimport "github.com/hashicorp/packer/post-processor/amazon-import" ) @@ -21,6 +22,7 @@ func main() { pps.RegisterBuilder("ebsvolume", new(ebsvolume.Builder)) pps.RegisterPostProcessor("import", new(amazonimport.PostProcessor)) pps.RegisterDatasource("ami", new(amazonami.Datasource)) + pps.RegisterDatasource("secretsmanager", new(secretsmanager.Datasource)) err := pps.Run() if err != nil { fmt.Fprintln(os.Stderr, err.Error()) diff --git a/command/plugin.go b/command/plugin.go index cfc50570015..b5be6a4fa97 100644 --- a/command/plugin.go +++ b/command/plugin.go @@ -66,6 +66,7 @@ import ( vsphereisobuilder "github.com/hashicorp/packer/builder/vsphere/iso" yandexbuilder "github.com/hashicorp/packer/builder/yandex" amazonamidatasource "github.com/hashicorp/packer/datasource/amazon/ami" + amazonsecretsmanagerdatasource "github.com/hashicorp/packer/datasource/amazon/secretsmanager" alicloudimportpostprocessor "github.com/hashicorp/packer/post-processor/alicloud-import" amazonimportpostprocessor "github.com/hashicorp/packer/post-processor/amazon-import" artificepostprocessor "github.com/hashicorp/packer/post-processor/artifice" @@ -214,7 +215,8 @@ var PostProcessors = map[string]packersdk.PostProcessor{ } var Datasources = map[string]packersdk.Datasource{ - "amazon-ami": new(amazonamidatasource.Datasource), + "amazon-ami": new(amazonamidatasource.Datasource), + "amazon-secretsmanager": new(amazonsecretsmanagerdatasource.Datasource), } var pluginRegexp = regexp.MustCompile("packer-(builder|post-processor|provisioner|datasource)-(.+)") diff --git a/datasource/amazon/ami/data.go b/datasource/amazon/ami/data.go index 77a10e1f5ab..11f7563b439 100644 --- a/datasource/amazon/ami/data.go +++ b/datasource/amazon/ami/data.go @@ -1,3 +1,4 @@ +//go:generate struct-markdown //go:generate mapstructure-to-hcl2 -type DatasourceOutput,Config package ami @@ -50,12 +51,18 @@ func (d *Datasource) Configure(raws ...interface{}) error { } type DatasourceOutput struct { - ID string `mapstructure:"id"` - Name string `mapstructure:"name"` - CreationDate string `mapstructure:"creation_date"` - Owner string `mapstructure:"owner"` - OwnerName string `mapstructure:"owner_name"` - Tags map[string]string `mapstructure:"tags"` + // The ID of the AMI. + ID string `mapstructure:"id"` + // The name of the AMI. + Name string `mapstructure:"name"` + // The date of creation of the AMI. + CreationDate string `mapstructure:"creation_date"` + // The AWS account ID of the owner. + Owner string `mapstructure:"owner"` + // The owner alias. + OwnerName string `mapstructure:"owner_name"` + // The key/value combination of the tags assigned to the AMI. 
+ Tags map[string]string `mapstructure:"tags"` } func (d *Datasource) OutputSpec() hcldec.ObjectSpec { diff --git a/datasource/amazon/secretsmanager/data.go b/datasource/amazon/secretsmanager/data.go new file mode 100644 index 00000000000..e28ff4b8f41 --- /dev/null +++ b/datasource/amazon/secretsmanager/data.go @@ -0,0 +1,168 @@ +//go:generate struct-markdown +//go:generate mapstructure-to-hcl2 -type DatasourceOutput,Config +package secretsmanager + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/service/secretsmanager" + "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/packer-plugin-sdk/hcl2helper" + packersdk "github.com/hashicorp/packer-plugin-sdk/packer" + "github.com/hashicorp/packer-plugin-sdk/template/config" + awscommon "github.com/hashicorp/packer/builder/amazon/common" + "github.com/hashicorp/packer/builder/amazon/common/awserrors" + "github.com/zclconf/go-cty/cty" +) + +type Datasource struct { + config Config +} + +type Config struct { + // Specifies the secret containing the version that you want to retrieve. + // You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. + Name string `mapstructure:"name" required:"true"` + // Optional key for JSON secrets that contain more than one value. When set, the `value` output will + // contain the value for the provided key. + Key string `mapstructure:"key"` + // Specifies the unique identifier of the version of the secret that you want to retrieve. + // Overrides version_stage. + VersionId string `mapstructure:"version_id"` + // Specifies the secret version that you want to retrieve by the staging label attached to the version. + // Defaults to AWSCURRENT. + VersionStage string `mapstructure:"version_stage"` + awscommon.AccessConfig `mapstructure:",squash"` +} + +func (d *Datasource) ConfigSpec() hcldec.ObjectSpec { + return d.config.FlatMapstructure().HCL2Spec() +} + +func (d *Datasource) Configure(raws ...interface{}) error { + err := config.Decode(&d.config, nil, raws...) + if err != nil { + return err + } + + var errs *packersdk.MultiError + errs = packersdk.MultiErrorAppend(errs, d.config.AccessConfig.Prepare()...) + + if d.config.Name == "" { + errs = packersdk.MultiErrorAppend(errs, fmt.Errorf("a 'name' must be provided")) + } + + if d.config.VersionStage == "" { + d.config.VersionStage = "AWSCURRENT" + } + + if errs != nil && len(errs.Errors) > 0 { + return errs + } + return nil +} + +type DatasourceOutput struct { + // When a [key](#key) is provided, this will be the value for that key. If a key is not provided, + // `value` will contain the first value found in the secret string. + Value string `mapstructure:"value"` + // The decrypted part of the protected secret information that + // was originally provided as a string. + SecretString string `mapstructure:"secret_string"` + // The decrypted part of the protected secret information that + // was originally provided as a binary. Base64 encoded. + SecretBinary string `mapstructure:"secret_binary"` + // The unique identifier of this version of the secret. 
+	VersionId string `mapstructure:"version_id"`
+}
+
+func (d *Datasource) OutputSpec() hcldec.ObjectSpec {
+	return (&DatasourceOutput{}).FlatMapstructure().HCL2Spec()
+}
+
+func (d *Datasource) Execute() (cty.Value, error) {
+	session, err := d.config.Session()
+	if err != nil {
+		return cty.NullVal(cty.EmptyObject), err
+	}
+
+	input := &secretsmanager.GetSecretValueInput{
+		SecretId: aws.String(d.config.Name),
+	}
+
+	version := ""
+	if d.config.VersionId != "" {
+		input.VersionId = aws.String(d.config.VersionId)
+		version = d.config.VersionId
+	} else {
+		input.VersionStage = aws.String(d.config.VersionStage)
+		version = d.config.VersionStage
+	}
+
+	secretsApi := secretsmanager.New(session)
+	secret, err := secretsApi.GetSecretValue(input)
+	if err != nil {
+		if awserrors.Matches(err, secretsmanager.ErrCodeResourceNotFoundException, "") {
+			return cty.NullVal(cty.EmptyObject), fmt.Errorf("Secrets Manager Secret %q Version %q not found", d.config.Name, version)
+		}
+		if awserrors.Matches(err, secretsmanager.ErrCodeInvalidRequestException, "You can’t perform this operation on the secret because it was deleted") {
+			return cty.NullVal(cty.EmptyObject), fmt.Errorf("Secrets Manager Secret %q Version %q not found", d.config.Name, version)
+		}
+		return cty.NullVal(cty.EmptyObject), fmt.Errorf("error reading Secrets Manager Secret Version: %s", err)
+	}
+
+	value, err := getSecretValue(aws.StringValue(secret.SecretString), d.config.Key)
+	if err != nil {
+		return cty.NullVal(cty.EmptyObject), fmt.Errorf("error getting secret value: %q", err.Error())
+	}
+
+	versionId := aws.StringValue(secret.VersionId)
+	output := DatasourceOutput{
+		Value:        value,
+		SecretString: aws.StringValue(secret.SecretString),
+		SecretBinary: fmt.Sprintf("%s", secret.SecretBinary),
+		VersionId:    versionId,
+	}
+	return hcl2helper.HCL2ValueFromConfig(output, d.OutputSpec()), nil
+}
+
+func getSecretValue(secretString string, key string) (string, error) {
+	var secretValue map[string]interface{}
+	blob := []byte(secretString)
+
+	// For plaintext secrets, just return the value
+	if !json.Valid(blob) {
+		return secretString, nil
+	}
+
+	err := json.Unmarshal(blob, &secretValue)
+	if err != nil {
+		return "", err
+	}
+
+	if key == "" {
+		for _, v := range secretValue {
+			return getStringSecretValue(v)
+		}
+	}
+
+	if v, ok := secretValue[key]; ok {
+		return getStringSecretValue(v)
+	}
+
+	return "", nil
+}
+
+func getStringSecretValue(v interface{}) (string, error) {
+	switch valueType := v.(type) {
+	case string:
+		return valueType, nil
+	case float64:
+		return strconv.FormatFloat(valueType, 'f', 0, 64), nil
+	default:
+		return "", fmt.Errorf("Unsupported secret value type: %T", valueType)
+	}
+}
diff --git a/datasource/amazon/secretsmanager/data.hcl2spec.go b/datasource/amazon/secretsmanager/data.hcl2spec.go
new file mode 100644
index 00000000000..6100a97bb17
--- /dev/null
+++ b/datasource/amazon/secretsmanager/data.hcl2spec.go
@@ -0,0 +1,99 @@
+// Code generated by "mapstructure-to-hcl2 -type DatasourceOutput,Config"; DO NOT EDIT.
+
+package secretsmanager
+
+import (
+	"github.com/hashicorp/hcl/v2/hcldec"
+	"github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/zclconf/go-cty/cty"
+)
+
+// FlatConfig is an auto-generated flat version of Config.
+// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up.
+type FlatConfig struct { + Name *string `mapstructure:"name" required:"true" cty:"name" hcl:"name"` + Key *string `mapstructure:"key" cty:"key" hcl:"key"` + VersionId *string `mapstructure:"version_id" cty:"version_id" hcl:"version_id"` + VersionStage *string `mapstructure:"version_stage" cty:"version_stage" hcl:"version_stage"` + AccessKey *string `mapstructure:"access_key" required:"true" cty:"access_key" hcl:"access_key"` + AssumeRole *common.FlatAssumeRoleConfig `mapstructure:"assume_role" required:"false" cty:"assume_role" hcl:"assume_role"` + CustomEndpointEc2 *string `mapstructure:"custom_endpoint_ec2" required:"false" cty:"custom_endpoint_ec2" hcl:"custom_endpoint_ec2"` + CredsFilename *string `mapstructure:"shared_credentials_file" required:"false" cty:"shared_credentials_file" hcl:"shared_credentials_file"` + DecodeAuthZMessages *bool `mapstructure:"decode_authorization_messages" required:"false" cty:"decode_authorization_messages" hcl:"decode_authorization_messages"` + InsecureSkipTLSVerify *bool `mapstructure:"insecure_skip_tls_verify" required:"false" cty:"insecure_skip_tls_verify" hcl:"insecure_skip_tls_verify"` + MaxRetries *int `mapstructure:"max_retries" required:"false" cty:"max_retries" hcl:"max_retries"` + MFACode *string `mapstructure:"mfa_code" required:"false" cty:"mfa_code" hcl:"mfa_code"` + ProfileName *string `mapstructure:"profile" required:"false" cty:"profile" hcl:"profile"` + RawRegion *string `mapstructure:"region" required:"true" cty:"region" hcl:"region"` + SecretKey *string `mapstructure:"secret_key" required:"true" cty:"secret_key" hcl:"secret_key"` + SkipMetadataApiCheck *bool `mapstructure:"skip_metadata_api_check" cty:"skip_metadata_api_check" hcl:"skip_metadata_api_check"` + SkipCredsValidation *bool `mapstructure:"skip_credential_validation" cty:"skip_credential_validation" hcl:"skip_credential_validation"` + Token *string `mapstructure:"token" required:"false" cty:"token" hcl:"token"` + VaultAWSEngine *common.FlatVaultAWSEngineOptions `mapstructure:"vault_aws_engine" required:"false" cty:"vault_aws_engine" hcl:"vault_aws_engine"` + PollingConfig *common.FlatAWSPollingConfig `mapstructure:"aws_polling" required:"false" cty:"aws_polling" hcl:"aws_polling"` +} + +// FlatMapstructure returns a new FlatConfig. +// FlatConfig is an auto-generated flat version of Config. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatConfig) +} + +// HCL2Spec returns the hcl spec of a Config. +// This spec is used by HCL to read the fields of Config. +// The decoded values from this spec will then be applied to a FlatConfig. 
+func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { + s := map[string]hcldec.Spec{ + "name": &hcldec.AttrSpec{Name: "name", Type: cty.String, Required: false}, + "key": &hcldec.AttrSpec{Name: "key", Type: cty.String, Required: false}, + "version_id": &hcldec.AttrSpec{Name: "version_id", Type: cty.String, Required: false}, + "version_stage": &hcldec.AttrSpec{Name: "version_stage", Type: cty.String, Required: false}, + "access_key": &hcldec.AttrSpec{Name: "access_key", Type: cty.String, Required: false}, + "assume_role": &hcldec.BlockSpec{TypeName: "assume_role", Nested: hcldec.ObjectSpec((*common.FlatAssumeRoleConfig)(nil).HCL2Spec())}, + "custom_endpoint_ec2": &hcldec.AttrSpec{Name: "custom_endpoint_ec2", Type: cty.String, Required: false}, + "shared_credentials_file": &hcldec.AttrSpec{Name: "shared_credentials_file", Type: cty.String, Required: false}, + "decode_authorization_messages": &hcldec.AttrSpec{Name: "decode_authorization_messages", Type: cty.Bool, Required: false}, + "insecure_skip_tls_verify": &hcldec.AttrSpec{Name: "insecure_skip_tls_verify", Type: cty.Bool, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, + "mfa_code": &hcldec.AttrSpec{Name: "mfa_code", Type: cty.String, Required: false}, + "profile": &hcldec.AttrSpec{Name: "profile", Type: cty.String, Required: false}, + "region": &hcldec.AttrSpec{Name: "region", Type: cty.String, Required: false}, + "secret_key": &hcldec.AttrSpec{Name: "secret_key", Type: cty.String, Required: false}, + "skip_metadata_api_check": &hcldec.AttrSpec{Name: "skip_metadata_api_check", Type: cty.Bool, Required: false}, + "skip_credential_validation": &hcldec.AttrSpec{Name: "skip_credential_validation", Type: cty.Bool, Required: false}, + "token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false}, + "vault_aws_engine": &hcldec.BlockSpec{TypeName: "vault_aws_engine", Nested: hcldec.ObjectSpec((*common.FlatVaultAWSEngineOptions)(nil).HCL2Spec())}, + "aws_polling": &hcldec.BlockSpec{TypeName: "aws_polling", Nested: hcldec.ObjectSpec((*common.FlatAWSPollingConfig)(nil).HCL2Spec())}, + } + return s +} + +// FlatDatasourceOutput is an auto-generated flat version of DatasourceOutput. +// Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. +type FlatDatasourceOutput struct { + Value *string `mapstructure:"value" cty:"value" hcl:"value"` + SecretString *string `mapstructure:"secret_string" cty:"secret_string" hcl:"secret_string"` + SecretBinary *string `mapstructure:"secret_binary" cty:"secret_binary" hcl:"secret_binary"` + VersionId *string `mapstructure:"version_id" cty:"version_id" hcl:"version_id"` +} + +// FlatMapstructure returns a new FlatDatasourceOutput. +// FlatDatasourceOutput is an auto-generated flat version of DatasourceOutput. +// Where the contents a fields with a `mapstructure:,squash` tag are bubbled up. +func (*DatasourceOutput) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } { + return new(FlatDatasourceOutput) +} + +// HCL2Spec returns the hcl spec of a DatasourceOutput. +// This spec is used by HCL to read the fields of DatasourceOutput. +// The decoded values from this spec will then be applied to a FlatDatasourceOutput. 
+func (*FlatDatasourceOutput) HCL2Spec() map[string]hcldec.Spec {
+	s := map[string]hcldec.Spec{
+		"value":         &hcldec.AttrSpec{Name: "value", Type: cty.String, Required: false},
+		"secret_string": &hcldec.AttrSpec{Name: "secret_string", Type: cty.String, Required: false},
+		"secret_binary": &hcldec.AttrSpec{Name: "secret_binary", Type: cty.String, Required: false},
+		"version_id":    &hcldec.AttrSpec{Name: "version_id", Type: cty.String, Required: false},
+	}
+	return s
+}
diff --git a/datasource/amazon/secretsmanager/data_acc_test.go b/datasource/amazon/secretsmanager/data_acc_test.go
new file mode 100644
index 00000000000..2aeec663b21
--- /dev/null
+++ b/datasource/amazon/secretsmanager/data_acc_test.go
@@ -0,0 +1,179 @@
+package secretsmanager
+
+import (
+	"context"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/exec"
+	"regexp"
+	"testing"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/service/secretsmanager"
+	"github.com/hashicorp/packer-plugin-sdk/acctest"
+	"github.com/hashicorp/packer-plugin-sdk/retry"
+	awscommon "github.com/hashicorp/packer/builder/amazon/common"
+	"github.com/hashicorp/packer/builder/amazon/common/awserrors"
+)
+
+func TestAmazonSecretsManager(t *testing.T) {
+	secret := &AmazonSecret{
+		Name:        "packer_datasource_secretsmanager_test_secret",
+		Key:         "packer_test_key",
+		Value:       "this_is_the_packer_test_secret_value",
+		Description: "this is a secret used in a packer acc test",
+	}
+
+	testCase := &acctest.DatasourceTestCase{
+		Name: "amazon_secretsmanager_datasource_basic_test",
+		Setup: func() error {
+			return secret.Create()
+		},
+		Teardown: func() error {
+			return secret.Delete()
+		},
+		Template: testDatasourceBasic,
+		Type:     "amazon-secretsmanager",
+		Check: func(buildCommand *exec.Cmd, logfile string) error {
+			if buildCommand.ProcessState != nil {
+				if buildCommand.ProcessState.ExitCode() != 0 {
+					return fmt.Errorf("Bad exit code. Logfile: %s", logfile)
+				}
+			}
+
+			logs, err := os.Open(logfile)
+			if err != nil {
+				return fmt.Errorf("Unable to find %s", logfile)
+			}
+			defer logs.Close()
+
+			logsBytes, err := ioutil.ReadAll(logs)
+			if err != nil {
+				return fmt.Errorf("Unable to read %s", logfile)
+			}
+			logsString := string(logsBytes)
+
+			valueLog := fmt.Sprintf("null.basic-example: secret value: %s", secret.Value)
+			secretStringLog := fmt.Sprintf("null.basic-example: secret secret_string: %s", fmt.Sprintf("{%s:%s}", secret.Key, secret.Value))
+			versionIdLog := fmt.Sprintf("null.basic-example: secret version_id: %s", aws.StringValue(secret.Info.VersionId))
+			secretValueLog := fmt.Sprintf("null.basic-example: secret value: %s", secret.Value)
+
+			if matched, _ := regexp.MatchString(valueLog+".*", logsString); !matched {
+				t.Fatalf("logs doesn't contain expected value %q", logsString)
+			}
+			if matched, _ := regexp.MatchString(secretStringLog+".*", logsString); !matched {
+				t.Fatalf("logs doesn't contain expected secret_string %q", logsString)
+			}
+			if matched, _ := regexp.MatchString(versionIdLog+".*", logsString); !matched {
+				t.Fatalf("logs doesn't contain expected version_id %q", logsString)
+			}
+			if matched, _ := regexp.MatchString(secretValueLog+".*", logsString); !matched {
+				t.Fatalf("logs doesn't contain expected value %q", logsString)
+			}
+			return nil
+		},
+	}
+	acctest.TestDatasource(t, testCase)
+}
+
+const testDatasourceBasic = `
+data "amazon-secretsmanager" "test" {
+  name = "packer_datasource_secretsmanager_test_secret"
+  key  = "packer_test_key"
+}
+
+locals {
+  value         = data.amazon-secretsmanager.test.value
+  secret_string = data.amazon-secretsmanager.test.secret_string
+  version_id    = data.amazon-secretsmanager.test.version_id
+  secret_value  = jsondecode(data.amazon-secretsmanager.test.secret_string)["packer_test_key"]
+}
+
+source "null" "basic-example" {
+  communicator = "none"
+}
+
+build {
+  sources = [
+    "source.null.basic-example"
+  ]
+
+  provisioner "shell-local" {
+    inline = [
+      "echo secret value: ${local.value}",
+      "echo secret secret_string: ${local.secret_string}",
+      "echo secret version_id: ${local.version_id}",
+      "echo secret value: ${local.secret_value}"
+    ]
+  }
+}
+`
+
+type AmazonSecret struct {
+	Name        string
+	Key         string
+	Value       string
+	Description string
+
+	Info    *secretsmanager.CreateSecretOutput
+	manager *secretsmanager.SecretsManager
+}
+
+func (as *AmazonSecret) Create() error {
+	if as.manager == nil {
+		accessConfig := &awscommon.AccessConfig{}
+		session, err := accessConfig.Session()
+		if err != nil {
+			return fmt.Errorf("Unable to create aws session %s", err.Error())
+		}
+		as.manager = secretsmanager.New(session)
+	}
+
+	newSecret := &secretsmanager.CreateSecretInput{
+		Description:  aws.String(as.Description),
+		Name:         aws.String(as.Name),
+		SecretString: aws.String(fmt.Sprintf(`{%q:%q}`, as.Key, as.Value)),
+	}
+
+	secret := new(secretsmanager.CreateSecretOutput)
+	var err error
+	err = retry.Config{
+		Tries: 11,
+		ShouldRetry: func(err error) bool {
+			if awserrors.Matches(err, "ResourceExistsException", "") {
+				_ = as.Delete()
+				return true
+			}
+			if awserrors.Matches(err, "InvalidRequestException", "already scheduled for deletion") {
+				return true
+			}
+			return false
+		},
+		RetryDelay: (&retry.Backoff{InitialBackoff: 200 * time.Millisecond, MaxBackoff: 30 * time.Second, Multiplier: 2}).Linear,
+	}.Run(context.TODO(), func(_ context.Context) error {
+		secret, err = as.manager.CreateSecret(newSecret)
+		return err
+	})
+	as.Info = secret
+	return err
+}
+
+func (as *AmazonSecret) Delete() error {
+	if as.manager == nil {
+		accessConfig := &awscommon.AccessConfig{}
+		session, err := accessConfig.Session()
+		if err != nil {
+			return fmt.Errorf("Unable to create aws session %s", err.Error())
+		}
+		as.manager = secretsmanager.New(session)
+	}
+
+	secret := &secretsmanager.DeleteSecretInput{
+		ForceDeleteWithoutRecovery: aws.Bool(true),
+		SecretId:                   aws.String(as.Name),
+	}
+	_, err := as.manager.DeleteSecret(secret)
+	return err
+}
diff --git a/datasource/amazon/secretsmanager/data_test.go b/datasource/amazon/secretsmanager/data_test.go
new file mode 100644
index 00000000000..5802b3b29d0
--- /dev/null
+++ b/datasource/amazon/secretsmanager/data_test.go
@@ -0,0 +1,39 @@
+package secretsmanager
+
+import (
+	"testing"
+)
+
+func TestDatasourceConfigure_EmptySecretId(t *testing.T) {
+	datasource := Datasource{
+		config: Config{},
+	}
+	if err := datasource.Configure(nil); err == nil {
+		t.Fatalf("Should error if secret id is not specified")
+	}
+}
+
+func TestDatasourceConfigure_Defaults(t *testing.T) {
+	datasource := Datasource{
+		config: Config{
+			Name: "arn:1223",
+		},
+	}
+	if err := datasource.Configure(nil); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+	if datasource.config.VersionStage != "AWSCURRENT" {
+		t.Fatalf("VersionStage not set correctly")
+	}
+}
+
+func TestDatasourceConfigure(t *testing.T) {
+	datasource := Datasource{
+		config: Config{
+			Name: "arn:1223",
+		},
+	}
+	if err := datasource.Configure(nil); err != nil {
+		t.Fatalf("err: %s", err)
+	}
+}
diff --git a/go.mod b/go.mod
index 875879d924f..b8dee3302cc 100644
--- a/go.mod
+++ b/go.mod
@@ -49,7 +49,7 @@ require (
 	github.com/hashicorp/go-uuid v1.0.2
 	github.com/hashicorp/go-version v1.2.0
 	github.com/hashicorp/hcl/v2 v2.8.0
-	github.com/hashicorp/packer-plugin-sdk v0.0.7
+	github.com/hashicorp/packer-plugin-sdk v0.0.9
 	github.com/hashicorp/vault/api v1.0.4
 	github.com/hetznercloud/hcloud-go v1.15.1
 	github.com/hyperonecom/h1-client-go v0.0.0-20191203060043-b46280e4c4a4
diff --git a/go.sum b/go.sum
index 579ae2ce6d2..1d6317ba2e0 100644
--- a/go.sum
+++ b/go.sum
@@ -359,8 +359,16 @@ github.com/hashicorp/packer v1.6.7-0.20210107234516-6564ee76e807/go.mod h1:fBz28
 github.com/hashicorp/packer-plugin-sdk v0.0.6/go.mod h1:Nvh28f+Jmpp2rcaN79bULTouNkGNDRfHckhHKTAXtyU=
 github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210113192617-8a28198491f7 h1:2N1NAfBCmG1vIkbdlIOb/YbaYXCW40YOllWqMZDjnHM=
 github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210113192617-8a28198491f7/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
+github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210120130732-6167b5e5b2e8 h1:50/m5nP40RaXnXyd0GHHUd+CfkmcYeTNGAY5eXQlBeY=
+github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210120130732-6167b5e5b2e8/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
+github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210121103409-4b079ce99178 h1:AVT2ugu3+UzTDEViAxMFbUzzxgUpSVMMpbuaOEd97HY=
+github.com/hashicorp/packer-plugin-sdk v0.0.7-0.20210121103409-4b079ce99178/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
 github.com/hashicorp/packer-plugin-sdk v0.0.7 h1:adELlId/KOGWXmQ79L+NwYSgKES6811RVXiRCj4FE0s=
 github.com/hashicorp/packer-plugin-sdk v0.0.7/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
+github.com/hashicorp/packer-plugin-sdk v0.0.8 h1:/qyCO9YqALnaHSE++y+//tNy68++4SThZctqTwqikrU=
+github.com/hashicorp/packer-plugin-sdk 
v0.0.8/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
+github.com/hashicorp/packer-plugin-sdk v0.0.9 h1:PWX6g0TeAbev5zhiRR91k3Z0wVCqsivs6xyBTRmPMkQ=
+github.com/hashicorp/packer-plugin-sdk v0.0.9/go.mod h1:YdWTt5w6cYfaQG7IOi5iorL+3SXnz8hI0gJCi8Db/LI=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.2 h1:yJoyfZXo4Pk2p/M/viW+YLibBFiIbKoP79gu7kDAFP0=
 github.com/hashicorp/serf v0.9.2/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
diff --git a/hcl2template/types.datasource.go b/hcl2template/types.datasource.go
index 9298452dc27..0484927b5aa 100644
--- a/hcl2template/types.datasource.go
+++ b/hcl2template/types.datasource.go
@@ -6,6 +6,7 @@ import (
 	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/hcl/v2/hclsyntax"
 	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	hcl2shim "github.com/hashicorp/packer/hcl2template/shim"
 	"github.com/hashicorp/packer/packer"
 	"github.com/zclconf/go-cty/cty"
 )
@@ -107,6 +108,12 @@ func (cfg *PackerConfig) startDatasource(dataSourceStore packer.DatasourceStore,
 		return nil, diags
 	}
 
+	// In case of cty.Unknown values, this will write an equivalent placeholder of the same type.
+	// Unknown types are not recognized by the json marshal during the RPC call and we have to do this here
+	// to avoid json parsing failures when running the validate command.
+	// We don't do this earlier so we can validate whether variable types match correctly in decodeHCL2Spec.
+	decoded = hcl2shim.WriteUnknownPlaceholderValues(decoded)
+
 	if err := datasource.Configure(decoded); err != nil {
 		diags = append(diags, &hcl.Diagnostic{
 			Summary: err.Error(),
diff --git a/hcl2template/types.hcl_post-processor.go b/hcl2template/types.hcl_post-processor.go
index 0a703c594ab..b09bcd73c91 100644
--- a/hcl2template/types.hcl_post-processor.go
+++ b/hcl2template/types.hcl_post-processor.go
@@ -7,6 +7,7 @@ import (
 	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/hcl/v2/hcldec"
 	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	hcl2shim "github.com/hashicorp/packer/hcl2template/shim"
 	"github.com/zclconf/go-cty/cty"
 )
 
@@ -55,6 +56,13 @@ func (p *HCL2PostProcessor) HCL2Prepare(buildVars map[string]interface{}) error
 	if diags.HasErrors() {
 		return diags
 	}
+
+	// In case of cty.Unknown values, this will write an equivalent placeholder of the same type.
+	// Unknown types are not recognized by the json marshal during the RPC call and we have to do this here
+	// to avoid json parsing failures when running the validate command.
+	// We don't do this earlier so we can validate whether variable types match correctly in decodeHCL2Spec.
+	flatPostProcessorCfg = hcl2shim.WriteUnknownPlaceholderValues(flatPostProcessorCfg)
+
 	return p.PostProcessor.Configure(p.builderVariables, flatPostProcessorCfg)
 }
 
diff --git a/hcl2template/types.hcl_provisioner.go b/hcl2template/types.hcl_provisioner.go
index daf06c65a9e..df962f3a0ee 100644
--- a/hcl2template/types.hcl_provisioner.go
+++ b/hcl2template/types.hcl_provisioner.go
@@ -7,6 +7,7 @@ import (
 	"github.com/hashicorp/hcl/v2"
 	"github.com/hashicorp/hcl/v2/hcldec"
 	packersdk "github.com/hashicorp/packer-plugin-sdk/packer"
+	hcl2shim "github.com/hashicorp/packer/hcl2template/shim"
 	"github.com/zclconf/go-cty/cty"
 )
 
@@ -59,6 +60,13 @@ func (p *HCL2Provisioner) HCL2Prepare(buildVars map[string]interface{}) error {
 	if diags.HasErrors() {
 		return diags
 	}
+
+	// In case of cty.Unknown values, this will write an equivalent placeholder of the same type.
+	// Unknown types are not recognized by the json marshal during the RPC call and we have to do this here
+	// to avoid json parsing failures when running the validate command.
+	// We don't do this earlier so we can validate whether variable types match correctly in decodeHCL2Spec.
+	flatProvisionerCfg = hcl2shim.WriteUnknownPlaceholderValues(flatProvisionerCfg)
+
 	return p.Provisioner.Prepare(p.builderVariables, flatProvisionerCfg, p.override)
 }
 
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/datasources.go b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/datasources.go
index a0f23bd65e6..c41dde7426d 100644
--- a/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/datasources.go
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/acctest/datasources.go
@@ -46,6 +46,13 @@ func TestDatasource(t *testing.T, testCase *DatasourceTestCase) {
 		return
 	}
 
+	if testCase.Setup != nil {
+		err := testCase.Setup()
+		if err != nil {
+			t.Fatalf("test %s setup failed: %s", testCase.Name, err)
+		}
+	}
+
 	logfile := fmt.Sprintf("packer_log_%s.txt", testCase.Name)
 	templatePath := fmt.Sprintf("./%s.pkr.hcl", testCase.Name)
 
diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go
index 67ec3b90bbb..b7e77ab5e4c 100644
--- a/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go
+++ b/vendor/github.com/hashicorp/packer-plugin-sdk/template/interpolate/aws/secretsmanager/secretsmanager.go
@@ -6,6 +6,7 @@ import (
 	"encoding/json"
 	"errors"
 	"fmt"
+	"strconv"
 
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/aws/session"
@@ -76,7 +77,7 @@ func (c *Client) GetSecret(spec *SecretSpec) (string, error) {
 }
 
 func getSecretValue(s *SecretString, spec *SecretSpec) (string, error) {
-	var secretValue map[string]string
+	var secretValue map[string]interface{}
 	blob := []byte(s.SecretString)
 
 	//For those plaintext secrets just return the value
@@ -96,13 +97,24 @@ func getSecretValue(s *SecretString, spec *SecretSpec) (string, error) {
 
 	if spec.Key == "" {
 		for _, v := range secretValue {
-			return v, nil
+			return getStringSecretValue(v)
 		}
 	}
 
 	if v, ok := secretValue[spec.Key]; ok {
-		return v, nil
+		return getStringSecretValue(v)
 	}
 
 	return "", fmt.Errorf("No secret found for key %q", spec.Key)
 }
+
+func getStringSecretValue(v interface{}) (string, error) {
+	switch valueType := v.(type) {
+	case string:
+		return valueType, nil
+	case 
float64: + return strconv.FormatFloat(valueType, 'f', 0, 64), nil + default: + return "", fmt.Errorf("Unsupported secret value type: %T", valueType) + } +} diff --git a/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go b/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go index a815dabfbb2..fd262a0a8e0 100644 --- a/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go +++ b/vendor/github.com/hashicorp/packer-plugin-sdk/version/version.go @@ -13,12 +13,12 @@ import ( var GitCommit string // Package version helps plugin creators set and track the sdk version using -const Version = "0.0.7" +const Version = "0.0.9" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release // such as "dev" (in development), "beta", "rc1", etc. -const VersionPrerelease = "" +const VersionPrerelease = "dev" // InitializePluginVersion initializes the SemVer and returns a version var. // If the provided "version" string is not valid, the call to version.Must diff --git a/vendor/modules.txt b/vendor/modules.txt index db5f768a45b..b4702972381 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -355,7 +355,7 @@ github.com/hashicorp/hcl/v2/hclparse github.com/hashicorp/hcl/v2/hclsyntax github.com/hashicorp/hcl/v2/hclwrite github.com/hashicorp/hcl/v2/json -# github.com/hashicorp/packer-plugin-sdk v0.0.7 +# github.com/hashicorp/packer-plugin-sdk v0.0.9 github.com/hashicorp/packer-plugin-sdk/acctest github.com/hashicorp/packer-plugin-sdk/acctest/provisioneracc github.com/hashicorp/packer-plugin-sdk/acctest/testutils diff --git a/website/content/docs/datasources/amazon-ami.mdx b/website/content/docs/datasources/amazon/ami.mdx similarity index 81% rename from website/content/docs/datasources/amazon-ami.mdx rename to website/content/docs/datasources/amazon/ami.mdx index bf2ebe21782..0053dda5ed9 100644 --- a/website/content/docs/datasources/amazon-ami.mdx +++ b/website/content/docs/datasources/amazon/ami.mdx @@ -38,9 +38,4 @@ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. Note that ## Output Data -- `id` - The ID of the AMI. -- `name` - The name of the AMI. -- `creation_date` - The date of creation of the AMI. -- `owner` - The AWS account ID of the owner. -- `owner_name` - The owner alias. -- `tags` - The key/value combination of the tags assigned to the AMI. +@include 'datasource/amazon/ami/DatasourceOutput-not-required.mdx' diff --git a/website/content/docs/datasources/amazon/index.mdx b/website/content/docs/datasources/amazon/index.mdx new file mode 100644 index 00000000000..da4300a02ee --- /dev/null +++ b/website/content/docs/datasources/amazon/index.mdx @@ -0,0 +1,42 @@ +--- +description: | + Packer is able to fetch data from AWS. To achieve this, Packer comes with + data sources to retrieve AMI and secrets information. +page_title: Amazon - Data Sources +sidebar_title: Amazon +--- + +# Amazon Data Sources + +Packer is able to fetch data from AWS. To achieve this, Packer comes with data sources to retrieve AMI and secrets information. +Packer supports the following data sources at the moment: + +- [amazon-ami](/docs/datasources/amazon/ami) - Filter and fetch an Amazon AMI to output all the AMI information. 
+
+- [amazon-secretsmanager](/docs/datasources/amazon/secretsmanager) - Retrieve information
+about a Secrets Manager secret version, including its secret value.
+
+
+## Authentication
+
+Authentication for the Amazon data sources works just like it does for the [Amazon Builders](/docs/builders/amazon). Both
+support the same authentication options, and you can refer to the [Amazon Builders authentication](/docs/builders/amazon#authentication)
+documentation to learn which options are available for data sources.
+
+-> **Note:** A data source will start and execute in its own authentication session. The authentication in the data source
+doesn't relate to the authentication on the Amazon Builders.
+
+Basic example of Amazon data source authentication using `assume_role`:
+
+```hcl
+data "amazon-secretsmanager" "basic-example" {
+  name = "packer_test_secret"
+  key  = "packer_test_key"
+
+  assume_role {
+    role_arn     = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME"
+    session_name = "SESSION_NAME"
+    external_id  = "EXTERNAL_ID"
+  }
+}
+```
diff --git a/website/content/docs/datasources/amazon/secretsmanager.mdx b/website/content/docs/datasources/amazon/secretsmanager.mdx
new file mode 100644
index 00000000000..625493bcd31
--- /dev/null
+++ b/website/content/docs/datasources/amazon/secretsmanager.mdx
@@ -0,0 +1,51 @@
+---
+description: |
+  The Amazon Secrets Manager data source provides information about a Secrets Manager secret version,
+  including its secret value.
+
+page_title: Secrets Manager - Data Source
+sidebar_title: Secrets Manager
+---
+
+# Amazon Secrets Manager Data Source
+
+The Secrets Manager data source provides information about a Secrets Manager secret version,
+including its secret value.
+
+-> **Note:** Data sources are a feature exclusive to HCL2 templates.
+
+Basic example of usage:
+
+```hcl
+data "amazon-secretsmanager" "basic-example" {
+  name          = "packer_test_secret"
+  key           = "packer_test_key"
+  version_stage = "example"
+}
+
+# usage example of the data source output
+locals {
+  value         = data.amazon-secretsmanager.basic-example.value
+  secret_string = data.amazon-secretsmanager.basic-example.secret_string
+  version_id    = data.amazon-secretsmanager.basic-example.version_id
+  secret_value  = jsondecode(data.amazon-secretsmanager.basic-example.secret_string)["packer_test_key"]
+}
+```
+
+Reading key-value pairs from JSON back into a native Packer map can be accomplished
+with the [jsondecode() function](/docs/templates/hcl_templates/functions/encoding/jsondecode).
+
+
+## Configuration Reference
+
+### Required
+
+@include 'datasource/amazon/secretsmanager/Config-required.mdx'
+
+### Optional
+
+@include 'datasource/amazon/secretsmanager/Config-not-required.mdx'
+
+## Output Data
+
+@include 'datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx'
diff --git a/website/content/partials/datasource/amazon/ami/DatasourceOutput-not-required.mdx b/website/content/partials/datasource/amazon/ami/DatasourceOutput-not-required.mdx
new file mode 100644
index 00000000000..19ada68ff2b
--- /dev/null
+++ b/website/content/partials/datasource/amazon/ami/DatasourceOutput-not-required.mdx
@@ -0,0 +1,13 @@
+
+- `id` (string) - The ID of the AMI.
+
+- `name` (string) - The name of the AMI.
+
+- `creation_date` (string) - The date of creation of the AMI.
+
+- `owner` (string) - The AWS account ID of the owner.
+
+- `owner_name` (string) - The owner alias.
+
+- `tags` (map[string]string) - The key/value combination of the tags assigned to the AMI.
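As context for the `creation_date` output listed above: when several AMIs match the configured filters, the ami.mdx example earlier in this patch relies on picking the most recent one. The following is only a rough standalone sketch of that selection rule, not the plugin's actual implementation; the `image` type and sample data are made up for illustration:

```go
package main

import (
	"fmt"
	"sort"
)

// image is a stand-in for the AMI metadata returned by the EC2 API.
type image struct {
	ID           string
	CreationDate string // RFC3339 timestamps in UTC sort correctly as strings
}

func main() {
	candidates := []image{
		{ID: "ami-aaa", CreationDate: "2020-10-01T10:00:00.000Z"},
		{ID: "ami-bbb", CreationDate: "2021-01-12T09:30:00.000Z"},
		{ID: "ami-ccc", CreationDate: "2020-12-24T23:59:00.000Z"},
	}

	// Sort ascending by creation date and keep the last entry.
	sort.Slice(candidates, func(i, j int) bool {
		return candidates[i].CreationDate < candidates[j].CreationDate
	})
	fmt.Println("most recent:", candidates[len(candidates)-1].ID) // ami-bbb
}
```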
diff --git a/website/content/partials/datasource/amazon/secretsmanager/Config-not-required.mdx b/website/content/partials/datasource/amazon/secretsmanager/Config-not-required.mdx new file mode 100644 index 00000000000..b4a94b5cd87 --- /dev/null +++ b/website/content/partials/datasource/amazon/secretsmanager/Config-not-required.mdx @@ -0,0 +1,10 @@ + + +- `key` (string) - Optional key for JSON secrets that contain more than one value. When set, the `value` output will + contain the value for the provided key. + +- `version_id` (string) - Specifies the unique identifier of the version of the secret that you want to retrieve. + Overrides version_stage. + +- `version_stage` (string) - Specifies the secret version that you want to retrieve by the staging label attached to the version. + Defaults to AWSCURRENT. diff --git a/website/content/partials/datasource/amazon/secretsmanager/Config-required.mdx b/website/content/partials/datasource/amazon/secretsmanager/Config-required.mdx new file mode 100644 index 00000000000..c63df688873 --- /dev/null +++ b/website/content/partials/datasource/amazon/secretsmanager/Config-required.mdx @@ -0,0 +1,4 @@ + + +- `name` (string) - Specifies the secret containing the version that you want to retrieve. + You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret. diff --git a/website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx b/website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx new file mode 100644 index 00000000000..6c34f99a95e --- /dev/null +++ b/website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx @@ -0,0 +1,12 @@ + + +- `value` (string) - When a [key](#key) is provided, this will be the value for that key. If a key is not provided, + `value` will contain the first value found in the secret string. + +- `secret_string` (string) - The decrypted part of the protected secret information that + was originally provided as a string. + +- `secret_binary` (string) - The decrypted part of the protected secret information that + was originally provided as a binary. Base64 encoded. + +- `version_id` (string) - The unique identifier of this version of the secret. 
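The value-resolution rules these partials describe (plaintext secrets returned verbatim, JSON secrets decoded and looked up by `key`, numbers rendered back as strings) can be approximated standalone. The sketch below is illustrative only; the `lookup` helper and its sample secret are hypothetical, loosely mirroring the `getSecretValue`/`getStringSecretValue` helpers added earlier in this patch:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

// lookup returns a plaintext secret as-is; for a JSON secret it decodes
// the object and extracts the value stored under key.
func lookup(secretString, key string) (string, error) {
	if !json.Valid([]byte(secretString)) {
		return secretString, nil // plaintext secret
	}
	var kv map[string]interface{}
	if err := json.Unmarshal([]byte(secretString), &kv); err != nil {
		return "", err
	}
	switch v := kv[key].(type) {
	case string:
		return v, nil
	case float64: // encoding/json decodes every JSON number as float64
		return strconv.FormatFloat(v, 'f', 0, 64), nil
	default:
		return "", fmt.Errorf("unsupported secret value type: %T", v)
	}
}

func main() {
	v, _ := lookup(`{"packer_test_key":"s3cr3t","port":5432}`, "port")
	fmt.Println(v) // prints 5432
}
```

Note the `float64` case: `encoding/json` decodes all JSON numbers into `float64`, which is why numeric secret values have to round-trip through `strconv.FormatFloat` to come back out as strings.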
diff --git a/website/data/docs-navigation.js b/website/data/docs-navigation.js index dc50183311b..66adf5edd7f 100644 --- a/website/data/docs-navigation.js +++ b/website/data/docs-navigation.js @@ -15,19 +15,19 @@ export default [ { category: 'templates', content: [ - { - category: "legacy_json_templates", - content: [ - 'builders', - 'communicator', - 'engine', - 'post-processors', - 'provisioners', - 'user-variables', - ] - }, - { - category: 'hcl_templates', + { + category: "legacy_json_templates", + content: [ + 'builders', + 'communicator', + 'engine', + 'post-processors', + 'provisioners', + 'user-variables', + ] + }, + { + category: 'hcl_templates', content: [ { category: 'blocks', @@ -191,7 +191,7 @@ export default [ ], }, '----------', - { category: 'communicators', content: ['ssh', 'winrm'] }, + {category: 'communicators', content: ['ssh', 'winrm']}, { category: 'builders', content: [ @@ -211,7 +211,7 @@ export default [ 'googlecompute', 'hetzner-cloud', 'hyperone', - { category: 'hyperv', content: ['iso', 'vmcx'] }, + {category: 'hyperv', content: ['iso', 'vmcx']}, 'linode', 'lxc', 'lxd', @@ -219,14 +219,14 @@ export default [ 'null', 'oneandone', 'openstack', - { category: 'oracle', content: ['classic', 'oci'] }, + {category: 'oracle', content: ['classic', 'oci']}, { category: 'outscale', content: ['chroot', 'bsu', 'bsusurrogate', 'bsuvolume'], }, - { category: 'parallels', content: ['iso', 'pvm'] }, + {category: 'parallels', content: ['iso', 'pvm']}, 'profitbricks', - { category: 'proxmox', content: ['iso', 'clone'] }, + {category: 'proxmox', content: ['iso', 'clone']}, 'qemu', 'scaleway', 'tencentcloud-cvm', @@ -247,7 +247,18 @@ export default [ 'community-supported', ], }, - { category: 'datasources', content: ['amazon-ami'] }, + { + category: 'datasources', + content: [ + { + category: 'amazon', + content: [ + 'ami', + 'secretsmanager' + ], + }, + ] + }, { category: 'provisioners', content: [ From 7d0578c5b790d2b89489d2096b3dd733798adebe Mon Sep 17 00:00:00 2001 From: Sylvia Moss Date: Fri, 22 Jan 2021 17:55:32 +0100 Subject: [PATCH 06/14] add DatasourceOutput type to struct-markdown cmd (#10512) --- cmd/struct-markdown/main.go | 13 ++++++++++++- website/content/docs/datasources/amazon/ami.mdx | 2 +- .../docs/datasources/amazon/secretsmanager.mdx | 2 +- ...Output-not-required.mdx => DatasourceOutput.mdx} | 0 ...Output-not-required.mdx => DatasourceOutput.mdx} | 0 5 files changed, 14 insertions(+), 3 deletions(-) rename website/content/partials/datasource/amazon/ami/{DatasourceOutput-not-required.mdx => DatasourceOutput.mdx} (100%) rename website/content/partials/datasource/amazon/secretsmanager/{DatasourceOutput-not-required.mdx => DatasourceOutput.mdx} (100%) diff --git a/cmd/struct-markdown/main.go b/cmd/struct-markdown/main.go index ed3e583929e..9a6394630b1 100644 --- a/cmd/struct-markdown/main.go +++ b/cmd/struct-markdown/main.go @@ -75,6 +75,11 @@ func main() { Filename: typeSpec.Name.Name + ".mdx", Header: strings.TrimSpace(typeDecl.Doc.Text()), } + dataSourceOutput := Struct{ + SourcePath: sourcePath, + Name: typeSpec.Name.Name, + Filename: typeSpec.Name.Name + ".mdx", + } required := Struct{ SourcePath: sourcePath, Name: typeSpec.Name.Name, @@ -145,6 +150,12 @@ func main() { Type: fieldType, Docs: docs, } + + if typeSpec.Name.Name == "DatasourceOutput" { + dataSourceOutput.Fields = append(dataSourceOutput.Fields, field) + continue + } + if req, err := tags.Get("required"); err == nil && req.Value() == "true" { required.Fields = append(required.Fields, field) } else { 
@@ -155,7 +166,7 @@ func main() { dir := filepath.Join(projectRoot, "website", "content", "partials", builderName) os.MkdirAll(dir, 0755) - for _, str := range []Struct{header, required, notRequired} { + for _, str := range []Struct{header, dataSourceOutput, required, notRequired} { if len(str.Fields) == 0 && len(str.Header) == 0 { continue } diff --git a/website/content/docs/datasources/amazon/ami.mdx b/website/content/docs/datasources/amazon/ami.mdx index 0053dda5ed9..b8b1a2a637b 100644 --- a/website/content/docs/datasources/amazon/ami.mdx +++ b/website/content/docs/datasources/amazon/ami.mdx @@ -38,4 +38,4 @@ This selects the most recent Ubuntu 16.04 HVM EBS AMI from Canonical. Note that ## Output Data -@include 'datasource/amazon/ami/DatasourceOutput-not-required.mdx' +@include 'datasource/amazon/ami/DatasourceOutput.mdx' diff --git a/website/content/docs/datasources/amazon/secretsmanager.mdx b/website/content/docs/datasources/amazon/secretsmanager.mdx index 625493bcd31..352cf8306fb 100644 --- a/website/content/docs/datasources/amazon/secretsmanager.mdx +++ b/website/content/docs/datasources/amazon/secretsmanager.mdx @@ -48,4 +48,4 @@ with the [jsondecode() function](/docs/templates/hcl_templates/functions/encodin ## Output Data -@include 'datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx' +@include 'datasource/amazon/secretsmanager/DatasourceOutput.mdx' diff --git a/website/content/partials/datasource/amazon/ami/DatasourceOutput-not-required.mdx b/website/content/partials/datasource/amazon/ami/DatasourceOutput.mdx similarity index 100% rename from website/content/partials/datasource/amazon/ami/DatasourceOutput-not-required.mdx rename to website/content/partials/datasource/amazon/ami/DatasourceOutput.mdx diff --git a/website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx b/website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput.mdx similarity index 100% rename from website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput-not-required.mdx rename to website/content/partials/datasource/amazon/secretsmanager/DatasourceOutput.mdx From 3aab42b770f519e20d5ddbc60b2eea563c732ed7 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Fri, 22 Jan 2021 09:36:17 -0800 Subject: [PATCH 07/14] remove confusing from_scratch reference which is not relevant in this config --- builder/amazon/common/run_config.go | 3 +-- .../partials/builder/amazon/common/RunConfig-required.mdx | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/builder/amazon/common/run_config.go b/builder/amazon/common/run_config.go index f67c0d5da85..d9019e4ce31 100644 --- a/builder/amazon/common/run_config.go +++ b/builder/amazon/common/run_config.go @@ -196,8 +196,7 @@ type RunConfig struct { SecurityGroupIds []string `mapstructure:"security_group_ids" required:"false"` // The source AMI whose root volume will be copied and // provisioned on the currently running instance. This must be an EBS-backed - // AMI with a root volume snapshot that you have access to. Note: this is not - // used when from_scratch is set to true. + // AMI with a root volume snapshot that you have access to. SourceAmi string `mapstructure:"source_ami" required:"true"` // Filters used to populate the `source_ami` // field. 
JSON Example: diff --git a/website/content/partials/builder/amazon/common/RunConfig-required.mdx b/website/content/partials/builder/amazon/common/RunConfig-required.mdx index a45dcb33535..9afb503089f 100644 --- a/website/content/partials/builder/amazon/common/RunConfig-required.mdx +++ b/website/content/partials/builder/amazon/common/RunConfig-required.mdx @@ -5,5 +5,4 @@ - `source_ami` (string) - The source AMI whose root volume will be copied and provisioned on the currently running instance. This must be an EBS-backed - AMI with a root volume snapshot that you have access to. Note: this is not - used when from_scratch is set to true. + AMI with a root volume snapshot that you have access to. From 48a31d1b6a86b5b7d86dc22594f80e2f0c2fc90c Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Mon, 25 Jan 2021 04:43:41 -0500 Subject: [PATCH 08/14] Slight markdown fixes (#10517) --- website/content/guides/1.7-plugin-upgrade/index.mdx | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/website/content/guides/1.7-plugin-upgrade/index.mdx b/website/content/guides/1.7-plugin-upgrade/index.mdx index 5a2ee53ed98..e011df7c7be 100644 --- a/website/content/guides/1.7-plugin-upgrade/index.mdx +++ b/website/content/guides/1.7-plugin-upgrade/index.mdx @@ -20,9 +20,10 @@ Packer is currently backwards compatible with the old API because the plugin API In a best-case scenario, all you'll have to do is update the packer imports to use the packer-plugin-sdk import path -github.com/hashicorp/packer with github.com/hashicorp/packer-plugin-sdk. +`github.com/hashicorp/packer` with `github.com/hashicorp/packer-plugin-sdk`. + +But some of the import paths have changed more than that because we've refactored the SDK some to make it easier to discover and use helpful modules. Below are a few common import paths. For a full list of available imports see [Packer Plugin SDK Docs](https://pkg.go.dev/github.com/hashicorp/packer-plugin-sdk/) -But some of the import paths have changed more than that because we've refactored the SDK some to make it easier to discover and use helpful modules. Below are a few common import paths. For a full list of available imports see [Packer Plugin SDK Docs](https://pkg.go.dev/github.com/hashicorp/packer-plugin-sdk/)``` | Old Path | New Path | | ---------| -------- | @@ -145,7 +146,7 @@ In the single binary setup, this you'd have used the name "bar" in your packer t ## Using the Set Architecture for Single Plugins -*** THIS IS ON THE PACKER MASTER BRANCH AND WILL BE AVAILABLE IN V1.7.0 +***THIS IS ON THE PACKER MASTER BRANCH AND WILL BE AVAILABLE IN V1.7.0*** The naming described above could be awkward for users who have given plugins singular names. For example, in a prior world, you may have created a provisioner saved as `packer-provisioner-foo` and accessed it in your template using `"type": "foo"`; Now, if you install a plugin named `packer-plugin-foo`, and register it using NewSet() and RegisterProvisioner() with the name "foo", you'll have to access it in your template using `"type": "foo-foo"` which stutters and breaks backwards compatibility. 
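For reference, a minimal `main.go` for the single-plugin layout the guide describes might look like the sketch below. The `packer-plugin-foo` module and its `foo.Provisioner` are hypothetical; the registration API mirrors the `plugin.NewSet` usage shown in `cmd/packer-plugin-amazon/main.go` earlier in this series:

```go
package main

import (
	"fmt"
	"os"

	"github.com/hashicorp/packer-plugin-sdk/plugin"

	// hypothetical provisioner package, for illustration only
	foo "example.com/packer-plugin-foo/provisioner"
)

func main() {
	pps := plugin.NewSet()
	// Registered under "foo" in a plugin named packer-plugin-foo, this
	// provisioner is addressed in templates as "foo-foo" (plugin name +
	// component name) — the stutter the guide above discusses.
	pps.RegisterProvisioner("foo", new(foo.Provisioner))
	if err := pps.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err.Error())
		os.Exit(1)
	}
}
```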
From 3242b7ee10e96dfa95b2201ea9d73c410c21d7c8 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 25 Jan 2021 01:49:37 -0800 Subject: [PATCH 09/14] read iops and throughput as pointers so we can test for the nil case;… (#10518) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- builder/amazon/common/block_device.go | 18 +++---- builder/amazon/common/block_device_test.go | 48 +++++++++---------- .../common/BlockDevice-not-required.mdx | 4 +- 3 files changed, 35 insertions(+), 35 deletions(-) diff --git a/builder/amazon/common/block_device.go b/builder/amazon/common/block_device.go index a7cc95b58a4..8957925f651 100644 --- a/builder/amazon/common/block_device.go +++ b/builder/amazon/common/block_device.go @@ -76,7 +76,7 @@ type BlockDevice struct { // See the documentation on // [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) // for more information - IOPS int64 `mapstructure:"iops" required:"false"` + IOPS *int64 `mapstructure:"iops" required:"false"` // Suppresses the specified device included in the block device mapping of // the AMI. NoDevice bool `mapstructure:"no_device" required:"false"` @@ -86,7 +86,7 @@ type BlockDevice struct { // See the documentation on // [Throughput](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) // for more information - Throughput int64 `mapstructure:"throughput" required:"false"` + Throughput *int64 `mapstructure:"throughput" required:"false"` // The virtual device name. See the documentation on Block Device Mapping // for more information. VirtualName string `mapstructure:"virtual_name" required:"false"` @@ -150,12 +150,12 @@ func (blockDevice BlockDevice) BuildEC2BlockDeviceMapping() *ec2.BlockDeviceMapp switch blockDevice.VolumeType { case "io1", "io2", "gp3": - ebsBlockDevice.Iops = aws.Int64(blockDevice.IOPS) + ebsBlockDevice.Iops = blockDevice.IOPS } // Throughput is only valid for gp3 types if blockDevice.VolumeType == "gp3" { - ebsBlockDevice.Throughput = aws.Int64(blockDevice.Throughput) + ebsBlockDevice.Throughput = blockDevice.Throughput } // You cannot specify Encrypted if you specify a Snapshot ID @@ -191,28 +191,28 @@ func (b *BlockDevice) Prepare(ctx *interpolate.Context) error { } if ratio, ok := iopsRatios[b.VolumeType]; b.VolumeSize != 0 && ok { - if b.IOPS/b.VolumeSize > ratio { + if b.IOPS != nil && (*b.IOPS/b.VolumeSize > ratio) { return fmt.Errorf("%s: the maximum ratio of provisioned IOPS to requested volume size "+ "(in GiB) is %v:1 for %s volumes", b.DeviceName, ratio, b.VolumeType) } - if b.IOPS < minIops || b.IOPS > maxIops { + if b.IOPS != nil && (*b.IOPS < minIops || *b.IOPS > maxIops) { return fmt.Errorf("IOPS must be between %d and %d for device %s", minIops, maxIops, b.DeviceName) } } if b.VolumeType == "gp3" { - if b.Throughput < minThroughput || b.Throughput > maxThroughput { + if b.Throughput != nil && (*b.Throughput < minThroughput || *b.Throughput > maxThroughput) { return fmt.Errorf("Throughput must be between %d and %d for device %s", minThroughput, maxThroughput, b.DeviceName) } - if b.IOPS < minIopsGp3 || b.IOPS > maxIopsGp3 { + if b.IOPS != nil && (*b.IOPS < minIopsGp3 || *b.IOPS > maxIopsGp3) { return fmt.Errorf("IOPS must be between %d and %d for device %s", minIopsGp3, maxIopsGp3, b.DeviceName) } - } else if b.Throughput > 0 { + } else if b.Throughput != nil { return fmt.Errorf("Throughput is not available for device %s",
b.DeviceName) } diff --git a/builder/amazon/common/block_device_test.go b/builder/amazon/common/block_device_test.go index 67e81746777..8d33b918e77 100644 --- a/builder/amazon/common/block_device_test.go +++ b/builder/amazon/common/block_device_test.go @@ -54,7 +54,7 @@ func TestBlockDevice(t *testing.T) { VolumeType: "io1", VolumeSize: 8, DeleteOnTermination: true, - IOPS: 1000, + IOPS: aws.Int64(1000), }, Result: &ec2.BlockDeviceMapping{ @@ -73,7 +73,7 @@ func TestBlockDevice(t *testing.T) { VolumeType: "io2", VolumeSize: 8, DeleteOnTermination: true, - IOPS: 1000, + IOPS: aws.Int64(1000), }, Result: &ec2.BlockDeviceMapping{ @@ -168,8 +168,8 @@ func TestBlockDevice(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "gp3", VolumeSize: 8, - Throughput: 125, - IOPS: 3000, + Throughput: aws.Int64(125), + IOPS: aws.Int64(3000), DeleteOnTermination: true, Encrypted: config.TriTrue, }, @@ -219,7 +219,7 @@ func TestIOPSValidation(t *testing.T) { device: BlockDevice{ DeviceName: "/dev/sdb", VolumeType: "io1", - IOPS: 1000, + IOPS: aws.Int64(1000), }, ok: true, }, @@ -227,7 +227,7 @@ func TestIOPSValidation(t *testing.T) { device: BlockDevice{ DeviceName: "/dev/sdb", VolumeType: "io2", - IOPS: 1000, + IOPS: aws.Int64(1000), }, ok: true, }, @@ -237,7 +237,7 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "io1", VolumeSize: 50, - IOPS: 1000, + IOPS: aws.Int64(1000), }, ok: true, }, @@ -246,7 +246,7 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "io2", VolumeSize: 100, - IOPS: 1000, + IOPS: aws.Int64(1000), }, ok: true, }, @@ -256,7 +256,7 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "io1", VolumeSize: 10, - IOPS: 2000, + IOPS: aws.Int64(2000), }, ok: false, msg: "/dev/sdb: the maximum ratio of provisioned IOPS to requested volume size (in GiB) is 50:1 for io1 volumes", @@ -266,7 +266,7 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "io2", VolumeSize: 50, - IOPS: 30000, + IOPS: aws.Int64(30000), }, ok: false, msg: "/dev/sdb: the maximum ratio of provisioned IOPS to requested volume size (in GiB) is 500:1 for io2 volumes", @@ -277,7 +277,7 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "io2", VolumeSize: 500, - IOPS: 99999, + IOPS: aws.Int64(99999), }, ok: false, msg: "IOPS must be between 100 and 64000 for device /dev/sdb", @@ -288,7 +288,7 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "io2", VolumeSize: 50, - IOPS: 10, + IOPS: aws.Int64(10), }, ok: false, msg: "IOPS must be between 100 and 64000 for device /dev/sdb", @@ -299,8 +299,8 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "gp3", VolumeSize: 50, - Throughput: 125, - IOPS: 99999, + Throughput: aws.Int64(125), + IOPS: aws.Int64(99999), }, ok: false, msg: "IOPS must be between 3000 and 16000 for device /dev/sdb", @@ -311,8 +311,8 @@ func TestIOPSValidation(t *testing.T) { DeviceName: "/dev/sdb", VolumeType: "gp3", VolumeSize: 50, - Throughput: 125, - IOPS: 10, + Throughput: aws.Int64(125), + IOPS: aws.Int64(10), }, ok: false, msg: "IOPS must be between 3000 and 16000 for device /dev/sdb", @@ -346,8 +346,8 @@ func TestThroughputValidation(t *testing.T) { device: BlockDevice{ DeviceName: "/dev/sdb", VolumeType: "gp3", - Throughput: 125, - IOPS: 3000, + Throughput: aws.Int64(125), + IOPS: aws.Int64(3000), }, ok: true, }, @@ -355,8 +355,8 @@ func TestThroughputValidation(t *testing.T) { device: BlockDevice{ DeviceName: 
"/dev/sdb", VolumeType: "gp3", - Throughput: 1000, - IOPS: 3000, + Throughput: aws.Int64(1000), + IOPS: aws.Int64(3000), }, ok: true, }, @@ -365,8 +365,8 @@ func TestThroughputValidation(t *testing.T) { device: BlockDevice{ DeviceName: "/dev/sdb", VolumeType: "gp3", - Throughput: 1001, - IOPS: 3000, + Throughput: aws.Int64(1001), + IOPS: aws.Int64(3000), }, ok: false, msg: "Throughput must be between 125 and 1000 for device /dev/sdb", @@ -376,8 +376,8 @@ func TestThroughputValidation(t *testing.T) { device: BlockDevice{ DeviceName: "/dev/sdb", VolumeType: "gp3", - Throughput: 124, - IOPS: 3000, + Throughput: aws.Int64(124), + IOPS: aws.Int64(3000), }, ok: false, msg: "Throughput must be between 125 and 1000 for device /dev/sdb", diff --git a/website/content/partials/builder/amazon/common/BlockDevice-not-required.mdx b/website/content/partials/builder/amazon/common/BlockDevice-not-required.mdx index d35ef6f2bb0..f9f7b5c3179 100644 --- a/website/content/partials/builder/amazon/common/BlockDevice-not-required.mdx +++ b/website/content/partials/builder/amazon/common/BlockDevice-not-required.mdx @@ -13,7 +13,7 @@ false will result in an unencrypted device, and true will result in an encrypted one. -- `iops` (int64) - The number of I/O operations per second (IOPS) that the volume supports. +- `iops` (\*int64) - The number of I/O operations per second (IOPS) that the volume supports. See the documentation on [IOPs](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) for more information @@ -23,7 +23,7 @@ - `snapshot_id` (string) - The ID of the snapshot. -- `throughput` (int64) - The throughput for gp3 volumes, only valid for gp3 types +- `throughput` (\*int64) - The throughput for gp3 volumes, only valid for gp3 types See the documentation on [Throughput](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_EbsBlockDevice.html) for more information From 37dcf6183c9ae2fe57266ef3929f6d2576efcad4 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 25 Jan 2021 01:53:51 -0800 Subject: [PATCH 10/14] skip credential validation if we are not exporting an image (#10520) --- builder/vmware/common/driver_esx5.go | 14 ++++++++++---- builder/vmware/common/driver_esx5_test.go | 10 ++++++++++ 2 files changed, 20 insertions(+), 4 deletions(-) diff --git a/builder/vmware/common/driver_esx5.go b/builder/vmware/common/driver_esx5.go index 277197e0171..150ec3d703d 100644 --- a/builder/vmware/common/driver_esx5.go +++ b/builder/vmware/common/driver_esx5.go @@ -328,21 +328,27 @@ func (d *ESX5Driver) Verify() error { } func (d *ESX5Driver) VerifyOvfTool(SkipExport, skipValidateCredentials bool) error { + // We don't use ovftool if we aren't exporting a VM; return without error + // if ovftool isn't on path. 
+ if SkipExport { + return nil + } + err := d.base.VerifyOvfTool(SkipExport, skipValidateCredentials) if err != nil { return err } + if skipValidateCredentials { + return nil + } + log.Printf("Verifying that ovftool credentials are valid...") // check that password is valid by sending a dummy ovftool command // now, so that we don't fail for a simple mistake after a long // build ovftool := GetOVFTool() - if skipValidateCredentials { - return nil - } - if d.Password == "" { return fmt.Errorf("exporting the vm from esxi with ovftool requires " + "that you set a value for remote_password") diff --git a/builder/vmware/common/driver_esx5_test.go b/builder/vmware/common/driver_esx5_test.go index 4fceb6a39cd..d51ac56cd64 100644 --- a/builder/vmware/common/driver_esx5_test.go +++ b/builder/vmware/common/driver_esx5_test.go @@ -92,3 +92,13 @@ func TestESX5Driver_CommHost(t *testing.T) { t.Errorf("bad vm_address: %s", address.(string)) } } + +func TestESX5Driver_VerifyOvfTool(t *testing.T) { + driver := ESX5Driver{} + // should always skip validation if export is skipped, so this should always + // pass even when ovftool is not installed. + err := driver.VerifyOvfTool(true, false) + if err != nil { + t.Fatalf("shouldn't fail ever because should always skip check") + } +} From c3266cc3b076c0eda69a637f175a8cf54ac7db36 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Mon, 25 Jan 2021 01:54:41 -0800 Subject: [PATCH 11/14] remove indexing from cdrom command and let qemu handle it (#10519) --- builder/qemu/step_run.go | 2 +- builder/qemu/step_run_test.go | 38 +++++++++++++++++------------------ 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/builder/qemu/step_run.go b/builder/qemu/step_run.go index 5360955f40e..86611e9e8a6 100644 --- a/builder/qemu/step_run.go +++ b/builder/qemu/step_run.go @@ -260,7 +260,7 @@ func (s *stepRun) getDeviceAndDriveArgs(config *Config, state multistep.StateBag } for i, cdPath := range cdPaths { if config.CDROMInterface == "" { - driveArgs = append(driveArgs, fmt.Sprintf("file=%s,index=%d,media=cdrom", cdPath, i)) + driveArgs = append(driveArgs, fmt.Sprintf("file=%s,media=cdrom", cdPath)) } else if config.CDROMInterface == "virtio-scsi" { driveArgs = append(driveArgs, fmt.Sprintf("file=%s,if=none,index=%d,id=cdrom%d,media=cdrom", cdPath, i, i)) deviceArgs = append(deviceArgs, "virtio-scsi-device", fmt.Sprintf("scsi-cd,drive=cdrom%d", i)) diff --git a/builder/qemu/step_run_test.go b/builder/qemu/step_run_test.go index 533b5317a70..2139c8b8a31 100644 --- a/builder/qemu/step_run_test.go +++ b/builder/qemu/step_run_test.go @@ -52,7 +52,7 @@ func Test_UserOverrides(t *testing.T) { }, []string{ "-display", "gtk", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", "-randomflag1", "127.0.0.1-1234-http/directory", "-randomflag2", "output/directory-myvm", "-device", ",netdev=user.0", @@ -66,7 +66,7 @@ func Test_UserOverrides(t *testing.T) { }, []string{ "-display", "partydisplay", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", "-device", ",netdev=user.0", }, "User input overrides default, rest is populated as normal", @@ -81,7 +81,7 @@ func Test_UserOverrides(t *testing.T) { "-display", "gtk", "-device", "somerandomdevice", "-device", "mynetdevice,netdev=user.0", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", }, "Net device gets added", }, @@ -135,7 +135,7 @@ func Test_DriveAndDeviceArgs(t 
*testing.T) { []string{ "-display", "gtk", "-boot", "once=d", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", }, "Boot value should default to once=d when diskImage isn't set", }, @@ -164,7 +164,7 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-device", "virtio-scsi-pci,id=scsi0", "-device", "scsi-hd,bus=scsi0.0,drive=drive0", "-drive", "if=none,file=path_to_output,id=drive0,cache=writeback,discard=,format=qcow2", - "-drive", "file=fake_cd_path.iso,index=0,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", }, "virtio-scsi interface, DiskImage true, extra cdrom, detectZeroes off", }, @@ -193,7 +193,7 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-device", "virtio-scsi-pci,id=scsi0", "-device", "scsi-hd,bus=scsi0.0,drive=drive0", "-drive", "if=none,file=path_to_output,id=drive0,cache=writeback,discard=,format=qcow2,detect-zeroes=on", - "-drive", "file=fake_cd_path.iso,index=0,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", }, "virtio-scsi interface, DiskImage true, extra cdrom, detectZeroes on", }, @@ -224,8 +224,8 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-device", "scsi-hd,bus=scsi0.0,drive=drive1", "-drive", "if=none,file=qemupath1,id=drive0,cache=writeback,discard=,format=qcow2", "-drive", "if=none,file=qemupath2,id=drive1,cache=writeback,discard=,format=qcow2", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", - "-drive", "file=fake_cd_path.iso,index=1,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", }, "virtio-scsi interface, bootable iso, cdrom", }, @@ -256,8 +256,8 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-device", "scsi-hd,bus=scsi0.0,drive=drive1", "-drive", "if=none,file=qemupath1,id=drive0,cache=writeback,discard=,format=qcow2,detect-zeroes=on", "-drive", "if=none,file=qemupath2,id=drive1,cache=writeback,discard=,format=qcow2,detect-zeroes=on", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", - "-drive", "file=fake_cd_path.iso,index=1,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", }, "virtio-scsi interface, DiskImage false, extra cdrom, detect zeroes on", }, @@ -284,7 +284,7 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-device", "virtio-scsi-pci,id=scsi0", "-device", "scsi-hd,bus=scsi0.0,drive=drive0", "-drive", "if=none,file=output/dir/path/mydisk.qcow2,id=drive0,cache=writeback,discard=,format=qcow2,detect-zeroes=", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", }, "virtio-scsi interface, DiskImage false, no extra disks or cds", }, @@ -302,8 +302,8 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-display", "gtk", "-boot", "once=d", "-drive", "file=output/dir/path/mydisk.qcow2,if=,cache=,discard=,format=,detect-zeroes=", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", - "-drive", "file=fake_cd_path.iso,index=1,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", }, "cd_path is set and DiskImage is false", }, @@ -322,7 +322,7 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-display", "gtk", "-boot", "once=d", "-drive", "file=output/dir/path/mydisk.qcow2,if=,cache=,discard=,format=,detect-zeroes=", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", }, "empty config", }, @@ -345,7 +345,7 @@ func Test_DriveAndDeviceArgs(t *testing.T) { 
[]string{ "-boot", "once=d", "-drive", "file=path_to_output,if=virtio,cache=writeback,format=qcow2", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", }, "version less than 2", }, @@ -369,8 +369,8 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-boot", "once=d", "-drive", "file=qemupath1,if=virtio,cache=writeback,discard=,format=qcow2,detect-zeroes=", "-drive", "file=qemupath2,if=virtio,cache=writeback,discard=,format=qcow2,detect-zeroes=", - "-drive", "file=fake_cd_path.iso,index=1,media=cdrom", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", }, "virtio interface with extra disks", }, @@ -395,7 +395,7 @@ func Test_DriveAndDeviceArgs(t *testing.T) { "-display", "gtk", "-boot", "c", "-drive", "file=path_to_output,if=virtio,cache=writeback,discard=,format=qcow2,detect-zeroes=", - "-drive", "file=fake_cd_path.iso,index=0,media=cdrom", + "-drive", "file=fake_cd_path.iso,media=cdrom", }, "virtio interface with disk image", }, @@ -456,7 +456,7 @@ func Test_OptionalConfigOptionsGetSet(t *testing.T) { "-vnc", ":5,password", "-machine", "type=pc,accel=hvf", "-device", ",netdev=user.0", - "-drive", "file=/path/to/test.iso,index=0,media=cdrom", + "-drive", "file=/path/to/test.iso,media=cdrom", "-qmp", "unix:qmp_path,server,nowait", } From e588029d6a3e8e3ac52b9fee980316faaa96071b Mon Sep 17 00:00:00 2001 From: Roman Mingazeev Date: Mon, 25 Jan 2021 14:26:21 +0300 Subject: [PATCH 12/14] yandex: some fix (#10522) --- builder/yandex/cloud_init.go | 22 +- builder/yandex/step_create_image.go | 1 + builder/yandex/util.go | 7 +- .../yandex-export/post-processor.go | 32 +-- .../yandex-export/post-processor.hcl2spec.go | 239 ++++++++++++------ .../docs/post-processors/yandex-export.mdx | 2 + .../yandex-export/Config-not-required.mdx | 6 - 7 files changed, 187 insertions(+), 122 deletions(-) diff --git a/builder/yandex/cloud_init.go b/builder/yandex/cloud_init.go index d013307ab28..a55993f7944 100644 --- a/builder/yandex/cloud_init.go +++ b/builder/yandex/cloud_init.go @@ -34,16 +34,18 @@ func MergeCloudUserMetaData(usersData ...string) (string, error) { } for i, userData := range usersData { - w, err := data.CreatePart(textproto.MIMEHeader{ - "Content-Disposition": {fmt.Sprintf("attachment; filename=\"user-data-%d\"", i)}, - "Content-Type": {detectContentType(userData)}, - }) - if err != nil { - return "", err - } - _, err = w.Write([]byte(userData)) - if err != nil { - return "", err + if len(userData) != 0 { + w, err := data.CreatePart(textproto.MIMEHeader{ + "Content-Disposition": {fmt.Sprintf("attachment; filename=\"user-data-%d\"", i)}, + "Content-Type": {detectContentType(userData)}, + }) + if err != nil { + return "", err + } + _, err = w.Write([]byte(userData)) + if err != nil { + return "", err + } } } return buff.String(), nil diff --git a/builder/yandex/step_create_image.go b/builder/yandex/step_create_image.go index 66dcd144fa2..15166e1b7c8 100644 --- a/builder/yandex/step_create_image.go +++ b/builder/yandex/step_create_image.go @@ -53,6 +53,7 @@ func (s *stepCreateImage) Run(ctx context.Context, state multistep.StateBag) mul if err != nil { return StepHaltWithError(state, err) } + ui.Say("Success image create...") image, ok := resp.(*compute.Image) if !ok { diff --git a/builder/yandex/util.go b/builder/yandex/util.go index c8d39e2c5dc..9a373fcf79a 100644 --- a/builder/yandex/util.go +++ b/builder/yandex/util.go @@ -31,13 +31,14 @@ 
func writeSerialLogFile(ctx context.Context, state multistep.StateBag, serialLog sdk := state.Get("sdk").(*ycsdk.SDK) ui := state.Get("ui").(packersdk.Ui) - instanceID := state.Get("instance_id").(string) - if instanceID == "" { + instanceID, ok := state.GetOk("instance_id") + + if !ok || instanceID.(string) == "" { return nil } ui.Say("Try get instance's serial port output and write to file " + serialLogFile) serialOutput, err := sdk.Compute().Instance().GetSerialPortOutput(ctx, &compute.GetInstanceSerialPortOutputRequest{ - InstanceId: instanceID, + InstanceId: instanceID.(string), }) if err != nil { return fmt.Errorf("Failed to get serial port output for instance (id: %s): %s", instanceID, err) diff --git a/post-processor/yandex-export/post-processor.go b/post-processor/yandex-export/post-processor.go index 197adf3ef37..f333c1ca8e9 100644 --- a/post-processor/yandex-export/post-processor.go +++ b/post-processor/yandex-export/post-processor.go @@ -41,6 +41,8 @@ type Config struct { yandex.AccessConfig `mapstructure:",squash"` yandex.CommonConfig `mapstructure:",squash"` ExchangeConfig `mapstructure:",squash"` + communicator.SSH `mapstructure:",squash"` + communicator.Config `mapstructure:"-"` // List of paths to Yandex Object Storage where exported image will be uploaded. // Please be aware that use of space char inside path not supported. @@ -49,12 +51,6 @@ type Config struct { // Paths to Yandex Object Storage where exported image will be uploaded. Paths []string `mapstructure:"paths" required:"true"` - // Path to a PEM encoded private key file to use to authenticate with SSH. - // The `~` can be used in path and will be expanded to the home directory - // of current user. - SSHPrivateKeyFile string `mapstructure:"ssh_private_key_file" required:"false"` - // The username to connect to SSH with. Default `ubuntu` - SSHUsername string `mapstructure:"ssh_username" required:"false"` // The ID of the folder containing the source image. Default `standard-images`. SourceImageFolderID string `mapstructure:"source_image_folder_id" required:"false"` // The source image family to start export process. Default `ubuntu-1604-lts`. @@ -105,9 +101,15 @@ func (p *PostProcessor) Configure(raws ...interface{}) error { if p.config.DiskSizeGb == 0 { p.config.DiskSizeGb = 100 } - if p.config.SSHUsername == "" { - p.config.SSHUsername = "ubuntu" + if p.config.SSH.SSHUsername == "" { + p.config.SSH.SSHUsername = "ubuntu" } + p.config.Config = communicator.Config{ + Type: "ssh", + SSH: p.config.SSH, + } + errs = packersdk.MultiErrorAppend(errs, p.config.Config.Prepare(&p.config.ctx)...) 
+ if p.config.SourceImageID == "" { if p.config.SourceImageFamily == "" { p.config.SourceImageFamily = defaultSourceImageFamily @@ -231,12 +233,6 @@ func (p *PostProcessor) PostProcess(ctx context.Context, ui packersdk.Ui, artifa yandexConfig.DiskName = exporterName } - errs := yandexConfig.Communicator.Prepare(interpolate.NewContext()) - if len(errs) > 0 { - err := &packersdk.MultiError{Errors: errs} - return nil, false, false, err - } - ui.Say(fmt.Sprintf("Validating service_account_id: '%s'...", yandexConfig.ServiceAccountID)) if err := validateServiceAccount(ctx, driver.SDK(), yandexConfig.ServiceAccountID); err != nil { return nil, false, false, err @@ -309,13 +305,7 @@ func ycSaneDefaults(c *Config, md map[string]string) yandex.Config { yandexConfig := yandex.Config{ CommonConfig: c.CommonConfig, AccessConfig: c.AccessConfig, - Communicator: communicator.Config{ - Type: "ssh", - SSH: communicator.SSH{ - SSHUsername: c.SSHUsername, - SSHPrivateKeyFile: c.SSHPrivateKeyFile, - }, - }, + Communicator: c.Config, } if yandexConfig.Metadata == nil { yandexConfig.Metadata = md diff --git a/post-processor/yandex-export/post-processor.hcl2spec.go b/post-processor/yandex-export/post-processor.hcl2spec.go index 016a44b2f56..296bea7aa3c 100644 --- a/post-processor/yandex-export/post-processor.hcl2spec.go +++ b/post-processor/yandex-export/post-processor.hcl2spec.go @@ -4,53 +4,91 @@ package yandexexport import ( "github.com/hashicorp/hcl/v2/hcldec" + "github.com/hashicorp/packer-plugin-sdk/communicator" "github.com/zclconf/go-cty/cty" ) // FlatConfig is an auto-generated flat version of Config. // Where the contents of a field with a `mapstructure:,squash` tag are bubbled up. type FlatConfig struct { - PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"` - PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"` - PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"` - PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"` - PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"` - PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"` - PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"` - PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"` - Endpoint *string `mapstructure:"endpoint" required:"false" cty:"endpoint" hcl:"endpoint"` - ServiceAccountKeyFile *string `mapstructure:"service_account_key_file" required:"false" cty:"service_account_key_file" hcl:"service_account_key_file"` - Token *string `mapstructure:"token" required:"true" cty:"token" hcl:"token"` - MaxRetries *int `mapstructure:"max_retries" cty:"max_retries" hcl:"max_retries"` - SerialLogFile *string `mapstructure:"serial_log_file" required:"false" cty:"serial_log_file" hcl:"serial_log_file"` - StateTimeout *string `mapstructure:"state_timeout" required:"false" cty:"state_timeout" hcl:"state_timeout"` - InstanceCores *int `mapstructure:"instance_cores" required:"false" cty:"instance_cores" hcl:"instance_cores"` - InstanceGpus *int `mapstructure:"instance_gpus" required:"false" cty:"instance_gpus" hcl:"instance_gpus"` - InstanceMemory *int `mapstructure:"instance_mem_gb" 
required:"false" cty:"instance_mem_gb" hcl:"instance_mem_gb"` - InstanceName *string `mapstructure:"instance_name" required:"false" cty:"instance_name" hcl:"instance_name"` - PlatformID *string `mapstructure:"platform_id" required:"false" cty:"platform_id" hcl:"platform_id"` - Labels map[string]string `mapstructure:"labels" required:"false" cty:"labels" hcl:"labels"` - Metadata map[string]string `mapstructure:"metadata" required:"false" cty:"metadata" hcl:"metadata"` - MetadataFromFile map[string]string `mapstructure:"metadata_from_file" cty:"metadata_from_file" hcl:"metadata_from_file"` - Preemptible *bool `mapstructure:"preemptible" cty:"preemptible" hcl:"preemptible"` - DiskName *string `mapstructure:"disk_name" required:"false" cty:"disk_name" hcl:"disk_name"` - DiskSizeGb *int `mapstructure:"disk_size_gb" required:"false" cty:"disk_size_gb" hcl:"disk_size_gb"` - DiskType *string `mapstructure:"disk_type" required:"false" cty:"disk_type" hcl:"disk_type"` - DiskLabels map[string]string `mapstructure:"disk_labels" required:"false" cty:"disk_labels" hcl:"disk_labels"` - SubnetID *string `mapstructure:"subnet_id" required:"false" cty:"subnet_id" hcl:"subnet_id"` - Zone *string `mapstructure:"zone" required:"false" cty:"zone" hcl:"zone"` - UseIPv4Nat *bool `mapstructure:"use_ipv4_nat" required:"false" cty:"use_ipv4_nat" hcl:"use_ipv4_nat"` - UseIPv6 *bool `mapstructure:"use_ipv6" required:"false" cty:"use_ipv6" hcl:"use_ipv6"` - UseInternalIP *bool `mapstructure:"use_internal_ip" required:"false" cty:"use_internal_ip" hcl:"use_internal_ip"` - FolderID *string `mapstructure:"folder_id" required:"true" cty:"folder_id" hcl:"folder_id"` - ServiceAccountID *string `mapstructure:"service_account_id" required:"true" cty:"service_account_id" hcl:"service_account_id"` - Paths []string `mapstructure:"paths" required:"true" cty:"paths" hcl:"paths"` - SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" required:"false" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"` - SSHUsername *string `mapstructure:"ssh_username" required:"false" cty:"ssh_username" hcl:"ssh_username"` - SourceImageFolderID *string `mapstructure:"source_image_folder_id" required:"false" cty:"source_image_folder_id" hcl:"source_image_folder_id"` - SourceImageFamily *string `mapstructure:"source_image_family" required:"false" cty:"source_image_family" hcl:"source_image_family"` - SourceImageID *string `mapstructure:"source_image_id" required:"false" cty:"source_image_id" hcl:"source_image_id"` - SourceDiskExtraSize *int `mapstructure:"source_disk_extra_size" required:"false" cty:"source_disk_extra_size" hcl:"source_disk_extra_size"` + PackerBuildName *string `mapstructure:"packer_build_name" cty:"packer_build_name" hcl:"packer_build_name"` + PackerBuilderType *string `mapstructure:"packer_builder_type" cty:"packer_builder_type" hcl:"packer_builder_type"` + PackerCoreVersion *string `mapstructure:"packer_core_version" cty:"packer_core_version" hcl:"packer_core_version"` + PackerDebug *bool `mapstructure:"packer_debug" cty:"packer_debug" hcl:"packer_debug"` + PackerForce *bool `mapstructure:"packer_force" cty:"packer_force" hcl:"packer_force"` + PackerOnError *string `mapstructure:"packer_on_error" cty:"packer_on_error" hcl:"packer_on_error"` + PackerUserVars map[string]string `mapstructure:"packer_user_variables" cty:"packer_user_variables" hcl:"packer_user_variables"` + PackerSensitiveVars []string `mapstructure:"packer_sensitive_variables" cty:"packer_sensitive_variables" hcl:"packer_sensitive_variables"` + Endpoint 
*string `mapstructure:"endpoint" required:"false" cty:"endpoint" hcl:"endpoint"` + ServiceAccountKeyFile *string `mapstructure:"service_account_key_file" required:"false" cty:"service_account_key_file" hcl:"service_account_key_file"` + Token *string `mapstructure:"token" required:"true" cty:"token" hcl:"token"` + MaxRetries *int `mapstructure:"max_retries" cty:"max_retries" hcl:"max_retries"` + SerialLogFile *string `mapstructure:"serial_log_file" required:"false" cty:"serial_log_file" hcl:"serial_log_file"` + StateTimeout *string `mapstructure:"state_timeout" required:"false" cty:"state_timeout" hcl:"state_timeout"` + InstanceCores *int `mapstructure:"instance_cores" required:"false" cty:"instance_cores" hcl:"instance_cores"` + InstanceGpus *int `mapstructure:"instance_gpus" required:"false" cty:"instance_gpus" hcl:"instance_gpus"` + InstanceMemory *int `mapstructure:"instance_mem_gb" required:"false" cty:"instance_mem_gb" hcl:"instance_mem_gb"` + InstanceName *string `mapstructure:"instance_name" required:"false" cty:"instance_name" hcl:"instance_name"` + PlatformID *string `mapstructure:"platform_id" required:"false" cty:"platform_id" hcl:"platform_id"` + Labels map[string]string `mapstructure:"labels" required:"false" cty:"labels" hcl:"labels"` + Metadata map[string]string `mapstructure:"metadata" required:"false" cty:"metadata" hcl:"metadata"` + MetadataFromFile map[string]string `mapstructure:"metadata_from_file" cty:"metadata_from_file" hcl:"metadata_from_file"` + Preemptible *bool `mapstructure:"preemptible" cty:"preemptible" hcl:"preemptible"` + DiskName *string `mapstructure:"disk_name" required:"false" cty:"disk_name" hcl:"disk_name"` + DiskSizeGb *int `mapstructure:"disk_size_gb" required:"false" cty:"disk_size_gb" hcl:"disk_size_gb"` + DiskType *string `mapstructure:"disk_type" required:"false" cty:"disk_type" hcl:"disk_type"` + DiskLabels map[string]string `mapstructure:"disk_labels" required:"false" cty:"disk_labels" hcl:"disk_labels"` + SubnetID *string `mapstructure:"subnet_id" required:"false" cty:"subnet_id" hcl:"subnet_id"` + Zone *string `mapstructure:"zone" required:"false" cty:"zone" hcl:"zone"` + UseIPv4Nat *bool `mapstructure:"use_ipv4_nat" required:"false" cty:"use_ipv4_nat" hcl:"use_ipv4_nat"` + UseIPv6 *bool `mapstructure:"use_ipv6" required:"false" cty:"use_ipv6" hcl:"use_ipv6"` + UseInternalIP *bool `mapstructure:"use_internal_ip" required:"false" cty:"use_internal_ip" hcl:"use_internal_ip"` + FolderID *string `mapstructure:"folder_id" required:"true" cty:"folder_id" hcl:"folder_id"` + ServiceAccountID *string `mapstructure:"service_account_id" required:"true" cty:"service_account_id" hcl:"service_account_id"` + SSHHost *string `mapstructure:"ssh_host" cty:"ssh_host" hcl:"ssh_host"` + SSHPort *int `mapstructure:"ssh_port" cty:"ssh_port" hcl:"ssh_port"` + SSHUsername *string `mapstructure:"ssh_username" cty:"ssh_username" hcl:"ssh_username"` + SSHPassword *string `mapstructure:"ssh_password" cty:"ssh_password" hcl:"ssh_password"` + SSHKeyPairName *string `mapstructure:"ssh_keypair_name" undocumented:"true" cty:"ssh_keypair_name" hcl:"ssh_keypair_name"` + SSHTemporaryKeyPairName *string `mapstructure:"temporary_key_pair_name" undocumented:"true" cty:"temporary_key_pair_name" hcl:"temporary_key_pair_name"` + SSHTemporaryKeyPairType *string `mapstructure:"temporary_key_pair_type" cty:"temporary_key_pair_type" hcl:"temporary_key_pair_type"` + SSHTemporaryKeyPairBits *int `mapstructure:"temporary_key_pair_bits" cty:"temporary_key_pair_bits" 
hcl:"temporary_key_pair_bits"` + SSHCiphers []string `mapstructure:"ssh_ciphers" cty:"ssh_ciphers" hcl:"ssh_ciphers"` + SSHClearAuthorizedKeys *bool `mapstructure:"ssh_clear_authorized_keys" cty:"ssh_clear_authorized_keys" hcl:"ssh_clear_authorized_keys"` + SSHKEXAlgos []string `mapstructure:"ssh_key_exchange_algorithms" cty:"ssh_key_exchange_algorithms" hcl:"ssh_key_exchange_algorithms"` + SSHPrivateKeyFile *string `mapstructure:"ssh_private_key_file" undocumented:"true" cty:"ssh_private_key_file" hcl:"ssh_private_key_file"` + SSHCertificateFile *string `mapstructure:"ssh_certificate_file" cty:"ssh_certificate_file" hcl:"ssh_certificate_file"` + SSHPty *bool `mapstructure:"ssh_pty" cty:"ssh_pty" hcl:"ssh_pty"` + SSHTimeout *string `mapstructure:"ssh_timeout" cty:"ssh_timeout" hcl:"ssh_timeout"` + SSHWaitTimeout *string `mapstructure:"ssh_wait_timeout" undocumented:"true" cty:"ssh_wait_timeout" hcl:"ssh_wait_timeout"` + SSHAgentAuth *bool `mapstructure:"ssh_agent_auth" undocumented:"true" cty:"ssh_agent_auth" hcl:"ssh_agent_auth"` + SSHDisableAgentForwarding *bool `mapstructure:"ssh_disable_agent_forwarding" cty:"ssh_disable_agent_forwarding" hcl:"ssh_disable_agent_forwarding"` + SSHHandshakeAttempts *int `mapstructure:"ssh_handshake_attempts" cty:"ssh_handshake_attempts" hcl:"ssh_handshake_attempts"` + SSHBastionHost *string `mapstructure:"ssh_bastion_host" cty:"ssh_bastion_host" hcl:"ssh_bastion_host"` + SSHBastionPort *int `mapstructure:"ssh_bastion_port" cty:"ssh_bastion_port" hcl:"ssh_bastion_port"` + SSHBastionAgentAuth *bool `mapstructure:"ssh_bastion_agent_auth" cty:"ssh_bastion_agent_auth" hcl:"ssh_bastion_agent_auth"` + SSHBastionUsername *string `mapstructure:"ssh_bastion_username" cty:"ssh_bastion_username" hcl:"ssh_bastion_username"` + SSHBastionPassword *string `mapstructure:"ssh_bastion_password" cty:"ssh_bastion_password" hcl:"ssh_bastion_password"` + SSHBastionInteractive *bool `mapstructure:"ssh_bastion_interactive" cty:"ssh_bastion_interactive" hcl:"ssh_bastion_interactive"` + SSHBastionPrivateKeyFile *string `mapstructure:"ssh_bastion_private_key_file" cty:"ssh_bastion_private_key_file" hcl:"ssh_bastion_private_key_file"` + SSHBastionCertificateFile *string `mapstructure:"ssh_bastion_certificate_file" cty:"ssh_bastion_certificate_file" hcl:"ssh_bastion_certificate_file"` + SSHFileTransferMethod *string `mapstructure:"ssh_file_transfer_method" cty:"ssh_file_transfer_method" hcl:"ssh_file_transfer_method"` + SSHProxyHost *string `mapstructure:"ssh_proxy_host" cty:"ssh_proxy_host" hcl:"ssh_proxy_host"` + SSHProxyPort *int `mapstructure:"ssh_proxy_port" cty:"ssh_proxy_port" hcl:"ssh_proxy_port"` + SSHProxyUsername *string `mapstructure:"ssh_proxy_username" cty:"ssh_proxy_username" hcl:"ssh_proxy_username"` + SSHProxyPassword *string `mapstructure:"ssh_proxy_password" cty:"ssh_proxy_password" hcl:"ssh_proxy_password"` + SSHKeepAliveInterval *string `mapstructure:"ssh_keep_alive_interval" cty:"ssh_keep_alive_interval" hcl:"ssh_keep_alive_interval"` + SSHReadWriteTimeout *string `mapstructure:"ssh_read_write_timeout" cty:"ssh_read_write_timeout" hcl:"ssh_read_write_timeout"` + SSHRemoteTunnels []string `mapstructure:"ssh_remote_tunnels" cty:"ssh_remote_tunnels" hcl:"ssh_remote_tunnels"` + SSHLocalTunnels []string `mapstructure:"ssh_local_tunnels" cty:"ssh_local_tunnels" hcl:"ssh_local_tunnels"` + SSHPublicKey []byte `mapstructure:"ssh_public_key" undocumented:"true" cty:"ssh_public_key" hcl:"ssh_public_key"` + SSHPrivateKey []byte `mapstructure:"ssh_private_key" 
undocumented:"true" cty:"ssh_private_key" hcl:"ssh_private_key"` + Config *communicator.FlatConfig `mapstructure:"-" cty:"-" hcl:"-"` + Paths []string `mapstructure:"paths" required:"true" cty:"paths" hcl:"paths"` + SourceImageFolderID *string `mapstructure:"source_image_folder_id" required:"false" cty:"source_image_folder_id" hcl:"source_image_folder_id"` + SourceImageFamily *string `mapstructure:"source_image_family" required:"false" cty:"source_image_family" hcl:"source_image_family"` + SourceImageID *string `mapstructure:"source_image_id" required:"false" cty:"source_image_id" hcl:"source_image_id"` + SourceDiskExtraSize *int `mapstructure:"source_disk_extra_size" required:"false" cty:"source_disk_extra_size" hcl:"source_disk_extra_size"` } // FlatMapstructure returns a new FlatConfig. @@ -65,47 +103,84 @@ func (*Config) FlatMapstructure() interface{ HCL2Spec() map[string]hcldec.Spec } // The decoded values from this spec will then be applied to a FlatConfig. func (*FlatConfig) HCL2Spec() map[string]hcldec.Spec { s := map[string]hcldec.Spec{ - "packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false}, - "packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false}, - "packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false}, - "packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false}, - "packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false}, - "packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false}, - "packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false}, - "packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false}, - "endpoint": &hcldec.AttrSpec{Name: "endpoint", Type: cty.String, Required: false}, - "service_account_key_file": &hcldec.AttrSpec{Name: "service_account_key_file", Type: cty.String, Required: false}, - "token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false}, - "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, - "serial_log_file": &hcldec.AttrSpec{Name: "serial_log_file", Type: cty.String, Required: false}, - "state_timeout": &hcldec.AttrSpec{Name: "state_timeout", Type: cty.String, Required: false}, - "instance_cores": &hcldec.AttrSpec{Name: "instance_cores", Type: cty.Number, Required: false}, - "instance_gpus": &hcldec.AttrSpec{Name: "instance_gpus", Type: cty.Number, Required: false}, - "instance_mem_gb": &hcldec.AttrSpec{Name: "instance_mem_gb", Type: cty.Number, Required: false}, - "instance_name": &hcldec.AttrSpec{Name: "instance_name", Type: cty.String, Required: false}, - "platform_id": &hcldec.AttrSpec{Name: "platform_id", Type: cty.String, Required: false}, - "labels": &hcldec.AttrSpec{Name: "labels", Type: cty.Map(cty.String), Required: false}, - "metadata": &hcldec.AttrSpec{Name: "metadata", Type: cty.Map(cty.String), Required: false}, - "metadata_from_file": &hcldec.AttrSpec{Name: "metadata_from_file", Type: cty.Map(cty.String), Required: false}, - "preemptible": &hcldec.AttrSpec{Name: "preemptible", Type: cty.Bool, Required: false}, - "disk_name": &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false}, - "disk_size_gb": &hcldec.AttrSpec{Name: "disk_size_gb", Type: cty.Number, Required: false}, - "disk_type": 
&hcldec.AttrSpec{Name: "disk_type", Type: cty.String, Required: false}, - "disk_labels": &hcldec.AttrSpec{Name: "disk_labels", Type: cty.Map(cty.String), Required: false}, - "subnet_id": &hcldec.AttrSpec{Name: "subnet_id", Type: cty.String, Required: false}, - "zone": &hcldec.AttrSpec{Name: "zone", Type: cty.String, Required: false}, - "use_ipv4_nat": &hcldec.AttrSpec{Name: "use_ipv4_nat", Type: cty.Bool, Required: false}, - "use_ipv6": &hcldec.AttrSpec{Name: "use_ipv6", Type: cty.Bool, Required: false}, - "use_internal_ip": &hcldec.AttrSpec{Name: "use_internal_ip", Type: cty.Bool, Required: false}, - "folder_id": &hcldec.AttrSpec{Name: "folder_id", Type: cty.String, Required: false}, - "service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false}, - "paths": &hcldec.AttrSpec{Name: "paths", Type: cty.List(cty.String), Required: false}, - "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false}, - "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false}, - "source_image_folder_id": &hcldec.AttrSpec{Name: "source_image_folder_id", Type: cty.String, Required: false}, - "source_image_family": &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false}, - "source_image_id": &hcldec.AttrSpec{Name: "source_image_id", Type: cty.String, Required: false}, - "source_disk_extra_size": &hcldec.AttrSpec{Name: "source_disk_extra_size", Type: cty.Number, Required: false}, + "packer_build_name": &hcldec.AttrSpec{Name: "packer_build_name", Type: cty.String, Required: false}, + "packer_builder_type": &hcldec.AttrSpec{Name: "packer_builder_type", Type: cty.String, Required: false}, + "packer_core_version": &hcldec.AttrSpec{Name: "packer_core_version", Type: cty.String, Required: false}, + "packer_debug": &hcldec.AttrSpec{Name: "packer_debug", Type: cty.Bool, Required: false}, + "packer_force": &hcldec.AttrSpec{Name: "packer_force", Type: cty.Bool, Required: false}, + "packer_on_error": &hcldec.AttrSpec{Name: "packer_on_error", Type: cty.String, Required: false}, + "packer_user_variables": &hcldec.AttrSpec{Name: "packer_user_variables", Type: cty.Map(cty.String), Required: false}, + "packer_sensitive_variables": &hcldec.AttrSpec{Name: "packer_sensitive_variables", Type: cty.List(cty.String), Required: false}, + "endpoint": &hcldec.AttrSpec{Name: "endpoint", Type: cty.String, Required: false}, + "service_account_key_file": &hcldec.AttrSpec{Name: "service_account_key_file", Type: cty.String, Required: false}, + "token": &hcldec.AttrSpec{Name: "token", Type: cty.String, Required: false}, + "max_retries": &hcldec.AttrSpec{Name: "max_retries", Type: cty.Number, Required: false}, + "serial_log_file": &hcldec.AttrSpec{Name: "serial_log_file", Type: cty.String, Required: false}, + "state_timeout": &hcldec.AttrSpec{Name: "state_timeout", Type: cty.String, Required: false}, + "instance_cores": &hcldec.AttrSpec{Name: "instance_cores", Type: cty.Number, Required: false}, + "instance_gpus": &hcldec.AttrSpec{Name: "instance_gpus", Type: cty.Number, Required: false}, + "instance_mem_gb": &hcldec.AttrSpec{Name: "instance_mem_gb", Type: cty.Number, Required: false}, + "instance_name": &hcldec.AttrSpec{Name: "instance_name", Type: cty.String, Required: false}, + "platform_id": &hcldec.AttrSpec{Name: "platform_id", Type: cty.String, Required: false}, + "labels": &hcldec.AttrSpec{Name: "labels", Type: cty.Map(cty.String), Required: false}, + "metadata": &hcldec.AttrSpec{Name: "metadata", 
Type: cty.Map(cty.String), Required: false}, + "metadata_from_file": &hcldec.AttrSpec{Name: "metadata_from_file", Type: cty.Map(cty.String), Required: false}, + "preemptible": &hcldec.AttrSpec{Name: "preemptible", Type: cty.Bool, Required: false}, + "disk_name": &hcldec.AttrSpec{Name: "disk_name", Type: cty.String, Required: false}, + "disk_size_gb": &hcldec.AttrSpec{Name: "disk_size_gb", Type: cty.Number, Required: false}, + "disk_type": &hcldec.AttrSpec{Name: "disk_type", Type: cty.String, Required: false}, + "disk_labels": &hcldec.AttrSpec{Name: "disk_labels", Type: cty.Map(cty.String), Required: false}, + "subnet_id": &hcldec.AttrSpec{Name: "subnet_id", Type: cty.String, Required: false}, + "zone": &hcldec.AttrSpec{Name: "zone", Type: cty.String, Required: false}, + "use_ipv4_nat": &hcldec.AttrSpec{Name: "use_ipv4_nat", Type: cty.Bool, Required: false}, + "use_ipv6": &hcldec.AttrSpec{Name: "use_ipv6", Type: cty.Bool, Required: false}, + "use_internal_ip": &hcldec.AttrSpec{Name: "use_internal_ip", Type: cty.Bool, Required: false}, + "folder_id": &hcldec.AttrSpec{Name: "folder_id", Type: cty.String, Required: false}, + "service_account_id": &hcldec.AttrSpec{Name: "service_account_id", Type: cty.String, Required: false}, + "ssh_host": &hcldec.AttrSpec{Name: "ssh_host", Type: cty.String, Required: false}, + "ssh_port": &hcldec.AttrSpec{Name: "ssh_port", Type: cty.Number, Required: false}, + "ssh_username": &hcldec.AttrSpec{Name: "ssh_username", Type: cty.String, Required: false}, + "ssh_password": &hcldec.AttrSpec{Name: "ssh_password", Type: cty.String, Required: false}, + "ssh_keypair_name": &hcldec.AttrSpec{Name: "ssh_keypair_name", Type: cty.String, Required: false}, + "temporary_key_pair_name": &hcldec.AttrSpec{Name: "temporary_key_pair_name", Type: cty.String, Required: false}, + "temporary_key_pair_type": &hcldec.AttrSpec{Name: "temporary_key_pair_type", Type: cty.String, Required: false}, + "temporary_key_pair_bits": &hcldec.AttrSpec{Name: "temporary_key_pair_bits", Type: cty.Number, Required: false}, + "ssh_ciphers": &hcldec.AttrSpec{Name: "ssh_ciphers", Type: cty.List(cty.String), Required: false}, + "ssh_clear_authorized_keys": &hcldec.AttrSpec{Name: "ssh_clear_authorized_keys", Type: cty.Bool, Required: false}, + "ssh_key_exchange_algorithms": &hcldec.AttrSpec{Name: "ssh_key_exchange_algorithms", Type: cty.List(cty.String), Required: false}, + "ssh_private_key_file": &hcldec.AttrSpec{Name: "ssh_private_key_file", Type: cty.String, Required: false}, + "ssh_certificate_file": &hcldec.AttrSpec{Name: "ssh_certificate_file", Type: cty.String, Required: false}, + "ssh_pty": &hcldec.AttrSpec{Name: "ssh_pty", Type: cty.Bool, Required: false}, + "ssh_timeout": &hcldec.AttrSpec{Name: "ssh_timeout", Type: cty.String, Required: false}, + "ssh_wait_timeout": &hcldec.AttrSpec{Name: "ssh_wait_timeout", Type: cty.String, Required: false}, + "ssh_agent_auth": &hcldec.AttrSpec{Name: "ssh_agent_auth", Type: cty.Bool, Required: false}, + "ssh_disable_agent_forwarding": &hcldec.AttrSpec{Name: "ssh_disable_agent_forwarding", Type: cty.Bool, Required: false}, + "ssh_handshake_attempts": &hcldec.AttrSpec{Name: "ssh_handshake_attempts", Type: cty.Number, Required: false}, + "ssh_bastion_host": &hcldec.AttrSpec{Name: "ssh_bastion_host", Type: cty.String, Required: false}, + "ssh_bastion_port": &hcldec.AttrSpec{Name: "ssh_bastion_port", Type: cty.Number, Required: false}, + "ssh_bastion_agent_auth": &hcldec.AttrSpec{Name: "ssh_bastion_agent_auth", Type: cty.Bool, Required: false}, + 
"ssh_bastion_username": &hcldec.AttrSpec{Name: "ssh_bastion_username", Type: cty.String, Required: false}, + "ssh_bastion_password": &hcldec.AttrSpec{Name: "ssh_bastion_password", Type: cty.String, Required: false}, + "ssh_bastion_interactive": &hcldec.AttrSpec{Name: "ssh_bastion_interactive", Type: cty.Bool, Required: false}, + "ssh_bastion_private_key_file": &hcldec.AttrSpec{Name: "ssh_bastion_private_key_file", Type: cty.String, Required: false}, + "ssh_bastion_certificate_file": &hcldec.AttrSpec{Name: "ssh_bastion_certificate_file", Type: cty.String, Required: false}, + "ssh_file_transfer_method": &hcldec.AttrSpec{Name: "ssh_file_transfer_method", Type: cty.String, Required: false}, + "ssh_proxy_host": &hcldec.AttrSpec{Name: "ssh_proxy_host", Type: cty.String, Required: false}, + "ssh_proxy_port": &hcldec.AttrSpec{Name: "ssh_proxy_port", Type: cty.Number, Required: false}, + "ssh_proxy_username": &hcldec.AttrSpec{Name: "ssh_proxy_username", Type: cty.String, Required: false}, + "ssh_proxy_password": &hcldec.AttrSpec{Name: "ssh_proxy_password", Type: cty.String, Required: false}, + "ssh_keep_alive_interval": &hcldec.AttrSpec{Name: "ssh_keep_alive_interval", Type: cty.String, Required: false}, + "ssh_read_write_timeout": &hcldec.AttrSpec{Name: "ssh_read_write_timeout", Type: cty.String, Required: false}, + "ssh_remote_tunnels": &hcldec.AttrSpec{Name: "ssh_remote_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_local_tunnels": &hcldec.AttrSpec{Name: "ssh_local_tunnels", Type: cty.List(cty.String), Required: false}, + "ssh_public_key": &hcldec.AttrSpec{Name: "ssh_public_key", Type: cty.List(cty.Number), Required: false}, + "ssh_private_key": &hcldec.AttrSpec{Name: "ssh_private_key", Type: cty.List(cty.Number), Required: false}, + "-": &hcldec.BlockSpec{TypeName: "-", Nested: hcldec.ObjectSpec((*communicator.FlatConfig)(nil).HCL2Spec())}, + "paths": &hcldec.AttrSpec{Name: "paths", Type: cty.List(cty.String), Required: false}, + "source_image_folder_id": &hcldec.AttrSpec{Name: "source_image_folder_id", Type: cty.String, Required: false}, + "source_image_family": &hcldec.AttrSpec{Name: "source_image_family", Type: cty.String, Required: false}, + "source_image_id": &hcldec.AttrSpec{Name: "source_image_id", Type: cty.String, Required: false}, + "source_disk_extra_size": &hcldec.AttrSpec{Name: "source_disk_extra_size", Type: cty.Number, Required: false}, } return s } diff --git a/website/content/docs/post-processors/yandex-export.mdx b/website/content/docs/post-processors/yandex-export.mdx index a25e9085afa..6a0ac5c22df 100644 --- a/website/content/docs/post-processors/yandex-export.mdx +++ b/website/content/docs/post-processors/yandex-export.mdx @@ -28,6 +28,8 @@ As such, assigned Service Account must have write permissions to the Yandex Obje `paths`. A new temporary static access keys from assigned Service Account used to upload image. +Also, you should configure [ssh communicator](/docs/communicators/ssh). Default `ssh_username` to `ubuntu`. + ## Configuration ### Required: diff --git a/website/content/partials/post-processor/yandex-export/Config-not-required.mdx b/website/content/partials/post-processor/yandex-export/Config-not-required.mdx index 58c765e8e5c..a987737422e 100644 --- a/website/content/partials/post-processor/yandex-export/Config-not-required.mdx +++ b/website/content/partials/post-processor/yandex-export/Config-not-required.mdx @@ -1,11 +1,5 @@ -- `ssh_private_key_file` (string) - Path to a PEM encoded private key file to use to authenticate with SSH. 
- The `~` can be used in path and will be expanded to the home directory - of current user. - -- `ssh_username` (string) - The username to connect to SSH with. Default `ubuntu` - - `source_image_folder_id` (string) - The ID of the folder containing the source image. Default `standard-images`. - `source_image_family` (string) - The source image family to start export process. Default `ubuntu-1604-lts`. From ea7fef699fc2dc60a7f6b2f61e395e6d4ae240a7 Mon Sep 17 00:00:00 2001 From: Wilken Rivera Date: Mon, 25 Jan 2021 10:28:34 -0500 Subject: [PATCH 13/14] Test against deployment url (#10501) * Test against deployment url * Remove infinite redirect configuration * Add DEPLOYMENT_URL for builds against master * website: Update README * tip on seconds * Test with GHA timeout_minutes clause * Add continue on error for poll job * Add empty url check * Move to pull-request path filter * Remove www for packer.io * Apply suggestions from code review * Update path filter Vercel deploys on any change under the website directory. * Use custom action as test Co-authored-by: Adrien Delorme --- .github/workflows/linkchecker.yml | 33 ++++++++++++++++--- .github/workflows/scheduled-link-checker.yml | 3 ++ mlc_config.json | 2 +- website/README.md | 4 ++- .../packer-on-cicd/pipelineing-builds.mdx | 2 +- website/redirects.next.js | 5 --- 6 files changed, 36 insertions(+), 13 deletions(-) diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml index 27ed60cee0f..0eaa2d77420 100644 --- a/.github/workflows/linkchecker.yml +++ b/.github/workflows/linkchecker.yml @@ -1,14 +1,37 @@ -on: [pull_request] -name: Check Markdown links for modified files +on: + pull_request: + paths: + - 'website/**' + +name: Check markdown links on modified website files jobs: + vercel-deployment-poll: + runs-on: ubuntu-latest + timeout-minutes: 3 #cancel job if no deployment is found within x minutes + outputs: + url: ${{ steps.waitForVercelPreviewDeployment.outputs.url }} + steps: + - name: Wait for Vercel preview deployment to be ready + uses: nywilken/wait-for-vercel-preview@master + id: waitForVercelPreviewDeployment + with: + token: ${{ secrets.GITHUB_TOKEN }} + max_timeout: 600 # in seconds, set really high to leverage job timeout-minutes values + allow_inactive: true # needed to ensure we get a URL for a previously released deployment markdown-link-check: + needs: vercel-deployment-poll + if: ${{ needs.vercel-deployment-poll.outputs.url != '' }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@master - - uses: gaurav-nelson/github-action-markdown-link-check@v1 + - name: Get Deployment URL + run: + echo "DEPLOYMENT_URL=${{ needs.vercel-deployment-poll.outputs.url }}" >> $GITHUB_ENV + - name: Checkout source branch + uses: actions/checkout@master + - name: Check links + uses: gaurav-nelson/github-action-markdown-link-check@v1 with: use-quiet-mode: 'yes' file-extension: 'mdx' check-modified-files-only: 'yes' folder-path: 'website/content' - diff --git a/.github/workflows/scheduled-link-checker.yml b/.github/workflows/scheduled-link-checker.yml index d626f67d230..498936e472a 100644 --- a/.github/workflows/scheduled-link-checker.yml +++ b/.github/workflows/scheduled-link-checker.yml @@ -6,6 +6,9 @@ jobs: markdown-link-check: runs-on: ubuntu-latest steps: + - name: Set deployment URL env + run: + echo "DEPLOYMENT_URL=https://packer-git-master.hashicorp.vercel.app" >> $GITHUB_ENV - uses: actions/checkout@master - uses: gaurav-nelson/github-action-markdown-link-check@v1 with: diff --git a/mlc_config.json 
b/mlc_config.json index 5bd08fbb7fa..dc52e164a2a 100644 --- a/mlc_config.json +++ b/mlc_config.json @@ -37,7 +37,7 @@ "replacementPatterns": [ { "pattern": "^/", - "replacement": "https://packer.io/" + "replacement": "{{env.DEPLOYMENT_URL}}/" } ], "timeout": "20s", diff --git a/website/README.md b/website/README.md index 813520dd771..8117006c434 100644 --- a/website/README.md +++ b/website/README.md @@ -379,7 +379,9 @@ You may customize the parameters in any way you'd like. To remove a prerelease f ## Link Validation -The Packer GitHub repository is configured to run a [Markdown Link Check](https://github.com/gaurav-nelson/github-action-markdown-link-check#github-action---markdown-link-check-%EF%B8%8F) on a nightly basis to check for potential broken links within the Packer documentation. There is also a GitHub action that will check any modified `.mdx` files on new pull-requests. +The Packer GitHub repository is configured to run a [Markdown Link Check](https://github.com/gaurav-nelson/github-action-markdown-link-check#github-action---markdown-link-check-%EF%B8%8F) on a nightly basis to check for potential broken links within the Packer documentation. All checks on master will be executed using the BASE_URL set to https://packer.io/. + +There is also a GitHub action that will check any modified `website/content/**/*.mdx` files on new pull-requests. The link checker action for pull-requests will only run when there is a new Vercel deployment; checks will be executed against the Vercel deployment URL. If no deployment is made the check will run but will timeout after 3 minutes since it needs a valid Vercel deployment URL. The master configuration file for the markdown-link-checker is called `mlc_config.json` and is located under the project's root directory. The configuration helps with relative links in the documentation that will be valid once deployed, and configures a few ignored URLs which are valid but may not return a valid 200 HTTP response code due to permissions or DDoS protection settings on the domain. diff --git a/website/content/guides/packer-on-cicd/pipelineing-builds.mdx b/website/content/guides/packer-on-cicd/pipelineing-builds.mdx index 77651cd0203..571962b439d 100644 --- a/website/content/guides/packer-on-cicd/pipelineing-builds.mdx +++ b/website/content/guides/packer-on-cicd/pipelineing-builds.mdx @@ -118,7 +118,7 @@ build { In order to build using this template, create a directory named "http" in your current working directory. Copy the minimal example from our -[preseed guide](https://www.packer.io/guides/automatic-operating-system-installs/preseed_ubuntu#examples) +[preseed guide](https://packer.io/guides/automatic-operating-system-installs/preseed_ubuntu#examples) into a file in your http directory and name it "ubuntu_preseed.cfg". 
Copy the above json template into your current working directory and save it as "example_virtualbox_iso.json" diff --git a/website/redirects.next.js b/website/redirects.next.js index 2200afa658e..6c6fd8381a4 100644 --- a/website/redirects.next.js +++ b/website/redirects.next.js @@ -146,11 +146,6 @@ module.exports = [ destination: '/docs/templates/hcl_templates/:path*', permanent: true, }, - { - source: '/docs/templates/hcl_templates/:path*', - destination: '/docs/templates/hcl_templates/:path*', - permanent: true, - }, { source: '/docs/templates/hcl_templates/:path*/overview', destination: '/docs/templates/hcl_templates/:path*', From fbbda0f9d99bfa1867c6cb2a7e4bdf6e3ded2ec4 Mon Sep 17 00:00:00 2001 From: Megan Marsh Date: Tue, 26 Jan 2021 01:21:44 -0800 Subject: [PATCH 14/14] Sensitive locals (#10509) * Allow locals to be declared as individual blocks, and give them the Sensitive flag * add docs for new local block * linting * add tests * modified parsing to use schema, check for dupes properly * update comment; fix wording a little * add tests for duplicate variables definition in two different files * remove unnecessary slice initialisation * fix crash by returning when decode error is hit * parseLocalVariables: only treat a local var if it's not nil; also return locals in case of error * fix duplicate_locals test for windows Co-authored-by: Adrien Delorme --- hcl2template/parser.go | 29 ++++++---- .../testdata/complete/variables.pkr.hcl | 5 ++ hcl2template/testdata/variables/basic.pkr.hcl | 5 ++ .../variables/duplicate_locals/one.pkr.hcl | 4 ++ .../duplicate_locals/one_copy.pkr.hcl | 4 ++ hcl2template/types.packer_config.go | 19 +++++-- hcl2template/types.packer_config_test.go | 7 +++ hcl2template/types.variables.go | 54 ++++++++++++++++++- hcl2template/types.variables_test.go | 31 +++++++++++ .../docs/templates/hcl_templates/locals.mdx | 16 +++++- .../from-1.5/locals/example-block.mdx | 6 +++ 11 files changed, 163 insertions(+), 17 deletions(-) create mode 100644 hcl2template/testdata/variables/duplicate_locals/one.pkr.hcl create mode 100644 hcl2template/testdata/variables/duplicate_locals/one_copy.pkr.hcl diff --git a/hcl2template/parser.go b/hcl2template/parser.go index 5f5af98cc78..7f34b4b16da 100644 --- a/hcl2template/parser.go +++ b/hcl2template/parser.go @@ -20,6 +20,7 @@ const ( variablesLabel = "variables" variableLabel = "variable" localsLabel = "locals" + localLabel = "local" dataSourceLabel = "data" buildLabel = "build" communicatorLabel = "communicator" @@ -32,6 +33,7 @@ var configSchema = &hcl.BodySchema{ {Type: variablesLabel}, {Type: variableLabel, LabelNames: []string{"name"}}, {Type: localsLabel}, + {Type: localLabel, LabelNames: []string{"name"}}, {Type: dataSourceLabel, LabelNames: []string{"type", "name"}}, {Type: buildLabel}, {Type: communicatorLabel, LabelNames: []string{"type", "name"}}, @@ -257,17 +259,8 @@ func sniffCoreVersionRequirements(body hcl.Body) ([]VersionConstraint, hcl.Diagn return constraints, diags } -func (cfg *PackerConfig) Initialize(opts packer.InitializeOptions) hcl.Diagnostics { - var diags hcl.Diagnostics - - _, moreDiags := cfg.InputVariables.Values() - diags = append(diags, moreDiags...) - _, moreDiags = cfg.LocalVariables.Values() - diags = append(diags, moreDiags...) - diags = append(diags, cfg.evaluateDatasources(opts.SkipDatasourcesExecution)...) - diags = append(diags, cfg.evaluateLocalVariables(cfg.LocalBlocks)...) 
- - for _, variable := range cfg.InputVariables { +func filterVarsFromLogs(inputOrLocal Variables) { + for _, variable := range inputOrLocal { if !variable.Sensitive { continue } @@ -279,6 +272,20 @@ func (cfg *PackerConfig) Initialize(opts packer.InitializeOptions) hcl.Diagnosti return true, nil }) } +} + +func (cfg *PackerConfig) Initialize(opts packer.InitializeOptions) hcl.Diagnostics { + var diags hcl.Diagnostics + + _, moreDiags := cfg.InputVariables.Values() + diags = append(diags, moreDiags...) + _, moreDiags = cfg.LocalVariables.Values() + diags = append(diags, moreDiags...) + diags = append(diags, cfg.evaluateDatasources(opts.SkipDatasourcesExecution)...) + diags = append(diags, cfg.evaluateLocalVariables(cfg.LocalBlocks)...) + + filterVarsFromLogs(cfg.InputVariables) + filterVarsFromLogs(cfg.LocalVariables) // decode the actual content for _, file := range cfg.files { diff --git a/hcl2template/testdata/complete/variables.pkr.hcl b/hcl2template/testdata/complete/variables.pkr.hcl index 170277e7f74..59120d1c6e8 100644 --- a/hcl2template/testdata/complete/variables.pkr.hcl +++ b/hcl2template/testdata/complete/variables.pkr.hcl @@ -38,3 +38,8 @@ locals { {id = "c"}, ] } + +local "supersecret" { + expression = "${var.image_id}-password" + sensitive = true +} diff --git a/hcl2template/testdata/variables/basic.pkr.hcl b/hcl2template/testdata/variables/basic.pkr.hcl index 8fb119b0ce7..04e31628fd2 100644 --- a/hcl2template/testdata/variables/basic.pkr.hcl +++ b/hcl2template/testdata/variables/basic.pkr.hcl @@ -36,3 +36,8 @@ locals { service_name = "forum" owner = "Community Team" } + +local "supersecret" { + sensitive = true + expression = "secretvar" +} diff --git a/hcl2template/testdata/variables/duplicate_locals/one.pkr.hcl b/hcl2template/testdata/variables/duplicate_locals/one.pkr.hcl new file mode 100644 index 00000000000..d267a3fb45f --- /dev/null +++ b/hcl2template/testdata/variables/duplicate_locals/one.pkr.hcl @@ -0,0 +1,4 @@ + +local "sensible" { + expression = "something" +} diff --git a/hcl2template/testdata/variables/duplicate_locals/one_copy.pkr.hcl b/hcl2template/testdata/variables/duplicate_locals/one_copy.pkr.hcl new file mode 100644 index 00000000000..d267a3fb45f --- /dev/null +++ b/hcl2template/testdata/variables/duplicate_locals/one_copy.pkr.hcl @@ -0,0 +1,4 @@ + +local "sensible" { + expression = "something" +} diff --git a/hcl2template/types.packer_config.go b/hcl2template/types.packer_config.go index 3ea92f34516..e6979115377 100644 --- a/hcl2template/types.packer_config.go +++ b/hcl2template/types.packer_config.go @@ -150,10 +150,20 @@ func (c *PackerConfig) parseLocalVariables(f *hcl.File) ([]*LocalBlock, hcl.Diag content, moreDiags := f.Body.Content(configSchema) diags = append(diags, moreDiags...) - var locals []*LocalBlock + + locals := c.LocalBlocks for _, block := range content.Blocks { switch block.Type { + case localLabel: + l, moreDiags := decodeLocalBlock(block, locals) + diags = append(diags, moreDiags...) + if l != nil { + locals = append(locals, l) + } + if moreDiags.HasErrors() { + return locals, diags + } case localsLabel: attrs, moreDiags := block.Body.JustAttributes() diags = append(diags, moreDiags...) 
@@ -166,7 +176,7 @@ func (c *PackerConfig) parseLocalVariables(f *hcl.File) ([]*LocalBlock, hcl.Diag Subject: attr.NameRange.Ptr(), Context: block.DefRange.Ptr(), }) - return nil, diags + return locals, diags } locals = append(locals, &LocalBlock{ Name: name, @@ -176,6 +186,7 @@ func (c *PackerConfig) parseLocalVariables(f *hcl.File) ([]*LocalBlock, hcl.Diag } } + c.LocalBlocks = locals return locals, diags } @@ -221,14 +232,14 @@ func (c *PackerConfig) evaluateLocalVariables(locals []*LocalBlock) hcl.Diagnost func (c *PackerConfig) evaluateLocalVariable(local *LocalBlock) hcl.Diagnostics { var diags hcl.Diagnostics - value, moreDiags := local.Expr.Value(c.EvalContext(nil)) diags = append(diags, moreDiags...) if moreDiags.HasErrors() { return diags } c.LocalVariables[local.Name] = &Variable{ - Name: local.Name, + Name: local.Name, + Sensitive: local.Sensitive, Values: []VariableAssignment{{ Value: value, Expr: local.Expr, diff --git a/hcl2template/types.packer_config_test.go b/hcl2template/types.packer_config_test.go index e3da5df404e..23f5ee686f9 100644 --- a/hcl2template/types.packer_config_test.go +++ b/hcl2template/types.packer_config_test.go @@ -104,6 +104,13 @@ func TestParser_complete(t *testing.T) { }), }), }, + "supersecret": &Variable{ + Name: "supersecret", + Values: []VariableAssignment{{From: "default", + Value: cty.StringVal("image-id-default-password")}}, + Type: cty.String, + Sensitive: true, + }, }, Datasources: Datasources{ DatasourceRef{Type: "amazon-ami", Name: "test"}: Datasource{ diff --git a/hcl2template/types.variables.go b/hcl2template/types.variables.go index 08dd4739386..217ead99804 100644 --- a/hcl2template/types.variables.go +++ b/hcl2template/types.variables.go @@ -24,6 +24,9 @@ const badIdentifierDetail = "A name must start with a letter or underscore and m type LocalBlock struct { Name string Expr hcl.Expression + // When Sensitive is set to true, Packer will try its best to hide/obfuscate + // the variable from the output stream by replacing its text. + Sensitive bool } // VariableAssignment represents a way a variable was set: the expression @@ -246,6 +249,56 @@ var variableBlockSchema = &hcl.BodySchema{ }, } +var localBlockSchema = &hcl.BodySchema{ + Attributes: []hcl.AttributeSchema{ + { + Name: "expression", + }, + { + Name: "sensitive", + }, + }, +} + +func decodeLocalBlock(block *hcl.Block, locals []*LocalBlock) (*LocalBlock, hcl.Diagnostics) { + name := block.Labels[0] + for _, loc := range locals { + if loc.Name == name { + return nil, []*hcl.Diagnostic{{ + Severity: hcl.DiagError, + Summary: "Duplicate variable", + Detail: "Duplicate " + block.Labels[0] + " variable definition found.", + Context: block.DefRange.Ptr(), + }} + } + } + + content, diags := block.Body.Content(localBlockSchema) + if !hclsyntax.ValidIdentifier(name) { + diags = append(diags, &hcl.Diagnostic{ + Severity: hcl.DiagError, + Summary: "Invalid local name", + Detail: badIdentifierDetail, + Subject: &block.LabelRanges[0], + }) + } + + l := &LocalBlock{ + Name: name, + } + + if attr, exists := content.Attributes["sensitive"]; exists { + valDiags := gohcl.DecodeExpression(attr.Expr, nil, &l.Sensitive) + diags = append(diags, valDiags...) + } + + if def, ok := content.Attributes["expression"]; ok { + l.Expr = def.Expr + } + + return l, diags +} + // decodeVariableBlock decodes a "variable" block // ectx is passed only in the evaluation of the default value. 
func (variables *Variables) decodeVariableBlock(block *hcl.Block, ectx *hcl.EvalContext) hcl.Diagnostics { @@ -254,7 +307,6 @@ func (variables *Variables) decodeVariableBlock(block *hcl.Block, ectx *hcl.Eval } if _, found := (*variables)[block.Labels[0]]; found { - return []*hcl.Diagnostic{{ Severity: hcl.DiagError, Summary: "Duplicate variable", diff --git a/hcl2template/types.variables_test.go b/hcl2template/types.variables_test.go index a5a0d3b9360..5792967f7d6 100644 --- a/hcl2template/types.variables_test.go +++ b/hcl2template/types.variables_test.go @@ -89,6 +89,15 @@ func TestParse_variables(t *testing.T) { }}, Type: cty.String, }, + "supersecret": &Variable{ + Name: "supersecret", + Values: []VariableAssignment{{ + From: "default", + Value: cty.StringVal("secretvar"), + }}, + Type: cty.String, + Sensitive: true, + }, }, }, false, false, @@ -135,6 +144,28 @@ func TestParse_variables(t *testing.T) { []packersdk.Build{}, false, }, + {"duplicate local block", + defaultParser, + parseTestArgs{"testdata/variables/duplicate_locals", nil, nil}, + &PackerConfig{ + Basedir: "testdata/variables/duplicate_locals", + LocalVariables: Variables{ + "sensible": &Variable{ + Values: []VariableAssignment{ + { + From: "default", + Value: cty.StringVal("something"), + }, + }, + Type: cty.String, + Name: "sensible", + }, + }, + }, + true, true, + []packersdk.Build{}, + false, + }, {"invalid default type", defaultParser, parseTestArgs{"testdata/variables/invalid_default.pkr.hcl", nil, nil}, diff --git a/website/content/docs/templates/hcl_templates/locals.mdx b/website/content/docs/templates/hcl_templates/locals.mdx index b601e917456..5766ae37c53 100644 --- a/website/content/docs/templates/hcl_templates/locals.mdx +++ b/website/content/docs/templates/hcl_templates/locals.mdx @@ -21,9 +21,17 @@ Guide_](/guides/hcl/variables). ## Examples -Local values are defined in `locals` blocks: +Local values are defined in `local` or `locals` blocks: ```hcl +# Using the local block allows you to mark locals as sensitive, which will +# filter their values from logs. +local "mylocal" { + expression = "${var.secret_api_key}" + sensitive = true +} + +# Using the locals block is more compact and efficient for declaring many locals # Ids for multiple sets of EC2 instances, merged together locals { instance_ids = "${concat(aws_instance.blue.*.id, aws_instance.green.*.id)}" @@ -72,6 +80,12 @@ source "amazon-ebs" "server" { ## Description +The `local` block defines exactly one local variable within a folder. The block +label is the name of the local, and the "expression" is the expression that +should be evaluated to create the local. Using this block, you can optionally +supply a "sensitive" boolean to mark the variable as sensitive and filter it +from logs. + The `locals` block defines one or more local variables within a folder. The names given for the items in the `locals` block must be unique throughout a diff --git a/website/content/partials/from-1.5/locals/example-block.mdx b/website/content/partials/from-1.5/locals/example-block.mdx index 59ea917774a..ed151e746ca 100644 --- a/website/content/partials/from-1.5/locals/example-block.mdx +++ b/website/content/partials/from-1.5/locals/example-block.mdx @@ -6,4 +6,10 @@ locals { # locals can also be set with other variables : baz = "Foo is '${var.foo}' but not '${local.wee}'" } + +# Use the singular local block if you need to mark a local as sensitive +local "mylocal" { + expression = "${var.secret_api_key}" + sensitive = true +} ```
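
Taken together, the changes in PATCH 14/14 allow the following end-to-end usage. This is a minimal sketch rather than part of the patch: the `null` source, the `shell-local` provisioner, and the `secret_api_key` input variable are illustrative stand-ins, assuming the `local` block behaves as documented above.

```hcl
# A sensitive input variable feeding a sensitive local. Illustrative only;
# any expression can be used in the local block.
variable "secret_api_key" {
  type      = string
  sensitive = true
}

# The new singular local block; marking it sensitive asks Packer to filter
# the evaluated value from the build's log output.
local "api_password" {
  expression = "${var.secret_api_key}-password"
  sensitive  = true
}

source "null" "example" {
  communicator = "none"
}

build {
  sources = ["source.null.example"]

  provisioner "shell-local" {
    # The local is referenced like any other via the local. namespace;
    # only the log output is filtered, the value itself is usable here.
    inline = ["echo ${local.api_password}"]
  }
}
```

Because both the input variable and the local are marked sensitive, Packer should obfuscate their evaluated values in the UI output, while the underlying values remain available to the provisioner.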