diff --git a/tf-vsphere-devrc.mk.example b/tf-vsphere-devrc.mk.example
index 202ee847f..547f43200 100644
--- a/tf-vsphere-devrc.mk.example
+++ b/tf-vsphere-devrc.mk.example
@@ -38,5 +38,7 @@ export VSPHERE_DC_FOLDER       ?= dc-folder  # DC resource test folder
 export VSPHERE_ESXI_HOST       ?= esxi1      # ESXi host to work with
 export VSPHERE_HOST_NIC0       ?= vmnic0     # NIC0 for host net tests
 export VSPHERE_HOST_NIC1       ?= vmnic1     # NIC1 for host net tests
+export VSPHERE_VMFS_EXPECTED   ?= scsi-name  # Name of expected SCSI disk
+export VSPHERE_VMFS_REGEXP     ?= expr       # Regexp for SCSI disk search
 
 # vi: filetype=make
diff --git a/vsphere/data_source_vsphere_vmfs_disks.go b/vsphere/data_source_vsphere_vmfs_disks.go
new file mode 100644
index 000000000..fbe6c5106
--- /dev/null
+++ b/vsphere/data_source_vsphere_vmfs_disks.go
@@ -0,0 +1,89 @@
+package vsphere
+
+import (
+	"context"
+	"fmt"
+	"regexp"
+	"sort"
+	"time"
+
+	"github.com/hashicorp/terraform/helper/schema"
+	"github.com/hashicorp/terraform/helper/validation"
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/vim25/mo"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+func dataSourceVSphereVmfsDisks() *schema.Resource {
+	return &schema.Resource{
+		Read: dataSourceVSphereVmfsDisksRead,
+
+		Schema: map[string]*schema.Schema{
+			"host_system_id": &schema.Schema{
+				Type:        schema.TypeString,
+				Description: "The managed object ID of the host to search for disks on.",
+				Required:    true,
+			},
+			"rescan": &schema.Schema{
+				Type:        schema.TypeBool,
+				Description: "Rescan the system for disks before querying. This may lengthen the time it takes to gather information.",
+				Optional:    true,
+			},
+			"filter": &schema.Schema{
+				Type:         schema.TypeString,
+				Description:  "A regular expression to filter the disks against. Only disks with canonical names that match will be included.",
+				Optional:     true,
+				ValidateFunc: validation.ValidateRegexp,
+			},
+			"disks": &schema.Schema{
+				Type:        schema.TypeList,
+				Description: "The names of the disks discovered by the search.",
+				Computed:    true,
+				Elem:        &schema.Schema{Type: schema.TypeString},
+			},
+		},
+	}
+}
+
+func dataSourceVSphereVmfsDisksRead(d *schema.ResourceData, meta interface{}) error {
+	client := meta.(*govmomi.Client)
+	hsID := d.Get("host_system_id").(string)
+	ss, err := hostStorageSystemFromHostSystemID(client, hsID)
+	if err != nil {
+		return fmt.Errorf("error loading host storage system: %s", err)
+	}
+
+	if d.Get("rescan").(bool) {
+		ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
+		defer cancel()
+		if err := ss.RescanAllHba(ctx); err != nil {
+			return err
+		}
+	}
+
+	var hss mo.HostStorageSystem
+	ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
+	defer cancel()
+	if err := ss.Properties(ctx, ss.Reference(), nil, &hss); err != nil {
+		return fmt.Errorf("error querying storage system properties: %s", err)
+	}
+
+	// This data has no natural ID, so timestamp the read instead.
+	d.SetId(time.Now().UTC().String())
+
+	var disks []string
+	for _, sl := range hss.StorageDeviceInfo.ScsiLun {
+		if hsd, ok := sl.(*types.HostScsiDisk); ok {
+			// An empty filter matches everything, and a non-empty pattern has
+			// already passed schema validation, so the MatchString error can
+			// be safely discarded.
+			if matched, _ := regexp.MatchString(d.Get("filter").(string), hsd.CanonicalName); matched {
+				disks = append(disks, hsd.CanonicalName)
+			}
+		}
+	}
+
+	sort.Strings(disks)
+
+	if err := d.Set("disks", disks); err != nil {
+		return fmt.Errorf("error saving results to state: %s", err)
+	}
+
+	return nil
+}
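For reference, the filtering step in `dataSourceVSphereVmfsDisksRead` reduces to a type assertion over each reported LUN plus a regular-expression match on its canonical name. Below is a standalone sketch of that logic, using synthetic data in place of a real property fetch; the `filterDisks` helper and the sample disk names are illustrative, not part of this change:

```go
package main

import (
	"fmt"
	"regexp"
	"sort"

	"github.com/vmware/govmomi/vim25/mo"
	"github.com/vmware/govmomi/vim25/types"
)

// filterDisks mirrors the loop in dataSourceVSphereVmfsDisksRead: keep only
// SCSI disks whose canonical names match re, sorted for stable output.
func filterDisks(hss mo.HostStorageSystem, re *regexp.Regexp) []string {
	var disks []string
	for _, sl := range hss.StorageDeviceInfo.ScsiLun {
		if hsd, ok := sl.(*types.HostScsiDisk); ok && re.MatchString(hsd.CanonicalName) {
			disks = append(disks, hsd.CanonicalName)
		}
	}
	sort.Strings(disks)
	return disks
}

func main() {
	// Synthetic storage data standing in for the Properties() call.
	hss := mo.HostStorageSystem{
		StorageDeviceInfo: &types.HostStorageDeviceInfo{
			ScsiLun: []types.BaseScsiLun{
				&types.HostScsiDisk{ScsiLun: types.ScsiLun{CanonicalName: "mpx.vmhba1:C0:T2:L0"}},
				&types.HostScsiDisk{ScsiLun: types.ScsiLun{CanonicalName: "mpx.vmhba1:C0:T1:L0"}},
			},
		},
	}
	fmt.Println(filterDisks(hss, regexp.MustCompile(`T[12]:L0`)))
	// Prints: [mpx.vmhba1:C0:T1:L0 mpx.vmhba1:C0:T2:L0]
}
```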
diff --git a/vsphere/data_source_vsphere_vmfs_disks_test.go b/vsphere/data_source_vsphere_vmfs_disks_test.go
new file mode 100644
index 000000000..6ae1ea26e
--- /dev/null
+++ b/vsphere/data_source_vsphere_vmfs_disks_test.go
@@ -0,0 +1,123 @@
+package vsphere
+
+import (
+	"fmt"
+	"os"
+	"testing"
+
+	"github.com/hashicorp/terraform/helper/resource"
+)
+
+func TestAccDataSourceVSphereVmfsDisks(t *testing.T) {
+	// tp is assigned inside each t.Run callback below so that the PreCheck
+	// closures, which are built before the subtests execute, see the live
+	// *testing.T for the running case.
+	var tp *testing.T
+	testAccDataSourceVSphereVmfsDisksCases := []struct {
+		name     string
+		testCase resource.TestCase
+	}{
+		{
+			"basic",
+			resource.TestCase{
+				PreCheck: func() {
+					testAccPreCheck(tp)
+					testAccDataSourceVSphereVmfsDisksPreCheck(tp)
+				},
+				Providers: testAccProviders,
+				Steps: []resource.TestStep{
+					{
+						Config: testAccDataSourceVSphereVmfsDisksConfig(),
+						Check: resource.ComposeTestCheckFunc(
+							resource.TestCheckOutput("found", "true"),
+						),
+					},
+				},
+			},
+		},
+		{
+			"with regular expression",
+			resource.TestCase{
+				PreCheck: func() {
+					testAccPreCheck(tp)
+					testAccDataSourceVSphereVmfsDisksPreCheck(tp)
+				},
+				Providers: testAccProviders,
+				Steps: []resource.TestStep{
+					{
+						Config: testAccDataSourceVSphereVmfsDisksConfigRegexp(),
+						Check: resource.ComposeTestCheckFunc(
+							resource.TestCheckOutput("expected_length", "true"),
+						),
+					},
+				},
+			},
+		},
+	}
+
+	for _, tc := range testAccDataSourceVSphereVmfsDisksCases {
+		t.Run(tc.name, func(t *testing.T) {
+			tp = t
+			resource.Test(t, tc.testCase)
+		})
+	}
+}
+
+func testAccDataSourceVSphereVmfsDisksPreCheck(t *testing.T) {
+	if os.Getenv("VSPHERE_ESXI_HOST") == "" {
+		t.Skip("set VSPHERE_ESXI_HOST to run vsphere_vmfs_disks acceptance tests")
+	}
+	if os.Getenv("VSPHERE_VMFS_EXPECTED") == "" {
+		t.Skip("set VSPHERE_VMFS_EXPECTED to run vsphere_vmfs_disks acceptance tests")
+	}
+	if os.Getenv("VSPHERE_VMFS_REGEXP") == "" {
+		t.Skip("set VSPHERE_VMFS_REGEXP to run vsphere_vmfs_disks acceptance tests")
+	}
+}
+
+func testAccDataSourceVSphereVmfsDisksConfig() string {
+	return fmt.Sprintf(`
+data "vsphere_datacenter" "datacenter" {
+  name = "%s"
+}
+
+data "vsphere_host" "esxi_host" {
+  name          = "%s"
+  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
+}
+
+data "vsphere_vmfs_disks" "available" {
+  host_system_id = "${data.vsphere_host.esxi_host.id}"
+  rescan         = true
+}
+
+output "found" {
+  value = "${contains(data.vsphere_vmfs_disks.available.disks, "%s")}"
+}
+`, os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST"), os.Getenv("VSPHERE_VMFS_EXPECTED"))
+}
+
+func testAccDataSourceVSphereVmfsDisksConfigRegexp() string {
+	return fmt.Sprintf(`
+variable "regexp" {
+  type    = "string"
+  default = "%s"
+}
+
+data "vsphere_datacenter" "datacenter" {
+  name = "%s"
+}
+
+data "vsphere_host" "esxi_host" {
+  name          = "%s"
+  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
+}
+
+data "vsphere_vmfs_disks" "available" {
+  host_system_id = "${data.vsphere_host.esxi_host.id}"
+  rescan         = true
+  filter         = "${var.regexp}"
+}
+
+output "expected_length" {
+  value = "${length(data.vsphere_vmfs_disks.available.disks) == 2 ? "true" : "false"}"
+}
+`, os.Getenv("VSPHERE_VMFS_REGEXP"), os.Getenv("VSPHERE_DATACENTER"), os.Getenv("VSPHERE_ESXI_HOST"))
+}
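The pre-check above skips these tests unless the two new devrc variables are populated. A hypothetical `tf-vsphere-devrc.mk` entry, with the regular expression sized so that it matches exactly the two disks the `expected_length` check counts (both values are illustrative):

```make
export VSPHERE_VMFS_EXPECTED ?= mpx.vmhba1:C0:T1:L0     # a disk the host is known to see
export VSPHERE_VMFS_REGEXP   ?= mpx.vmhba1:C0:T[12]:L0  # must match exactly two disks
```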
diff --git a/vsphere/host_storage_system_helper.go b/vsphere/host_storage_system_helper.go
new file mode 100644
index 000000000..de2d6a5d9
--- /dev/null
+++ b/vsphere/host_storage_system_helper.go
@@ -0,0 +1,20 @@
+package vsphere
+
+import (
+	"context"
+
+	"github.com/vmware/govmomi"
+	"github.com/vmware/govmomi/object"
+)
+
+// hostStorageSystemFromHostSystemID locates a HostStorageSystem from a
+// specified HostSystem managed object ID.
+func hostStorageSystemFromHostSystemID(client *govmomi.Client, hsID string) (*object.HostStorageSystem, error) {
+	hs, err := hostSystemFromID(client, hsID)
+	if err != nil {
+		return nil, err
+	}
+	ctx, cancel := context.WithTimeout(context.Background(), defaultAPITimeout)
+	defer cancel()
+	return hs.ConfigManager().StorageSystem(ctx)
+}
diff --git a/vsphere/provider.go b/vsphere/provider.go
index 5534fb932..c2787ae4c 100644
--- a/vsphere/provider.go
+++ b/vsphere/provider.go
@@ -82,6 +82,7 @@ func Provider() terraform.ResourceProvider {
 		DataSourcesMap: map[string]*schema.Resource{
 			"vsphere_datacenter": dataSourceVSphereDatacenter(),
 			"vsphere_host":       dataSourceVSphereHost(),
+			"vsphere_vmfs_disks": dataSourceVSphereVmfsDisks(),
 		},
 
 		ConfigureFunc: providerConfigure,
diff --git a/website/docs/d/vmfs_disks.html.markdown b/website/docs/d/vmfs_disks.html.markdown
new file mode 100644
index 000000000..72858d900
--- /dev/null
+++ b/website/docs/d/vmfs_disks.html.markdown
@@ -0,0 +1,56 @@
+---
+layout: "vsphere"
+page_title: "VMware vSphere: vsphere_vmfs_disks"
+sidebar_current: "docs-vsphere-data-source-vmfs-disks"
+description: |-
+  A data source that can be used to discover storage devices for use in VMFS datastores.
+---
+
+# vsphere\_vmfs\_disks
+
+The `vsphere_vmfs_disks` data source can be used to discover the storage
+devices available on an ESXi host. This data source can be combined with the
+[`vsphere_vmfs_datastore`][data-source-vmfs-datastore] resource to create VMFS
+datastores based on a set of discovered disks.
+
+[data-source-vmfs-datastore]: /docs/providers/vsphere/r/vmfs_datastore.html
+
+## Example Usage
+
+```hcl
+data "vsphere_datacenter" "datacenter" {
+  name = "dc1"
+}
+
+data "vsphere_host" "host" {
+  name          = "esxi1"
+  datacenter_id = "${data.vsphere_datacenter.datacenter.id}"
+}
+
+data "vsphere_vmfs_disks" "available" {
+  host_system_id = "${data.vsphere_host.host.id}"
+  rescan         = true
+  filter         = "mpx.vmhba1:C0:T[12]:L0"
+}
+```
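+
+As a sketch of the combination described above, the discovered disks can be
+fed straight into a [`vsphere_vmfs_datastore`][data-source-vmfs-datastore]
+resource (see that resource's documentation for its full argument list):
+
+```hcl
+resource "vsphere_vmfs_datastore" "datastore" {
+  name           = "terraform-test"
+  host_system_id = "${data.vsphere_host.host.id}"
+  disks          = ["${data.vsphere_vmfs_disks.available.disks}"]
+}
+```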
+
+## Argument Reference
+
+The following arguments are supported:
+
+* `host_system_id` - (String, required) The managed object ID of the host to
+  look for disks on.
+* `rescan` - (Boolean, optional) Whether or not to rescan storage adapters
+  before searching for disks. This may lengthen the time it takes to perform
+  the search. Default: `false`.
+* `filter` - (String, optional) A regular expression to filter the disks
+  against. Only disks with canonical names that match will be included.
+
+~> **NOTE:** Using a `filter` is recommended if there is any chance that
+storage devices added to the host later could change the order of the output
+`disks` attribute, which is lexicographically sorted.
+
+## Attribute Reference
+
+* `disks` - (List of strings) A lexicographically sorted list of devices
+  discovered by the operation, matching the supplied `filter`, if provided.
diff --git a/website/vsphere.erb b/website/vsphere.erb
index cf3fc801e..40332b69c 100644
--- a/website/vsphere.erb
+++ b/website/vsphere.erb
@@ -19,6 +19,9 @@