diff --git a/README.md b/README.md
index 91410a14..06d0bc04 100644
--- a/README.md
+++ b/README.md
@@ -78,10 +78,21 @@ kubectl apply -f https://raw.githubusercontent.com/vmware/cloud-director-named-d
| Storage Type | Independent Shareable Named Disks of VCD |
|Provisioning| Static Provisioning<br>Dynamic Provisioning |
|Access Modes| ReadOnlyMany<br>ReadWriteOnce |
-|Volume|Block|
+|Volume||
|VolumeMode||
|Topology| Static Provisioning: reuses VCD topology capabilities<br>Dynamic Provisioning: places disk in the OVDC of the `ClusterAdminUser` based on the StorageProfile specified. |
+## Supported Bus Types
+| Bus Type | `busType` Parameter in `StorageClass` |
+| :--------- | :----------------------- |
+| Paravirtual (SCSI) | `scsi_paravirtual` (default) |
+| LSI Logic Parallel (SCSI) | `scsi_lsi_logic_parallel` |
+| LSI Logic SAS (SCSI) | `scsi_lsi_logic_sas` |
+| Bus Logic (SCSI) | `scsi_buslogic` |
+| SATA | `sata` |
+| NVME | `nvme` |
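+
+For example, a `StorageClass` that provisions NVMe-backed named disks might look like the sketch below. The class name and storage profile are illustrative, and the provisioner is assumed to be the driver's CSI name (`named-disk.csi.cloud-director.vmware.com`); substitute the values used in your own deployment.
+
+```yaml
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: nvme-storage-class                              # illustrative name
+provisioner: named-disk.csi.cloud-director.vmware.com   # assumed CSI driver name
+reclaimPolicy: Delete
+parameters:
+  storageProfile: "*"          # illustrative; use a storage profile available in your OVDC
+  filesystem: "ext4"           # optional; defaults to ext4
+  busType: "nvme"              # optional; one of the values above, defaults to scsi_paravirtual
+```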
+
+
## Contributing
Please see [CONTRIBUTING.md](CONTRIBUTING.md) for instructions on how to contribute.
diff --git a/artifacts/default-csi-node-crs-airgap.yaml.template b/artifacts/default-csi-node-crs-airgap.yaml.template
index ca233e2f..9fcead9f 100644
--- a/artifacts/default-csi-node-crs-airgap.yaml.template
+++ b/artifacts/default-csi-node-crs-airgap.yaml.template
@@ -116,6 +116,9 @@ spec:
mountPath: /etc/kubernetes/vcloud
- name: vcloud-basic-auth-volume
mountPath: /etc/kubernetes/vcloud/basic-auth
+ - mountPath: /run
+ name: host-run-dir
+ mountPropagation: "HostToContainer"
volumes:
- name: socket-dir
hostPath:
@@ -147,4 +150,7 @@ spec:
- name: vcloud-basic-auth-volume
secret:
secretName: vcloud-basic-auth
----
+ - name: host-run-dir
+ hostPath:
+ path: /run
+ type: Directory
diff --git a/go.mod b/go.mod
index 04c17b0d..d0abead4 100644
--- a/go.mod
+++ b/go.mod
@@ -23,6 +23,7 @@ require (
)
require (
+ github.com/jaypipes/ghw v0.10.0
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.19.0
k8s.io/api v0.22.1
@@ -30,12 +31,15 @@ require (
)
require (
+ github.com/StackExchange/wmi v1.2.1 // indirect
github.com/antihax/optional v1.0.0 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/araddon/dateparse v0.0.0-20190622164848-0fb0a474d195 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
+ github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-logr/logr v0.4.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
@@ -45,13 +49,16 @@ require (
github.com/hashicorp/go-version v1.2.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
+ github.com/jaypipes/pcidb v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/kr/pretty v0.2.1 // indirect
github.com/kr/text v0.2.0 // indirect
+ github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/peterhellberg/link v1.1.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
golang.org/x/net v0.7.0 // indirect
@@ -63,6 +70,7 @@ require (
google.golang.org/protobuf v1.26.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ howett.net/plist v1.0.0 // indirect
k8s.io/client-go v0.22.1 // indirect
k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
diff --git a/go.sum b/go.sum
index 181fd69f..77b738cc 100644
--- a/go.sum
+++ b/go.sum
@@ -44,9 +44,12 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/StackExchange/wmi v1.2.1 h1:VIkavFPXSjcnS+O8yTq7NI32k0R5Aj+v39y29VYDOSA=
+github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8=
github.com/akutz/gofsutil v0.1.2 h1:aCdWrZdxajx8kllNQSKaMDpRJWSE2wcyKNy7eDMXkrI=
github.com/akutz/gofsutil v0.1.2/go.mod h1:09JEF8dR0bTTZMQ1m3/+O1rqQyH2lG1ET34POnpzyxw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -72,6 +75,8 @@ github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kB
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@@ -80,12 +85,17 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
github.com/container-storage-interface/spec v1.4.0 h1:ozAshSKxpJnYUfmkpZCTYyF/4MYeYlhdXbAvPvfGmkg=
github.com/container-storage-interface/spec v1.4.0/go.mod h1:6URME8mwIBbpVyZV93Ce5St17xBiQJQY67NDsuohiy4=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
@@ -96,8 +106,11 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
+github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
+github.com/emicklei/go-restful v2.16.0+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -113,6 +126,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
+github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -126,15 +141,23 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8=
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
+github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
+github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -165,6 +188,7 @@ github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QD
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -192,6 +216,7 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -206,6 +231,7 @@ github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORR
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
@@ -233,11 +259,19 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
+github.com/jaypipes/ghw v0.10.0 h1:UHu9UX08Py315iPojADFPOkmjTsNzHj4g4adsNKKteY=
+github.com/jaypipes/ghw v0.10.0/go.mod h1:jeJGbkRB2lL3/gxYzNYzEDETV1ZJ56OKr+CSeSEym+g=
+github.com/jaypipes/pcidb v1.0.0 h1:vtZIfkiCUE42oYbJS0TAq9XSfSmcsgo9IdxSm9qzYU8=
+github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLRCuNDfk=
+github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -249,6 +283,7 @@ github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfV
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
+github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -265,6 +300,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
+github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
@@ -272,6 +308,7 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
+github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
@@ -287,6 +324,7 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
+github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
@@ -300,14 +338,18 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
github.com/onsi/ginkgo/v2 v2.1.3 h1:e/3Cwtogj0HA+25nMP1jCMDIf8RtRYbGwGGuBIFztkc=
+github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
github.com/onsi/gomega v1.19.0 h1:4ieX6qQjPP/BfC3mpsAtIGGlxTWPeA3Inl/7DtXw1tw=
github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro=
+github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@@ -315,15 +357,18 @@ github.com/peterhellberg/link v1.1.0 h1:s2+RH8EGuI/mI4QwrWGSYQCRz7uNgip9BaM04HKu
github.com/peterhellberg/link v1.1.0/go.mod h1:gtSlOT4jmkY8P47hbTc8PTgiDDWpdPbFYl75keYyBB8=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
+github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -349,19 +394,23 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.1.3 h1:xghbfqPkxzxP3C/f3n5DdpAbdKLj4ZE4BWQI362l53M=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
+github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -380,10 +429,7 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/thecodeteam/gofsutil v0.1.2 h1:FL87mBzZeeuDMZm8hpYLFcYylQdq6bbm8UQ1oc6VRMM=
github.com/thecodeteam/gofsutil v0.1.2/go.mod h1:7bDOpr2aMnmdm9RTdxBEeqdOr+8RpnQhsB/VUEI3DgM=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/vmware/cloud-provider-for-cloud-director v0.0.0-20230316164812-df9d4ddc9292 h1:Uh7EE1UjpBBfyKQEiPMgXEP3rpcojsUDvMyzEkGUxXA=
-github.com/vmware/cloud-provider-for-cloud-director v0.0.0-20230316164812-df9d4ddc9292/go.mod h1:B7LWK1eIP4gYf5grYWNCwMuMkaGqXJ3BJj+ZNe1Gbpw=
-github.com/vmware/cloud-provider-for-cloud-director v0.0.0-20230417202714-aa24a76e6138 h1:CPmJ54U6rFzedOAXgYAeZqTJrtga9I8+1spBghS1mrQ=
-github.com/vmware/cloud-provider-for-cloud-director v0.0.0-20230417202714-aa24a76e6138/go.mod h1:B7LWK1eIP4gYf5grYWNCwMuMkaGqXJ3BJj+ZNe1Gbpw=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/vmware/cloud-provider-for-cloud-director v0.0.0-20230420174123-f285168de9e5 h1:Ech3c2JxElUoXy3Rj77NivBZT8+7uB+bcyRSwgLLfL0=
github.com/vmware/cloud-provider-for-cloud-director v0.0.0-20230420174123-f285168de9e5/go.mod h1:B7LWK1eIP4gYf5grYWNCwMuMkaGqXJ3BJj+ZNe1Gbpw=
github.com/vmware/go-vcloud-director/v2 v2.14.0-rc.3 h1:VJolXzgomaRPrgzSr0EduuUtJIJEf5RdoLbktZFQqIc=
@@ -394,13 +440,23 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
@@ -413,6 +469,7 @@ go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16g
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
@@ -426,6 +483,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -458,6 +517,7 @@ golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -493,8 +553,14 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@@ -515,6 +581,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -532,6 +599,7 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -556,21 +624,29 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -580,15 +656,18 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -605,6 +684,7 @@ golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -635,6 +715,7 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -684,6 +765,7 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -737,9 +819,12 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
+gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
+gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v1 v1.0.0-20140924161607-9f9df34309c0/go.mod h1:WDnlLJ4WF5VGsH/HVa3CI79GS0ol3YnhVnKP89i0kNg=
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -764,14 +849,19 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
+howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM=
+howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g=
k8s.io/api v0.22.1 h1:ISu3tD/jRhYfSW8jI/Q1e+lRxkR7w9UwQEZ7FgslrwY=
k8s.io/api v0.22.1/go.mod h1:bh13rkTp3F1XEaLGykbyRD2QaTTzPm0e/BMd8ptFONY=
k8s.io/apimachinery v0.22.1 h1:DTARnyzmdHMz7bFWFDDm22AM4pLWTQECMpRTFu2d2OM=
k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apiserver v0.22.1/go.mod h1:2mcM6dzSt+XndzVQJX21Gx0/Klo7Aen7i0Ai6tIa400=
k8s.io/client-go v0.22.1 h1:jW0ZSHi8wW260FvcXHkIa0NLxFBQszTlhiAVsU5mopw=
k8s.io/client-go v0.22.1/go.mod h1:BquC5A4UOo4qVDUtoc04/+Nxp1MeHcVc1HJm1KmG8kk=
+k8s.io/cloud-provider v0.22.1/go.mod h1:Dm3xJ4j3l88rZ0LBCRLrt7V9Pz0avRAzZSU6ENwYnrw=
k8s.io/component-base v0.22.1 h1:SFqIXsEN3v3Kkr1bS6rstrs1wd45StJqbtgbQ4nRQdo=
k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo=
+k8s.io/controller-manager v0.22.1/go.mod h1:HN5qzvZs8A4fd/xuqDZwqe+Nsz249a2Kbq/YqZ903n8=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
@@ -785,6 +875,7 @@ k8s.io/utils v0.0.0-20210707171843-4b05e18ac7d9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
diff --git a/manifests/csi-node-crs.yaml b/manifests/csi-node-crs.yaml
index f9fef431..a16434c2 100644
--- a/manifests/csi-node-crs.yaml
+++ b/manifests/csi-node-crs.yaml
@@ -116,6 +116,9 @@ spec:
mountPath: /etc/kubernetes/vcloud
- name: vcloud-basic-auth-volume
mountPath: /etc/kubernetes/vcloud/basic-auth
+ - mountPath: /run
+ name: host-run-dir
+ mountPropagation: "HostToContainer"
volumes:
- name: socket-dir
hostPath:
@@ -147,4 +150,7 @@ spec:
- name: vcloud-basic-auth-volume
secret:
secretName: vcloud-basic-auth
----
+ - name: host-run-dir
+ hostPath:
+ path: /run
+ type: Directory
diff --git a/manifests/csi-node-crs.yaml.template b/manifests/csi-node-crs.yaml.template
index a02017db..c6421fee 100644
--- a/manifests/csi-node-crs.yaml.template
+++ b/manifests/csi-node-crs.yaml.template
@@ -116,6 +116,9 @@ spec:
mountPath: /etc/kubernetes/vcloud
- name: vcloud-basic-auth-volume
mountPath: /etc/kubernetes/vcloud/basic-auth
+ - mountPath: /run
+ name: host-run-dir
+ mountPropagation: "HostToContainer"
volumes:
- name: socket-dir
hostPath:
@@ -147,4 +150,7 @@ spec:
- name: vcloud-basic-auth-volume
secret:
secretName: vcloud-basic-auth
----
+ - name: host-run-dir
+ hostPath:
+ path: /run
+ type: Directory
diff --git a/manifests/csi-node.yaml b/manifests/csi-node.yaml
index acd10441..541a7671 100644
--- a/manifests/csi-node.yaml
+++ b/manifests/csi-node.yaml
@@ -111,6 +111,9 @@ spec:
mountPath: /etc/kubernetes/vcloud
- name: vcloud-basic-auth-volume
mountPath: /etc/kubernetes/vcloud/basic-auth
+ - mountPath: /run
+ name: host-run-dir
+ mountPropagation: "HostToContainer"
volumes:
- name: socket-dir
hostPath:
@@ -142,4 +145,7 @@ spec:
- name: vcloud-basic-auth-volume
secret:
secretName: vcloud-basic-auth
----
+ - name: host-run-dir
+ hostPath:
+ path: /run
+ type: Directory
diff --git a/manifests/csi-node.yaml.template b/manifests/csi-node.yaml.template
index 1a7b2021..40a7d39d 100644
--- a/manifests/csi-node.yaml.template
+++ b/manifests/csi-node.yaml.template
@@ -111,6 +111,9 @@ spec:
mountPath: /etc/kubernetes/vcloud
- name: vcloud-basic-auth-volume
mountPath: /etc/kubernetes/vcloud/basic-auth
+ - mountPath: /run
+ name: host-run-dir
+ mountPropagation: "HostToContainer"
volumes:
- name: socket-dir
hostPath:
@@ -142,4 +145,7 @@ spec:
- name: vcloud-basic-auth-volume
secret:
secretName: vcloud-basic-auth
----
+ - name: host-run-dir
+ hostPath:
+ path: /run
+ type: Directory
diff --git a/pkg/csi/controller.go b/pkg/csi/controller.go
index 27dc1239..8391adb1 100644
--- a/pkg/csi/controller.go
+++ b/pkg/csi/controller.go
@@ -8,6 +8,8 @@ package csi
import (
"context"
"fmt"
+ "math"
+
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdcsiclient"
@@ -16,7 +18,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"k8s.io/klog"
- "math"
)
const (
@@ -27,7 +28,6 @@ const (
const (
BusTypeParameter = "busType"
- BusSubTypeParameter = "busSubType"
StorageProfileParameter = "storageProfile"
FileSystemParameter = "filesystem"
EphemeralVolumeContext = "csi.storage.k8s.io/ephemeral"
@@ -36,15 +36,8 @@ const (
VMFullNameAttribute = "vmID"
DiskUUIDAttribute = "diskUUID"
FileSystemAttribute = "filesystem"
-)
-
-var (
- // BusTypesFromValues is a map of different possible BusTypes from id to string
- BusTypesFromValues = map[string]string{
- "5": "IDE",
- "6": "SCSI",
- "20": "SATA",
- }
+ DefaultFSType = "ext4"
+ DefaultBusType = "scsi_paravirtual"
)
type controllerServer struct {
@@ -97,7 +90,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context,
}
volumeCapabilities := req.GetVolumeCapabilities()
- if volumeCapabilities == nil || len(volumeCapabilities) == 0 {
+ if len(volumeCapabilities) == 0 { // len() of a nil slice is zero, so a separate nil check is redundant (staticcheck S1009)
return nil, status.Error(codes.InvalidArgument, "CreateVolume: VolumeCapabilities should be provided")
}
for _, volumeCapability := range volumeCapabilities {
@@ -117,39 +110,51 @@ func (cs *controllerServer) CreateVolume(ctx context.Context,
klog.Infof("CreateVolume: requesting volume [%s] with size [%d] MiB, shareable [%v]",
diskName, sizeMB, shareable)
- busType := vcdcsiclient.VCDBusTypeSCSI
- busSubType := vcdcsiclient.VCDBusSubTypeVirtualSCSI
+ var (
+ storageProfile string
+ fsType string
+ busType string
+ tuple vcdcsiclient.BusTuple
+ ok bool
+ )
- storageProfile, _ := req.Parameters[StorageProfileParameter]
+ if busType, ok = req.Parameters[BusTypeParameter]; !ok {
+ busType = DefaultBusType
+ klog.Infof("No parameter [%s] specified for raw disk [%s]. Hence defaulting to [%s].", BusTypeParameter, diskName, DefaultBusType)
+ }
- disk, err := cs.DiskManager.CreateDisk(diskName, sizeMB, busType,
- busSubType, cs.DiskManager.ClusterID, storageProfile, shareable)
+ tuple, ok = vcdcsiclient.BusTypesSet[busType]
+ if !ok {
+ return nil, fmt.Errorf("invalid busType: [%s]", busType)
+ }
+ if storageProfile, ok = req.Parameters[StorageProfileParameter]; !ok {
+ klog.Infof("No parameter [%s] specified for raw disk [%s]. ", StorageProfileParameter, diskName)
+ }
+
+ if fsType, ok = req.Parameters[FileSystemParameter]; !ok {
+ fsType = DefaultFSType
+ klog.Infof("No parameter [%s] specified for raw disk [%s]. Hence defaulting to [%s].", FileSystemParameter, diskName, DefaultFSType)
+ }
+
+ disk, err := cs.DiskManager.CreateDisk(diskName, sizeMB, tuple.BusType, tuple.BusSubType, "", storageProfile, shareable)
if err != nil {
if rdeErr := cs.DiskManager.AddToErrorSet(util.DiskCreateError, "", diskName, map[string]interface{}{"Detailed Error": err.Error()}); rdeErr != nil {
klog.Errorf("unable to add error [%s] into [CSI.Errors] in RDE [%s], %v", util.DiskCreateError, cs.DiskManager.ClusterID, rdeErr)
}
- return nil, fmt.Errorf("unable to create disk [%s] with sise [%d]MB: [%v]",
- diskName, sizeMB, err)
+ return nil, fmt.Errorf("unable to create disk [%s] with size [%d]MB: [%v]", diskName, sizeMB, err)
}
if removeErrorRdeErr := cs.DiskManager.RemoveFromErrorSet(util.DiskCreateError, "", diskName); removeErrorRdeErr != nil {
klog.Errorf("unable to remove error [%s] from [CSI.Errors] in RDE [%s]", util.DiskCreateError, cs.DiskManager.ClusterID)
}
klog.Infof("Successfully created disk [%s] of size [%d]MB", diskName, sizeMB)
- attributes := make(map[string]string)
- attributes[BusTypeParameter] = BusTypesFromValues[disk.BusType]
- attributes[BusSubTypeParameter] = disk.BusSubType
- attributes[StorageProfileParameter] = disk.StorageProfile.Name
- attributes[DiskIDAttribute] = disk.Id
-
- fsType := ""
- ok := false
- if fsType, ok = req.Parameters[FileSystemParameter]; !ok {
- fsType = "ext4"
- klog.Infof("No FS specified for raw disk [%s]. Hence defaulting to [%s].", diskName, fsType)
+ attributes := map[string]string{
+ BusTypeParameter: vcdcsiclient.BusSubTypesFromValues[disk.BusSubType], // BusSubType identifies the bus more precisely than BusType, since SATA and NVMe share the same BusType ID (20)
+ StorageProfileParameter: disk.StorageProfile.Name,
+ DiskIDAttribute: disk.Id,
+ FileSystemParameter: fsType,
}
- attributes[FileSystemParameter] = fsType
resp := &csi.CreateVolumeResponse{
Volume: &csi.Volume{
diff --git a/pkg/csi/node.go b/pkg/csi/node.go
index a2356b57..f91865a2 100644
--- a/pkg/csi/node.go
+++ b/pkg/csi/node.go
@@ -8,15 +8,16 @@ package csi
import (
"context"
"fmt"
- "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
"os"
- "os/exec"
"path/filepath"
"regexp"
"strings"
+ "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
+
"github.com/akutz/gofsutil"
"github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/jaypipes/ghw"
"golang.org/x/sys/unix"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -29,7 +30,7 @@ const (
// is pre-allocated for the HBA. Hence we have only 15 disks.
maxVolumesPerNode = 15
- DevDiskPath = "/dev/disk/by-path"
+ DevDiskPath = "/dev/"
ScsiHostPath = "/sys/class/scsi_host"
HostNameRegexPattern = "^host[0-9]+"
)
@@ -474,73 +475,23 @@ func (ns *nodeService) rescanDiskInVM(ctx context.Context) error {
return nil
}
-// getDiskPath looks for a device corresponding to vmName:diskName as stored in vSphere. It
-// enumerates devices in /dev/disk/by-path and returns a device with UUID matching the scsi UUID.
-// It needs disk.enableUUID to be set for the VM.
-func (ns *nodeService) getDiskPath(ctx context.Context, vmFullName string, diskUUID string) (string, error) {
-
- if diskUUID == "" {
- return "", fmt.Errorf("diskUUID should not be an empty string")
+// getDiskPath looks for a disk corresponding to vmName:diskName as stored in vSphere.
+// It scans the node's block devices and returns the path of the disk whose serial number matches the UUID.
+// It needs disk.enableUUID to be set for the VM, and the host's /run directory must be propagated into the container.
+func (ns *nodeService) getDiskPath(ctx context.Context, vmFullName, diskUUID string) (string, error) {
+ block, err := ghw.Block()
+ if err != nil {
+ return "", fmt.Errorf("error getting block storage info: %w", err)
}
-
hexDiskUUID := strings.ReplaceAll(diskUUID, "-", "")
-
- guestDiskPath := ""
- err := filepath.Walk(DevDiskPath, func(path string, fi os.FileInfo, err error) error {
- if err != nil {
- return nil
- }
- if guestDiskPath != "" {
- return nil
- }
- if fi.IsDir() {
- return nil
+ for _, disk := range block.Disks {
+ if disk.SerialNumber == hexDiskUUID {
+ klog.Infof("Obtained matching disk [%s%s] with [%s] controller\n", DevDiskPath, disk.Name, disk.StorageController)
+ return DevDiskPath + disk.Name, nil
}
-
- fileToProcess := path
- if fi.Mode()&os.ModeSymlink != 0 {
- dst, err := filepath.EvalSymlinks(path)
- if err != nil {
- klog.Infof("Error accessing file [%s]: [%v]", path, err)
- return nil
- }
- fileToProcess = dst
- }
- if fileToProcess == "" {
- return nil
- }
-
- klog.Infof("Checking file: [%s] => [%s]\n", path, fileToProcess)
- outBytes, err := exec.Command(
- "/lib/udev/scsi_id",
- "--page=0x83",
- "--whitelisted",
- fmt.Sprintf("--device=%v", fileToProcess)).CombinedOutput()
- if err != nil {
- klog.Infof("Encountered error while processing file [%s]: [%v]", fileToProcess, err)
- klog.Infof("Please check if the `disk.enableUUID` parameter is set to 1 for the VM in VC config.")
- return nil
- }
- out := strings.TrimSpace(string(outBytes))
- if len(out) == 33 {
- out = out[1:]
- } else if len(out) != 32 {
- klog.Infof("Obtained uuid with incorrect length: [%s]", out)
- return nil
- }
-
- if strings.ToLower(out) == strings.ToLower(hexDiskUUID) {
- guestDiskPath = fileToProcess
- }
-
- return nil
- })
- if err != nil {
- return "", fmt.Errorf("could not create filepath.Walk for [%s]: [%v]", DevDiskPath, err)
}
- klog.Infof("Obtained matching disk [%s]", guestDiskPath)
- return guestDiskPath, nil
+ return "", nil
}
func (ns *nodeService) isVolumeReadOnly(capability *csi.VolumeCapability) bool {
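For context rather than as part of the patch: a minimal standalone sketch, using only the jaypipes/ghw calls already shown above, that prints the serial number ghw reports for each block device. This is the value the rewritten getDiskPath compares against the hex form of the VCD disk UUID, so it can help verify that disk.enableUUID is in effect and that /run is propagated into the container.

```go
// Illustrative sketch only: list block devices and the serial numbers that
// getDiskPath matches against the hex form of the VCD disk UUID.
package main

import (
	"fmt"

	"github.com/jaypipes/ghw"
)

func main() {
	block, err := ghw.Block()
	if err != nil {
		panic(err)
	}
	for _, disk := range block.Disks {
		// SerialNumber typically requires disk.enableUUID to be set on the VM.
		fmt.Printf("/dev/%s serial=%s controller=%s\n",
			disk.Name, disk.SerialNumber, disk.StorageController)
	}
}
```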
diff --git a/pkg/vcdcsiclient/disks.go b/pkg/vcdcsiclient/disks.go
index 3e0dcc49..0c6463b5 100644
--- a/pkg/vcdcsiclient/disks.go
+++ b/pkg/vcdcsiclient/disks.go
@@ -9,6 +9,10 @@ import (
"context"
"encoding/json"
"fmt"
+ "net/http"
+ "strings"
+ "time"
+
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdtypes"
"github.com/vmware/cloud-director-named-disk-csi-driver/version"
@@ -17,9 +21,6 @@ import (
"github.com/vmware/go-vcloud-director/v2/govcd"
"github.com/vmware/go-vcloud-director/v2/types/v56"
"k8s.io/klog"
- "net/http"
- "strings"
- "time"
)
type DiskManager struct {
@@ -28,9 +29,35 @@ type DiskManager struct {
}
const (
- VCDBusTypeSCSI = "6"
- VCDBusSubTypeVirtualSCSI = "VirtualSCSI"
- NoRdePrefix = `NO_RDE_`
+ NoRdePrefix = `NO_RDE_`
+)
+
+type BusTuple struct {
+ BusType, BusSubType string
+}
+
+var (
+ // BusTypesSet maps each busType value accepted in the StorageClass to its BusType/BusSubType pair
+ BusTypesSet = map[string]BusTuple{
+ "sata": {"20", "vmware.sata.ahci"},
+ "nvme": {"20", "vmware.nvme.controller"},
+ "scsi_paravirtual": {"6", "VirtualSCSI"},
+ "scsi_lsi_logic_parallel": {"6", "lsilogic"},
+ "scsi_lsi_logic_sas": {"6", "lsilogicsas"},
+ "scsi_buslogic": {"6", "buslogic"},
+ }
+
+ /* BusSubTypesFromValues resolves a readable bus type name from the BusSubType ID.
+ The BusSubType ID is used because it is unique, whereas SATA and NVME share the same BusType ID.
+ */
+ BusSubTypesFromValues = map[string]string{
+ "vmware.sata.ahci": "sata",
+ "vmware.nvme.controller": "nvme",
+ "lsilogic": "lsi logic parallel (scsi)",
+ "lsilogicsas": "lsi logic sas (scsi)",
+ "VirtualSCSI": "paravirtual (scsi)",
+ "buslogic": "bus logic (scsi)",
+ }
)
// Returns a Disk structure as JSON
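For context rather than as part of the patch: a minimal sketch of how a StorageClass `busType` parameter could be resolved into the BusType/BusSubType pair that `CreateDisk` expects, using the `BusTuple` and `BusTypesSet` definitions above. The helper name `resolveBusType` is illustrative and not code from this repository; the parameter key and the paravirtual SCSI default follow the sample StorageClasses.

```go
// Illustrative sketch only: resolveBusType is a hypothetical helper (assumes
// the vcdcsiclient package above). It maps the StorageClass "busType"
// parameter to the BusType/BusSubType pair passed to DiskManager.CreateDisk,
// defaulting to paravirtual SCSI when the parameter is omitted.
func resolveBusType(params map[string]string) (BusTuple, error) {
	name, ok := params["busType"]
	if !ok {
		name = "scsi_paravirtual" // default bus type for the driver
	}
	tuple, ok := BusTypesSet[name]
	if !ok {
		return BusTuple{}, fmt.Errorf("unsupported busType [%s]", name)
	}
	return tuple, nil
}
```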
diff --git a/pkg/vcdcsiclient/disks_system_test.go b/pkg/vcdcsiclient/disks_system_test.go
index 528fe202..094549e6 100644
--- a/pkg/vcdcsiclient/disks_system_test.go
+++ b/pkg/vcdcsiclient/disks_system_test.go
@@ -9,16 +9,18 @@ import (
"context"
"encoding/json"
"fmt"
- "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
- "github.com/vmware/cloud-provider-for-cloud-director/pkg/vcdsdk"
- swaggerClient "github.com/vmware/cloud-provider-for-cloud-director/pkg/vcdswaggerclient"
- "gopkg.in/yaml.v3"
"io/ioutil"
"net/http"
"path/filepath"
"testing"
"github.com/google/uuid"
+ "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
+ "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdtypes"
+ "github.com/vmware/cloud-provider-for-cloud-director/pkg/vcdsdk"
+ swaggerClient "github.com/vmware/cloud-provider-for-cloud-director/pkg/vcdswaggerclient"
+ "gopkg.in/yaml.v3"
+
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -74,6 +76,7 @@ func TestUpdateRDE(t *testing.T) {
require.NotNil(t, vcdClient, "VCD DiskManager should not be nil")
diskManager.UpgradeRDEPersistentVolumes()
}
+
func TestDiskCreateAttach(t *testing.T) {
diskManager := new(DiskManager)
@@ -89,104 +92,156 @@ func TestDiskCreateAttach(t *testing.T) {
cloudConfig, err := getTestConfig()
assert.NoError(t, err, "There should be no error opening and parsing cloud config file contents.")
- // get client
- vcdClient, err := getTestVCDClient(cloudConfig, map[string]interface{}{
- "getVdcClient": true,
- "user": authDetails.Username,
- "secret": authDetails.Password,
- "userOrg": authDetails.UserOrg,
- })
- diskManager.VCDClient = vcdClient
- vAppName := cloudConfig.VCD.VAppName
- diskManager.ClusterID = cloudConfig.ClusterID
- assert.NoError(t, err, "Unable to get VCD client")
- require.NotNil(t, vcdClient, "VCD DiskManager should not be nil")
-
- _, err = vcdClient.VDC.FindStorageProfileReference("dev")
- assert.Errorf(t, err, "unable to find storage profile reference")
-
- _, err = vcdClient.VDC.FindStorageProfileReference("*")
- assert.NoErrorf(t, err, "unable to find storage profile reference")
-
- // create disk with bad storage profile: should not succeed
- diskName := fmt.Sprintf("test-pvc-%s", uuid.New().String())
- disk, err := diskManager.CreateDisk(diskName, 100, VCDBusTypeSCSI, VCDBusSubTypeVirtualSCSI,
- "", "dev", true)
- assert.Errorf(t, err, "should not be able to create disk with storage profile [dev]")
- assert.Nil(t, disk, "disk created should be nil")
-
- // create disk
- diskName = fmt.Sprintf("test-pvc-%s", uuid.New().String())
- // diskName = "test-pvc-29830aa7-377e-4496-b87d-41f2e50a5491"
- disk, err = diskManager.CreateDisk(diskName, 100, VCDBusTypeSCSI, VCDBusSubTypeVirtualSCSI,
- "", "*", true)
- assert.NoErrorf(t, err, "unable to create disk with name [%s]", diskName)
- require.NotNil(t, disk, "disk created should not be nil")
- assert.NotNil(t, disk.UUID, "disk UUID should not be nil")
-
- // try to create same disk with same parameters: should succeed
- disk, err = diskManager.CreateDisk(diskName, 100, VCDBusTypeSCSI, VCDBusSubTypeVirtualSCSI,
- "", "*", true)
- assert.NoError(t, err, "unable to create disk again with name [%s]", diskName)
- require.NotNil(t, disk, "disk created should not be nil")
-
- // Check RDE was updated with PV
- clusterOrg, err := diskManager.VCDClient.VCDClient.GetOrgByName(diskManager.VCDClient.ClusterOrgName)
- assert.NoError(t, err, "unable to get org by name [%s]", diskManager.VCDClient.ClusterOrgName)
- assert.NotNil(t, clusterOrg, "retrieved org is nil for org name [%s]", diskManager.VCDClient.ClusterOrgName)
- assert.NotNil(t, clusterOrg.Org, "retrieved org is nil for org name [%s]", diskManager.VCDClient.ClusterOrgName)
-
- defEnt, _, _, err := diskManager.VCDClient.APIClient.DefinedEntityApi.GetDefinedEntity(context.TODO(),
- diskManager.ClusterID, clusterOrg.Org.ID)
- assert.NoError(t, err, "unable to get RDE")
-
- currRDEPvs, err := GetCAPVCDRDEPersistentVolumes(&defEnt)
- assert.NoError(t, err, "unable to get RDE PVs after creating disk")
- assert.Equal(t, true, foundStringInSlice(disk.Name, currRDEPvs), "Disk Id should be found in RDE")
-
- // try to create same disk with different parameters; should not succeed
- disk1, err := diskManager.CreateDisk(diskName, 1000, VCDBusTypeSCSI, VCDBusSubTypeVirtualSCSI,
- "", "", true)
- assert.Error(t, err, "should not be able to create same disk with different parameters")
- assert.Nil(t, disk1, "disk should not be created")
-
- // get VM nodeID should be the existing VM name
- nodeID := "capi-cluster-2-md0-85c8585c96-8bqj2"
-
- vdcManager, err := vcdsdk.NewVDCManager(diskManager.VCDClient, diskManager.VCDClient.ClusterOrgName, diskManager.VCDClient.ClusterOVDCName)
- assert.NoError(t, err, "unable to get vdcManager")
- // Todo find a suitable way to handle cluster
- vm, err := vdcManager.FindVMByName(vAppName, nodeID)
- require.NoError(t, err, "unable to find VM [%s] by name", nodeID)
- require.NotNil(t, vm, "vm should not be nil")
-
- // attach to VM
- err = diskManager.AttachVolume(vm, disk)
- assert.NoError(t, err, "unable to attach disk [%s] to vm [%#v]", disk.Name, vm)
-
- attachedVMs, err := diskManager.govcdAttachedVM(disk)
- assert.NoError(t, err, "unable to get VMs attached to disk [%#v]", disk)
- assert.NotNil(t, attachedVMs, "VM [%s] should be returned", nodeID)
- assert.EqualValues(t, len(attachedVMs), 1, "[%d] VM(s) should be returned", 1)
- assert.EqualValues(t, attachedVMs[0].Name, nodeID, "VM Name should be [%s]", nodeID)
-
- err = diskManager.DetachVolume(vm, disk.Name)
- assert.NoError(t, err, "unable to detach disk [%s] from vm [%#v]", disk.Name, vm)
-
- attachedVMs, err = diskManager.govcdAttachedVM(disk)
- assert.NoError(t, err, "unable to get VMs attached to disk [%#v]", disk)
- assert.Nil(t, attachedVMs, "no VM should be returned", nodeID)
-
- err = diskManager.DeleteDisk(diskName)
- assert.NoError(t, err, "unable to delete disk [%s]", disk.Name)
-
- // Check PV was removed from RDE
- defEnt, _, _, err = diskManager.VCDClient.APIClient.DefinedEntityApi.GetDefinedEntity(context.TODO(),
- diskManager.ClusterID, clusterOrg.Org.ID)
- assert.NoError(t, err, "unable to get RDE")
- currRDEPvs, err = GetCAPVCDRDEPersistentVolumes(&defEnt)
- assert.NoError(t, err, "unable to get RDE PVs after deleting disk")
- assert.False(t, foundStringInSlice(disk.Name, currRDEPvs), "Disk Id should not be found in RDE")
+ type args struct {
+ storageProfile string
+ nodeID string
+ busTuple BusTuple
+ }
+ tests := []struct {
+ name string
+ args args
+ want *vcdtypes.Disk
+ wantErr bool
+ }{
+ {
+ name: "SATA",
+ args: args{
+ storageProfile: "*",
+ nodeID: "capi-cluster-2-md0-85c8585c96-8bqj2",
+ busTuple: BusTypesSet["sata"],
+ },
+ },
+ {
+ name: "Paravirtual(SCSI)",
+ args: args{
+ storageProfile: "*",
+ nodeID: "capi-cluster-2-md0-85c8585c96-8bqj2",
+ busTuple: BusTypesSet["scsi_paravirtual"],
+ },
+ },
+ {
+ name: "NVME",
+ args: args{
+ storageProfile: "*",
+ nodeID: "capi-cluster-2-md0-85c8585c96-8bqj2",
+ busTuple: BusTypesSet["nvme"],
+ },
+ },
+ {
+ name: "LSI Logic Parallel (SCSI)",
+ args: args{
+ storageProfile: "*",
+ nodeID: "capi-cluster-2-md0-85c8585c96-8bqj2",
+ busTuple: BusTypesSet["scsi_lsi_logic_parallel"],
+ },
+ },
+ {
+ name: "LSI Logic SAS (SCSI)",
+ args: args{
+ storageProfile: "*",
+ nodeID: "capi-cluster-2-md0-85c8585c96-8bqj2",
+ busTuple: BusTypesSet["scsi_lsi_logic_sas"],
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ // get client
+ vcdClient, err := getTestVCDClient(cloudConfig, map[string]interface{}{
+ "getVdcClient": true,
+ "user": authDetails.Username,
+ "secret": authDetails.Password,
+ "userOrg": authDetails.UserOrg,
+ })
+ diskManager.VCDClient = vcdClient
+ vAppName := cloudConfig.VCD.VAppName
+ diskManager.ClusterID = cloudConfig.ClusterID
+ assert.NoError(t, err, "Unable to get VCD client")
+ require.NotNil(t, vcdClient, "VCD DiskManager should not be nil")
+
+ _, err = vcdClient.VDC.FindStorageProfileReference("dev")
+ assert.Errorf(t, err, "unable to find storage profile reference")
+
+ _, err = vcdClient.VDC.FindStorageProfileReference(tt.args.storageProfile)
+ assert.NoErrorf(t, err, "unable to find storage profile reference")
+
+ diskName := fmt.Sprintf("test-pvc-%s", uuid.New().String())
+
+ // create disk with bad storage profile: should not succeed
+ disk, err := diskManager.CreateDisk(diskName, 100, tt.args.busTuple.BusType, tt.args.busTuple.BusSubType,
+ "", "dev", true)
+ assert.Errorf(t, err, "should not be able to create disk with storage profile [dev]")
+ assert.Nil(t, disk, "disk created should be nil")
+
+ // create disk
+ diskName = fmt.Sprintf("test-pvc-%s", uuid.New().String())
+
+ disk, err = diskManager.CreateDisk(diskName, 100, tt.args.busTuple.BusType, tt.args.busTuple.BusSubType, "", tt.args.storageProfile, true)
+ assert.NoErrorf(t, err, "unable to create disk with name [%s]", diskName)
+ require.NotNil(t, disk, "disk created should not be nil")
+ assert.NotNil(t, disk.UUID, "disk UUID should not be nil")
+
+ // try to create same disk with same parameters: should succeed
+ disk, err = diskManager.CreateDisk(diskName, 100, tt.args.busTuple.BusType, tt.args.busTuple.BusSubType, "", tt.args.storageProfile, true)
+ assert.NoError(t, err, "unable to create disk again with name [%s]", diskName)
+ require.NotNil(t, disk, "disk created should not be nil")
+
+ // Check RDE was updated with PV
+ clusterOrg, err := diskManager.VCDClient.VCDClient.GetOrgByName(diskManager.VCDClient.ClusterOrgName)
+ assert.NoError(t, err, "unable to get org by name [%s]", diskManager.VCDClient.ClusterOrgName)
+ assert.NotNil(t, clusterOrg, "retrieved org is nil for org name [%s]", diskManager.VCDClient.ClusterOrgName)
+ assert.NotNil(t, clusterOrg.Org, "retrieved org is nil for org name [%s]", diskManager.VCDClient.ClusterOrgName)
+
+ defEnt, _, _, err := diskManager.VCDClient.APIClient.DefinedEntityApi.GetDefinedEntity(context.TODO(),
+ diskManager.ClusterID, clusterOrg.Org.ID)
+ assert.NoError(t, err, "unable to get RDE")
+
+ currRDEPvs, err := GetCAPVCDRDEPersistentVolumes(&defEnt)
+ assert.NoError(t, err, "unable to get RDE PVs after creating disk")
+ assert.Equal(t, true, foundStringInSlice(disk.Name, currRDEPvs), "Disk Id should be found in RDE")
+
+ // try to create same disk with different parameters; should not succeed
+ disk1, err := diskManager.CreateDisk(diskName, 1000, tt.args.busTuple.BusType, tt.args.busTuple.BusSubType, "", "", true)
+ assert.Error(t, err, "should not be able to create same disk with different parameters")
+ assert.Nil(t, disk1, "disk should not be created")
+
+ // get VM nodeID should be the existing VM name
+ vdcManager, err := vcdsdk.NewVDCManager(diskManager.VCDClient, diskManager.VCDClient.ClusterOrgName, diskManager.VCDClient.ClusterOVDCName)
+ assert.NoError(t, err, "unable to get vdcManager")
+ // Todo find a suitable way to handle cluster
+ vm, err := vdcManager.FindVMByName(vAppName, tt.args.nodeID)
+ require.NoError(t, err, "unable to find VM [%s] by name", tt.args.nodeID)
+ require.NotNil(t, vm, "vm should not be nil")
+
+ // attach to VM
+ err = diskManager.AttachVolume(vm, disk)
+ assert.NoError(t, err, "unable to attach disk [%s] to vm [%#v]", disk.Name, vm)
+
+ attachedVMs, err := diskManager.govcdAttachedVM(disk)
+ assert.NoError(t, err, "unable to get VMs attached to disk [%#v]", disk)
+ assert.NotNil(t, attachedVMs, "VM [%s] should be returned", tt.args.nodeID)
+ assert.EqualValues(t, len(attachedVMs), 1, "[%d] VM(s) should be returned", 1)
+ assert.EqualValues(t, attachedVMs[0].Name, tt.args.nodeID, "VM Name should be [%s]", tt.args.nodeID)
+
+ err = diskManager.DetachVolume(vm, disk.Name)
+ assert.NoError(t, err, "unable to detach disk [%s] from vm [%#v]", disk.Name, vm)
+
+ attachedVMs, err = diskManager.govcdAttachedVM(disk)
+ assert.NoError(t, err, "unable to get VMs attached to disk [%#v]", disk)
+ assert.Nil(t, attachedVMs, "no VM should be returned", tt.args.nodeID)
+
+ err = diskManager.DeleteDisk(diskName)
+ assert.NoError(t, err, "unable to delete disk [%s]", disk.Name)
+
+ // Check PV was removed from RDE
+ defEnt, _, _, err = diskManager.VCDClient.APIClient.DefinedEntityApi.GetDefinedEntity(context.TODO(),
+ diskManager.ClusterID, clusterOrg.Org.ID)
+ assert.NoError(t, err, "unable to get RDE")
+ currRDEPvs, err = GetCAPVCDRDEPersistentVolumes(&defEnt)
+ assert.NoError(t, err, "unable to get RDE PVs after deleting disk")
+ assert.False(t, foundStringInSlice(disk.Name, currRDEPvs), "Disk Id should not be found in RDE")
+ })
+ }
}
func GetCAPVCDRDEPersistentVolumes(rde *swaggerClient.DefinedEntity) ([]string, error) {
diff --git a/samples/additional-busTypes.yaml b/samples/additional-busTypes.yaml
new file mode 100644
index 00000000..aa6809c9
--- /dev/null
+++ b/samples/additional-busTypes.yaml
@@ -0,0 +1,48 @@
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "false"
+ name: vcd-disk-sata-dev
+provisioner: named-disk.csi.cloud-director.vmware.com
+reclaimPolicy: Delete
+parameters:
+ storageProfile: "dev"
+ busType: "sata"
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "false"
+ name: vcd-disk-nvme-dev
+provisioner: named-disk.csi.cloud-director.vmware.com
+reclaimPolicy: Delete
+parameters:
+ storageProfile: "dev"
+ busType: "nvme"
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "false"
+ name: vcd-disk-scsi-lsi-logic-parallel-dev
+provisioner: named-disk.csi.cloud-director.vmware.com
+reclaimPolicy: Delete
+parameters:
+ storageProfile: "dev"
+ busType: "scsi_lsi_logic_parallel"
+---
+kind: StorageClass
+apiVersion: storage.k8s.io/v1
+metadata:
+ annotations:
+ storageclass.kubernetes.io/is-default-class: "false"
+ name: vcd-disk-scsi-lsi-logic-sas-dev
+provisioner: named-disk.csi.cloud-director.vmware.com
+reclaimPolicy: Delete
+parameters:
+ storageProfile: "dev"
+ busType: "scsi_lsi_logic_sas"
\ No newline at end of file
diff --git a/samples/my-pvc-ext4.yaml b/samples/my-pvc-ext4.yaml
index cdd62fb5..0a3df0db 100644
--- a/samples/my-pvc-ext4.yaml
+++ b/samples/my-pvc-ext4.yaml
@@ -22,5 +22,5 @@ provisioner: named-disk.csi.cloud-director.vmware.com
reclaimPolicy: Delete
parameters:
storageProfile: "dev"
+ # busType defaults to scsi_paravirtual
filesystem: "ext4"
----
diff --git a/tests/e2e/dynamic_provisioning_test.go b/tests/e2e/dynamic_provisioning_test.go
index 8ddd63df..2e8ff10b 100644
--- a/tests/e2e/dynamic_provisioning_test.go
+++ b/tests/e2e/dynamic_provisioning_test.go
@@ -3,6 +3,7 @@ package e2e
import (
"context"
"fmt"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdtypes"
@@ -13,15 +14,14 @@ import (
)
const (
- testNameSpaceName = "provisioning-test-ns"
- testRetainPVCName = "test-retain-pvc"
- testDeletePVCName = "test-delete-pvc"
- testDeploymentName = "test-deploy-name"
- storageClassDelete = "delete-storage-class"
- storageClassRetain = "retain-storage-class"
- storageSize = "2Gi"
- defaultStorageProfile = "*"
- volumeName = "deployment-pv"
+ testNameSpaceName = "provisioning-test-ns"
+ testRetainPVCName = "test-retain-pvc"
+ testDeletePVCName = "test-delete-pvc"
+ testDeploymentName = "test-deploy-name"
+ storageClassDelete = "delete-storage-class"
+ storageClassRetain = "retain-storage-class"
+ storageSize = "2Gi"
+ volumeName = "deployment-pv"
)
var _ = Describe("CSI dynamic provisioning Test", func() {
@@ -33,14 +33,13 @@ var _ = Describe("CSI dynamic provisioning Test", func() {
pv *apiv1.PersistentVolume
pvDeleted bool
)
-
tc, err = testingsdk.NewTestClient(&testingsdk.VCDAuthParams{
Host: host,
OvdcName: ovdc,
OrgName: org,
Username: userName,
RefreshToken: refreshToken,
- UserOrg: "system",
+ UserOrg: userOrg,
GetVdcClient: true,
}, rdeId)
Expect(err).NotTo(HaveOccurred())
@@ -53,10 +52,10 @@ var _ = Describe("CSI dynamic provisioning Test", func() {
ns, err := tc.CreateNameSpace(ctx, testNameSpaceName)
Expect(err).NotTo(HaveOccurred())
Expect(ns).NotTo(BeNil())
- retainStorageClass, err := tc.CreateStorageClass(ctx, storageClassRetain, apiv1.PersistentVolumeReclaimRetain, defaultStorageProfile)
+ retainStorageClass, err := tc.CreateStorageClass(ctx, storageClassRetain, apiv1.PersistentVolumeReclaimRetain, storageProfile, busType)
Expect(err).NotTo(HaveOccurred())
Expect(retainStorageClass).NotTo(BeNil())
- deleteStorageClass, err := tc.CreateStorageClass(ctx, storageClassDelete, apiv1.PersistentVolumeReclaimDelete, defaultStorageProfile)
+ deleteStorageClass, err := tc.CreateStorageClass(ctx, storageClassDelete, apiv1.PersistentVolumeReclaimDelete, storageProfile, busType)
Expect(err).NotTo(HaveOccurred())
Expect(deleteStorageClass).NotTo(BeNil())
})
@@ -74,6 +73,7 @@ var _ = Describe("CSI dynamic provisioning Test", func() {
By("PVC should be presented in kubernetes")
pvc, err = tc.GetPVC(ctx, testNameSpaceName, testRetainPVCName)
+ Expect(err).NotTo(HaveOccurred())
dynamicPVName = pvc.Spec.VolumeName
Expect(dynamicPVName).NotTo(BeEmpty())
diff --git a/tests/e2e/e2e_suite_test.go b/tests/e2e/e2e_suite_test.go
index df6406d7..b7be0917 100644
--- a/tests/e2e/e2e_suite_test.go
+++ b/tests/e2e/e2e_suite_test.go
@@ -2,18 +2,22 @@ package e2e
import (
"flag"
+ "testing"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "testing"
)
var (
- rdeId string
- host string
- org string
- ovdc string
- userName string
- refreshToken string
+ rdeId string
+ host string
+ org string
+ ovdc string
+ userName string
+ refreshToken string
+ busType string
+ storageProfile string
+ userOrg string
)
func init() {
@@ -24,6 +28,9 @@ func init() {
flag.StringVar(&userName, "userName", "", "Username for login to generate client")
flag.StringVar(&refreshToken, "refreshToken", "", "Refresh token of user to generate client")
flag.StringVar(&rdeId, "rdeId", "", "Cluster ID to fetch cluster RDE")
+ flag.StringVar(&busType, "busType", "scsi_paravirtual", "busType of the Disks")
+ flag.StringVar(&storageProfile, "storageProfile", "*", "storageProfile")
+ flag.StringVar(&userOrg, "userOrg", "system", "UserOrg to generate client")
}
var _ = BeforeSuite(func() {
diff --git a/tests/e2e/static_provisioning_test.go b/tests/e2e/static_provisioning_test.go
index 373172ec..deac5c37 100644
--- a/tests/e2e/static_provisioning_test.go
+++ b/tests/e2e/static_provisioning_test.go
@@ -2,8 +2,10 @@ package e2e
import (
"context"
+
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ csiClient "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdcsiclient"
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdtypes"
"github.com/vmware/cloud-director-named-disk-csi-driver/tests/utils"
"github.com/vmware/cloud-provider-for-cloud-director/pkg/testingsdk"
@@ -31,7 +33,7 @@ var _ = Describe("CSI static provisioning Test", func() {
OrgName: org,
Username: userName,
RefreshToken: refreshToken,
- UserOrg: "system",
+ UserOrg: userOrg,
GetVdcClient: true,
}, rdeId)
Expect(err).NotTo(HaveOccurred())
@@ -44,10 +46,10 @@ var _ = Describe("CSI static provisioning Test", func() {
ns, err := tc.CreateNameSpace(ctx, testNameSpaceName)
Expect(err).NotTo(HaveOccurred())
Expect(ns).NotTo(BeNil())
- retainStorageClass, err := tc.CreateStorageClass(ctx, storageClassRetain, apiv1.PersistentVolumeReclaimRetain, defaultStorageProfile)
+ retainStorageClass, err := tc.CreateStorageClass(ctx, storageClassRetain, apiv1.PersistentVolumeReclaimRetain, storageProfile, busType)
Expect(err).NotTo(HaveOccurred())
Expect(retainStorageClass).NotTo(BeNil())
- deleteStorageClass, err := tc.CreateStorageClass(ctx, storageClassDelete, apiv1.PersistentVolumeReclaimDelete, defaultStorageProfile)
+ deleteStorageClass, err := tc.CreateStorageClass(ctx, storageClassDelete, apiv1.PersistentVolumeReclaimDelete, storageProfile, busType)
Expect(err).NotTo(HaveOccurred())
Expect(deleteStorageClass).NotTo(BeNil())
})
@@ -55,11 +57,11 @@ var _ = Describe("CSI static provisioning Test", func() {
//scenario 1: use 'Delete' retention policy. step1: create VCD named-disk and PV.
It("should create a disk using VCD API calls and set up a PV based on the disk", func() {
By("should create the disk successfully from VCD")
- err = utils.CreateDisk(tc.VcdClient, testDiskName, smallDiskSizeMB, defaultStorageProfile)
+ err = utils.CreateDisk(tc.VcdClient, testDiskName, smallDiskSizeMB, csiClient.BusTypesSet[busType], storageProfile)
Expect(err).NotTo(HaveOccurred())
By("should create the static PV successfully in kubernetes")
- pv, err = tc.CreatePV(ctx, testDiskName, storageClassDelete, defaultStorageProfile, storageSize, apiv1.PersistentVolumeReclaimDelete)
+ pv, err = tc.CreatePV(ctx, testDiskName, storageClassDelete, storageProfile, storageSize, busType, apiv1.PersistentVolumeReclaimDelete)
Expect(err).NotTo(HaveOccurred())
Expect(pv).NotTo(BeNil())
@@ -131,11 +133,11 @@ var _ = Describe("CSI static provisioning Test", func() {
//scenario 2: use 'Retain' retention policy. step1: create VCD named-disk and PV.
It("Create a disk using VCD API calls and Set up a PV based on the disk using retain policy", func() {
By("should create the disk successfully from VCD")
- err := utils.CreateDisk(tc.VcdClient, testDiskName, smallDiskSizeMB, defaultStorageProfile)
+ err := utils.CreateDisk(tc.VcdClient, testDiskName, smallDiskSizeMB, csiClient.BusTypesSet[busType], storageProfile)
Expect(err).NotTo(HaveOccurred())
By("should create the static PV successfully in kubernetes")
- pv, err = tc.CreatePV(ctx, testDiskName, storageClassRetain, defaultStorageProfile, storageSize, apiv1.PersistentVolumeReclaimRetain)
+ pv, err = tc.CreatePV(ctx, testDiskName, storageClassRetain, storageProfile, storageSize, busType, apiv1.PersistentVolumeReclaimRetain)
Expect(err).NotTo(HaveOccurred())
Expect(pv).NotTo(BeNil())
diff --git a/tests/e2e/storageProfile_test.go b/tests/e2e/storageProfile_test.go
index 5d539e8e..4ae6a3e5 100644
--- a/tests/e2e/storageProfile_test.go
+++ b/tests/e2e/storageProfile_test.go
@@ -3,6 +3,7 @@ package e2e
import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
+ csiClient "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdcsiclient"
"github.com/vmware/cloud-director-named-disk-csi-driver/tests/utils"
"github.com/vmware/cloud-provider-for-cloud-director/pkg/testingsdk"
"github.com/vmware/go-vcloud-director/v2/govcd"
@@ -31,7 +32,7 @@ var _ = Describe("CSI Storage Profile Test", func() {
OrgName: org,
Username: userName,
RefreshToken: refreshToken,
- UserOrg: "system",
+ UserOrg: userOrg,
GetVdcClient: true,
}, rdeId)
Expect(err).NotTo(HaveOccurred())
@@ -80,7 +81,7 @@ var _ = Describe("CSI Storage Profile Test", func() {
It("should failed to create a PV with a size higher than quota", func() {
By("creating a disk with a size higher than quota")
- err = utils.CreateDisk(tc.VcdClient, staticDisk, largeDiskSizeMB, storageProfileWithLimit)
+ err = utils.CreateDisk(tc.VcdClient, staticDisk, largeDiskSizeMB, csiClient.BusTypesSet[busType], storageProfileWithLimit)
Expect(err).To(HaveOccurred())
By("verifying the error as being a storage limit error")
diff --git a/tests/utils/disk_util.go b/tests/utils/disk_util.go
index 3195495e..f065fca4 100644
--- a/tests/utils/disk_util.go
+++ b/tests/utils/disk_util.go
@@ -3,6 +3,10 @@ package utils
import (
"context"
"fmt"
+ "net/http"
+ "strings"
+ "time"
+
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/util"
csiClient "github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdcsiclient"
"github.com/vmware/cloud-director-named-disk-csi-driver/pkg/vcdtypes"
@@ -11,9 +15,6 @@ import (
"github.com/vmware/go-vcloud-director/v2/govcd"
"github.com/vmware/go-vcloud-director/v2/types/v56"
"k8s.io/apimachinery/pkg/util/wait"
- "net/http"
- "strings"
- "time"
)
type CSItc struct {
@@ -24,8 +25,9 @@ const (
defaultRetryInterval = 10 * time.Second
defaultRetryTimeout = 60 * time.Second
defaultWaitTimeout = 120 * time.Second
+ deleteDiskTimeout = 180 * time.Second
+ deleteDiskInterval = 20 * time.Second
CSIVersion = "1.3.0"
- MaxVCDUpdateRetries = 10
)
func DeleteDisk(vcdClient *vcdsdk.Client, diskName string) error {
@@ -106,13 +108,16 @@ func ValidateNoAttachedVM(vcdClient *vcdsdk.Client, disk *vcdtypes.Disk) error {
return nil
}
-func CreateDisk(vcdClient *vcdsdk.Client, diskName string, diskSizeMB int64, storageProfileName string) error {
+func CreateDisk(vcdClient *vcdsdk.Client, diskName string, diskSizeMB int64, busTuple csiClient.BusTuple, storageProfileName string) error {
spRef, err := vcdClient.VDC.FindStorageProfileReference(storageProfileName)
+ if err != nil {
+ return fmt.Errorf("could not find storage profile %s %w", storageProfileName, err)
+ }
d := &vcdtypes.Disk{
Name: diskName,
SizeMb: diskSizeMB,
- BusType: csiClient.VCDBusTypeSCSI,
- BusSubType: csiClient.VCDBusSubTypeVirtualSCSI,
+ BusType: busTuple.BusType,
+ BusSubType: busTuple.BusSubType,
Description: "",
Shareable: false,
StorageProfile: &spRef,
@@ -178,7 +183,7 @@ func VerifyDiskViaVCD(vcdClient *vcdsdk.Client, diskName string) (*vcdtypes.Disk
}
func WaitDiskDeleteViaVCD(vcdClient *vcdsdk.Client, diskName string) error {
- err := wait.PollImmediate(30*time.Second, 150*time.Second, func() (bool, error) {
+ err := wait.PollImmediate(deleteDiskInterval, deleteDiskTimeout, func() (bool, error) {
_, err := GetDiskByNameViaVCD(vcdClient, diskName)
if err != nil {
if err == govcd.ErrorEntityNotFound {
@@ -199,7 +204,7 @@ func GetDiskByNameViaVCD(vcdClient *vcdsdk.Client, diskName string) (disk *vcdty
return nil, fmt.Errorf("error occurred while getting vcdClient VDC, [%v]", err)
}
- for i := 0; i < MaxVCDUpdateRetries; i++ {
+ for i := 0; i < vcdsdk.MaxRDEUpdateRetries; i++ {
var diskList []vcdtypes.Disk
err = vcdClient.VDC.Refresh()
if err != nil {
diff --git a/vendor/github.com/StackExchange/wmi/LICENSE b/vendor/github.com/StackExchange/wmi/LICENSE
new file mode 100644
index 00000000..ae80b672
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Stack Exchange
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/StackExchange/wmi/README.md b/vendor/github.com/StackExchange/wmi/README.md
new file mode 100644
index 00000000..c4a432d6
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/README.md
@@ -0,0 +1,13 @@
+wmi
+===
+
+Package wmi provides a WQL interface to Windows WMI.
+
+Note: It interfaces with WMI on the local machine, therefore it only runs on Windows.
+
+---
+
+NOTE: This project is no longer being actively maintained. If you would like
+to become its new owner, please contact tlimoncelli at stack over flow dot com.
+
+---
diff --git a/vendor/github.com/StackExchange/wmi/swbemservices.go b/vendor/github.com/StackExchange/wmi/swbemservices.go
new file mode 100644
index 00000000..3ff87563
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/swbemservices.go
@@ -0,0 +1,260 @@
+// +build windows
+
+package wmi
+
+import (
+ "fmt"
+ "reflect"
+ "runtime"
+ "sync"
+
+ "github.com/go-ole/go-ole"
+ "github.com/go-ole/go-ole/oleutil"
+)
+
+// SWbemServices is used to access wmi. See https://msdn.microsoft.com/en-us/library/aa393719(v=vs.85).aspx
+type SWbemServices struct {
+ //TODO: track namespace. Not sure if we can re connect to a different namespace using the same instance
+ cWMIClient *Client //This could also be an embedded struct, but then we would need to branch on Client vs SWbemServices in the Query method
+ sWbemLocatorIUnknown *ole.IUnknown
+ sWbemLocatorIDispatch *ole.IDispatch
+ queries chan *queryRequest
+ closeError chan error
+ lQueryorClose sync.Mutex
+}
+
+type queryRequest struct {
+ query string
+ dst interface{}
+ args []interface{}
+ finished chan error
+}
+
+// InitializeSWbemServices will return a new SWbemServices object that can be used to query WMI
+func InitializeSWbemServices(c *Client, connectServerArgs ...interface{}) (*SWbemServices, error) {
+ //fmt.Println("InitializeSWbemServices: Starting")
+ //TODO: implement connectServerArgs as optional argument for init with connectServer call
+ s := new(SWbemServices)
+ s.cWMIClient = c
+ s.queries = make(chan *queryRequest)
+ initError := make(chan error)
+ go s.process(initError)
+
+ err, ok := <-initError
+ if ok {
+ return nil, err //Send error to caller
+ }
+ //fmt.Println("InitializeSWbemServices: Finished")
+ return s, nil
+}
+
+// Close will clear and release all of the SWbemServices resources
+func (s *SWbemServices) Close() error {
+ s.lQueryorClose.Lock()
+ if s == nil || s.sWbemLocatorIDispatch == nil {
+ s.lQueryorClose.Unlock()
+ return fmt.Errorf("SWbemServices is not Initialized")
+ }
+ if s.queries == nil {
+ s.lQueryorClose.Unlock()
+ return fmt.Errorf("SWbemServices has been closed")
+ }
+ //fmt.Println("Close: sending close request")
+ var result error
+ ce := make(chan error)
+ s.closeError = ce //Race condition if multiple callers to close. May need to lock here
+ close(s.queries) //Tell background to shut things down
+ s.lQueryorClose.Unlock()
+ err, ok := <-ce
+ if ok {
+ result = err
+ }
+ //fmt.Println("Close: finished")
+ return result
+}
+
+func (s *SWbemServices) process(initError chan error) {
+ //fmt.Println("process: starting background thread initialization")
+ //All OLE/WMI calls must happen on the same initialized thead, so lock this goroutine
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
+ if err != nil {
+ oleCode := err.(*ole.OleError).Code()
+ if oleCode != ole.S_OK && oleCode != S_FALSE {
+ initError <- fmt.Errorf("ole.CoInitializeEx error: %v", err)
+ return
+ }
+ }
+ defer ole.CoUninitialize()
+
+ unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
+ if err != nil {
+ initError <- fmt.Errorf("CreateObject SWbemLocator error: %v", err)
+ return
+ } else if unknown == nil {
+ initError <- ErrNilCreateObject
+ return
+ }
+ defer unknown.Release()
+ s.sWbemLocatorIUnknown = unknown
+
+ dispatch, err := s.sWbemLocatorIUnknown.QueryInterface(ole.IID_IDispatch)
+ if err != nil {
+ initError <- fmt.Errorf("SWbemLocator QueryInterface error: %v", err)
+ return
+ }
+ defer dispatch.Release()
+ s.sWbemLocatorIDispatch = dispatch
+
+ // we can't do the ConnectServer call outside the loop unless we find a way to track and re-init the connectServerArgs
+ //fmt.Println("process: initialized. closing initError")
+ close(initError)
+ //fmt.Println("process: waiting for queries")
+ for q := range s.queries {
+ //fmt.Printf("process: new query: len(query)=%d\n", len(q.query))
+ errQuery := s.queryBackground(q)
+ //fmt.Println("process: s.queryBackground finished")
+ if errQuery != nil {
+ q.finished <- errQuery
+ }
+ close(q.finished)
+ }
+ //fmt.Println("process: queries channel closed")
+ s.queries = nil //set channel to nil so we know it is closed
+ //TODO: I think the Release/Clear calls can panic if things are in a bad state.
+ //TODO: May need to recover from panics and send error to method caller instead.
+ close(s.closeError)
+}
+
+// Query runs the WQL query using a SWbemServices instance and appends the values to dst.
+//
+// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
+// the query must have the same name in dst. Supported types are all signed and
+// unsigned integers, time.Time, string, bool, or a pointer to one of those.
+// Array types are not supported.
+//
+// By default, the local machine and default namespace are used. These can be
+// changed using connectServerArgs. See
+// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
+func (s *SWbemServices) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
+ s.lQueryorClose.Lock()
+ if s == nil || s.sWbemLocatorIDispatch == nil {
+ s.lQueryorClose.Unlock()
+ return fmt.Errorf("SWbemServices is not Initialized")
+ }
+ if s.queries == nil {
+ s.lQueryorClose.Unlock()
+ return fmt.Errorf("SWbemServices has been closed")
+ }
+
+ //fmt.Println("Query: Sending query request")
+ qr := queryRequest{
+ query: query,
+ dst: dst,
+ args: connectServerArgs,
+ finished: make(chan error),
+ }
+ s.queries <- &qr
+ s.lQueryorClose.Unlock()
+ err, ok := <-qr.finished
+ if ok {
+ //fmt.Println("Query: Finished with error")
+ return err //Send error to caller
+ }
+ //fmt.Println("Query: Finished")
+ return nil
+}
+
+func (s *SWbemServices) queryBackground(q *queryRequest) error {
+ if s == nil || s.sWbemLocatorIDispatch == nil {
+ return fmt.Errorf("SWbemServices is not Initialized")
+ }
+ wmi := s.sWbemLocatorIDispatch //Should just rename in the code, but this will help as we break things apart
+ //fmt.Println("queryBackground: Starting")
+
+ dv := reflect.ValueOf(q.dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType := checkMultiArg(dv)
+ if mat == multiArgTypeInvalid {
+ return ErrInvalidEntityType
+ }
+
+ // service is a SWbemServices
+ serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", q.args...)
+ if err != nil {
+ return err
+ }
+ service := serviceRaw.ToIDispatch()
+ defer serviceRaw.Clear()
+
+ // result is a SWBemObjectSet
+ resultRaw, err := oleutil.CallMethod(service, "ExecQuery", q.query)
+ if err != nil {
+ return err
+ }
+ result := resultRaw.ToIDispatch()
+ defer resultRaw.Clear()
+
+ count, err := oleInt64(result, "Count")
+ if err != nil {
+ return err
+ }
+
+ enumProperty, err := result.GetProperty("_NewEnum")
+ if err != nil {
+ return err
+ }
+ defer enumProperty.Clear()
+
+ enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
+ if err != nil {
+ return err
+ }
+ if enum == nil {
+ return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
+ }
+ defer enum.Release()
+
+ // Initialize a slice with Count capacity
+ dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
+
+ var errFieldMismatch error
+ for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
+ if err != nil {
+ return err
+ }
+
+ err := func() error {
+ // item is a SWbemObject, but really a Win32_Process
+ item := itemRaw.ToIDispatch()
+ defer item.Release()
+
+ ev := reflect.New(elemType)
+ if err = s.cWMIClient.loadEntity(ev.Interface(), item); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+ //fmt.Println("queryBackground: Finished")
+ return errFieldMismatch
+}
diff --git a/vendor/github.com/StackExchange/wmi/wmi.go b/vendor/github.com/StackExchange/wmi/wmi.go
new file mode 100644
index 00000000..b4bb4f09
--- /dev/null
+++ b/vendor/github.com/StackExchange/wmi/wmi.go
@@ -0,0 +1,590 @@
+// +build windows
+
+/*
+Package wmi provides a WQL interface for WMI on Windows.
+
+Example code to print names of running processes:
+
+ type Win32_Process struct {
+ Name string
+ }
+
+ func main() {
+ var dst []Win32_Process
+ q := wmi.CreateQuery(&dst, "")
+ err := wmi.Query(q, &dst)
+ if err != nil {
+ log.Fatal(err)
+ }
+ for i, v := range dst {
+ println(i, v.Name)
+ }
+ }
+
+*/
+package wmi
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/go-ole/go-ole"
+ "github.com/go-ole/go-ole/oleutil"
+)
+
+var l = log.New(os.Stdout, "", log.LstdFlags)
+
+var (
+ ErrInvalidEntityType = errors.New("wmi: invalid entity type")
+ // ErrNilCreateObject is the error returned if CreateObject returns nil even
+ // if the error was nil.
+ ErrNilCreateObject = errors.New("wmi: create object returned nil")
+ lock sync.Mutex
+)
+
+// S_FALSE is returned by CoInitializeEx if it was already called on this thread.
+const S_FALSE = 0x00000001
+
+// QueryNamespace invokes Query with the given namespace on the local machine.
+func QueryNamespace(query string, dst interface{}, namespace string) error {
+ return Query(query, dst, nil, namespace)
+}
+
+// Query runs the WQL query and appends the values to dst.
+//
+// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
+// the query must have the same name in dst. Supported types are all signed and
+// unsigned integers, time.Time, string, bool, or a pointer to one of those.
+// Array types are not supported.
+//
+// By default, the local machine and default namespace are used. These can be
+// changed using connectServerArgs. See
+// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
+// for details.
+//
+// Query is a wrapper around DefaultClient.Query.
+func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
+ if DefaultClient.SWbemServicesClient == nil {
+ return DefaultClient.Query(query, dst, connectServerArgs...)
+ }
+ return DefaultClient.SWbemServicesClient.Query(query, dst, connectServerArgs...)
+}
+
+// CallMethod calls a method named methodName on an instance of the class named
+// className, with the given params.
+//
+// CallMethod is a wrapper around DefaultClient.CallMethod.
+func CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) {
+ return DefaultClient.CallMethod(connectServerArgs, className, methodName, params)
+}
+
+// A Client is an WMI query client.
+//
+// Its zero value (DefaultClient) is a usable client.
+type Client struct {
+ // NonePtrZero specifies if nil values for fields which aren't pointers
+ // should be returned as the field types zero value.
+ //
+ // Setting this to true allows stucts without pointer fields to be used
+ // without the risk failure should a nil value returned from WMI.
+ NonePtrZero bool
+
+ // PtrNil specifies if nil values for pointer fields should be returned
+ // as nil.
+ //
+ // Setting this to true will set pointer fields to nil where WMI
+ // returned nil, otherwise the types zero value will be returned.
+ PtrNil bool
+
+ // AllowMissingFields specifies that struct fields not present in the
+ // query result should not result in an error.
+ //
+ // Setting this to true allows custom queries to be used with full
+ // struct definitions instead of having to define multiple structs.
+ AllowMissingFields bool
+
+ // SWbemServiceClient is an optional SWbemServices object that can be
+ // initialized and then reused across multiple queries. If it is null
+ // then the method will initialize a new temporary client each time.
+ SWbemServicesClient *SWbemServices
+}
+
+// DefaultClient is the default Client and is used by Query, QueryNamespace, and CallMethod.
+var DefaultClient = &Client{}
+
+// coinitService coinitializes WMI service. If no error is returned, a cleanup function
+// is returned which must be executed (usually deferred) to clean up allocated resources.
+func (c *Client) coinitService(connectServerArgs ...interface{}) (*ole.IDispatch, func(), error) {
+ var unknown *ole.IUnknown
+ var wmi *ole.IDispatch
+ var serviceRaw *ole.VARIANT
+
+ // be sure teardown happens in the reverse
+ // order from that which they were created
+ deferFn := func() {
+ if serviceRaw != nil {
+ serviceRaw.Clear()
+ }
+ if wmi != nil {
+ wmi.Release()
+ }
+ if unknown != nil {
+ unknown.Release()
+ }
+ ole.CoUninitialize()
+ }
+
+ // if we error'ed here, clean up immediately
+ var err error
+ defer func() {
+ if err != nil {
+ deferFn()
+ }
+ }()
+
+ err = ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
+ if err != nil {
+ oleCode := err.(*ole.OleError).Code()
+ if oleCode != ole.S_OK && oleCode != S_FALSE {
+ return nil, nil, err
+ }
+ }
+
+ unknown, err = oleutil.CreateObject("WbemScripting.SWbemLocator")
+ if err != nil {
+ return nil, nil, err
+ } else if unknown == nil {
+ return nil, nil, ErrNilCreateObject
+ }
+
+ wmi, err = unknown.QueryInterface(ole.IID_IDispatch)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ // service is a SWbemServices
+ serviceRaw, err = oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ return serviceRaw.ToIDispatch(), deferFn, nil
+}
+
+// CallMethod calls a WMI method named methodName on an instance
+// of the class named className. It passes in the arguments given
+// in params. Use connectServerArgs to customize the machine and
+// namespace; by default, the local machine and default namespace
+// are used. See
+// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
+// for details.
+func (c *Client) CallMethod(connectServerArgs []interface{}, className, methodName string, params []interface{}) (int32, error) {
+ service, cleanup, err := c.coinitService(connectServerArgs...)
+ if err != nil {
+ return 0, fmt.Errorf("coinit: %v", err)
+ }
+ defer cleanup()
+
+ // Get class
+ classRaw, err := oleutil.CallMethod(service, "Get", className)
+ if err != nil {
+ return 0, fmt.Errorf("CallMethod Get class %s: %v", className, err)
+ }
+ class := classRaw.ToIDispatch()
+ defer classRaw.Clear()
+
+ // Run method
+ resultRaw, err := oleutil.CallMethod(class, methodName, params...)
+ if err != nil {
+ return 0, fmt.Errorf("CallMethod %s.%s: %v", className, methodName, err)
+ }
+ resultInt, ok := resultRaw.Value().(int32)
+ if !ok {
+ return 0, fmt.Errorf("return value was not an int32: %v (%T)", resultRaw, resultRaw)
+ }
+
+ return resultInt, nil
+}
+
+// Query runs the WQL query and appends the values to dst.
+//
+// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
+// the query must have the same name in dst. Supported types are all signed and
+// unsigned integers, time.Time, string, bool, or a pointer to one of those.
+// Array types are not supported.
+//
+// By default, the local machine and default namespace are used. These can be
+// changed using connectServerArgs. See
+// https://docs.microsoft.com/en-us/windows/desktop/WmiSdk/swbemlocator-connectserver
+// for details.
+func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
+ dv := reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType := checkMultiArg(dv)
+ if mat == multiArgTypeInvalid {
+ return ErrInvalidEntityType
+ }
+
+ lock.Lock()
+ defer lock.Unlock()
+ runtime.LockOSThread()
+ defer runtime.UnlockOSThread()
+
+ service, cleanup, err := c.coinitService(connectServerArgs...)
+ if err != nil {
+ return err
+ }
+ defer cleanup()
+
+ // result is a SWBemObjectSet
+ resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
+ if err != nil {
+ return err
+ }
+ result := resultRaw.ToIDispatch()
+ defer resultRaw.Clear()
+
+ count, err := oleInt64(result, "Count")
+ if err != nil {
+ return err
+ }
+
+ enumProperty, err := result.GetProperty("_NewEnum")
+ if err != nil {
+ return err
+ }
+ defer enumProperty.Clear()
+
+ enum, err := enumProperty.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
+ if err != nil {
+ return err
+ }
+ if enum == nil {
+ return fmt.Errorf("can't get IEnumVARIANT, enum is nil")
+ }
+ defer enum.Release()
+
+ // Initialize a slice with Count capacity
+ dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))
+
+ var errFieldMismatch error
+ for itemRaw, length, err := enum.Next(1); length > 0; itemRaw, length, err = enum.Next(1) {
+ if err != nil {
+ return err
+ }
+
+ err := func() error {
+ // item is a SWbemObject, but really a Win32_Process
+ item := itemRaw.ToIDispatch()
+ defer item.Release()
+
+ ev := reflect.New(elemType)
+ if err = c.loadEntity(ev.Interface(), item); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ return nil
+ }()
+ if err != nil {
+ return err
+ }
+ }
+ return errFieldMismatch
+}
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("wmi: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+
+// loadEntity loads a SWbemObject into a struct pointer.
+func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {
+ v := reflect.ValueOf(dst).Elem()
+ for i := 0; i < v.NumField(); i++ {
+ f := v.Field(i)
+ of := f
+ isPtr := f.Kind() == reflect.Ptr
+ if isPtr {
+ ptr := reflect.New(f.Type().Elem())
+ f.Set(ptr)
+ f = f.Elem()
+ }
+ n := v.Type().Field(i).Name
+ if n[0] < 'A' || n[0] > 'Z' {
+ continue
+ }
+ if !f.CanSet() {
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "CanSet() is false",
+ }
+ }
+ prop, err := oleutil.GetProperty(src, n)
+ if err != nil {
+ if !c.AllowMissingFields {
+ errFieldMismatch = &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "no such struct field",
+ }
+ }
+ continue
+ }
+ defer prop.Clear()
+
+ if prop.VT == 0x1 { //VT_NULL
+ continue
+ }
+
+ switch val := prop.Value().(type) {
+ case int8, int16, int32, int64, int:
+ v := reflect.ValueOf(val).Int()
+ switch f.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ f.SetInt(v)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ f.SetUint(uint64(v))
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not an integer class",
+ }
+ }
+ case uint8, uint16, uint32, uint64:
+ v := reflect.ValueOf(val).Uint()
+ switch f.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ f.SetInt(int64(v))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ f.SetUint(v)
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not an integer class",
+ }
+ }
+ case string:
+ switch f.Kind() {
+ case reflect.String:
+ f.SetString(val)
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ iv, err := strconv.ParseInt(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ f.SetInt(iv)
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+ uv, err := strconv.ParseUint(val, 10, 64)
+ if err != nil {
+ return err
+ }
+ f.SetUint(uv)
+ case reflect.Struct:
+ switch f.Type() {
+ case timeType:
+ if len(val) == 25 {
+ mins, err := strconv.Atoi(val[22:])
+ if err != nil {
+ return err
+ }
+ val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60)
+ }
+ t, err := time.Parse("20060102150405.000000-0700", val)
+ if err != nil {
+ return err
+ }
+ f.Set(reflect.ValueOf(t))
+ }
+ }
+ case bool:
+ switch f.Kind() {
+ case reflect.Bool:
+ f.SetBool(val)
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not a bool",
+ }
+ }
+ case float32:
+ switch f.Kind() {
+ case reflect.Float32:
+ f.SetFloat(float64(val))
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: "not a Float32",
+ }
+ }
+ default:
+ if f.Kind() == reflect.Slice {
+ switch f.Type().Elem().Kind() {
+ case reflect.String:
+ safeArray := prop.ToArray()
+ if safeArray != nil {
+ arr := safeArray.ToValueArray()
+ fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
+ for i, v := range arr {
+ s := fArr.Index(i)
+ s.SetString(v.(string))
+ }
+ f.Set(fArr)
+ }
+ case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
+ safeArray := prop.ToArray()
+ if safeArray != nil {
+ arr := safeArray.ToValueArray()
+ fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
+ for i, v := range arr {
+ s := fArr.Index(i)
+ s.SetUint(reflect.ValueOf(v).Uint())
+ }
+ f.Set(fArr)
+ }
+ case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
+ safeArray := prop.ToArray()
+ if safeArray != nil {
+ arr := safeArray.ToValueArray()
+ fArr := reflect.MakeSlice(f.Type(), len(arr), len(arr))
+ for i, v := range arr {
+ s := fArr.Index(i)
+ s.SetInt(reflect.ValueOf(v).Int())
+ }
+ f.Set(fArr)
+ }
+ default:
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: fmt.Sprintf("unsupported slice type (%T)", val),
+ }
+ }
+ } else {
+ typeof := reflect.TypeOf(val)
+ if typeof == nil && (isPtr || c.NonePtrZero) {
+ if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
+ of.Set(reflect.Zero(of.Type()))
+ }
+ break
+ }
+ return &ErrFieldMismatch{
+ StructType: of.Type(),
+ FieldName: n,
+ Reason: fmt.Sprintf("unsupported type (%T)", val),
+ }
+ }
+ }
+ }
+ return errFieldMismatch
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+)
+
+// checkMultiArg checks that v has type []S, []*S for some struct type S.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
+ v, err := oleutil.GetProperty(item, prop)
+ if err != nil {
+ return 0, err
+ }
+ defer v.Clear()
+
+ i := int64(v.Val)
+ return i, nil
+}
+
+// CreateQuery returns a WQL query string that queries all columns of src. where
+// is an optional string that is appended to the query, to be used with WHERE
+// clauses. In such a case, the "WHERE" string should appear at the beginning.
+// The wmi class is obtained by the name of the type. You can pass a optional
+// class throught the variadic class parameter which is useful for anonymous
+// structs.
+func CreateQuery(src interface{}, where string, class ...string) string {
+ var b bytes.Buffer
+ b.WriteString("SELECT ")
+ s := reflect.Indirect(reflect.ValueOf(src))
+ t := s.Type()
+ if s.Kind() == reflect.Slice {
+ t = t.Elem()
+ }
+ if t.Kind() != reflect.Struct {
+ return ""
+ }
+ var fields []string
+ for i := 0; i < t.NumField(); i++ {
+ fields = append(fields, t.Field(i).Name)
+ }
+ b.WriteString(strings.Join(fields, ", "))
+ b.WriteString(" FROM ")
+ if len(class) > 0 {
+ b.WriteString(class[0])
+ } else {
+ b.WriteString(t.Name())
+ }
+ b.WriteString(" " + where)
+ return b.String()
+}
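+
+// A usage sketch for CreateQuery (assuming the package's Query helper, which
+// executes the generated WQL and fills dst):
+//
+//	type Win32_Process struct {
+//		Name      string
+//		ProcessId uint32
+//	}
+//
+//	var procs []Win32_Process
+//	q := CreateQuery(&procs, "WHERE Name = 'explorer.exe'")
+//	err := Query(q, &procs)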
diff --git a/vendor/github.com/ghodss/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore
new file mode 100644
index 00000000..e256a31e
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.gitignore
@@ -0,0 +1,20 @@
+# OSX leaves these everywhere on SMB shares
+._*
+
+# Eclipse files
+.classpath
+.project
+.settings/**
+
+# Emacs save files
+*~
+
+# Vim-related files
+[._]*.s[a-w][a-z]
+[._]s[a-w][a-z]
+*.un~
+Session.vim
+.netrwhist
+
+# Go test binaries
+*.test
diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml
new file mode 100644
index 00000000..0e9d6edc
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/.travis.yml
@@ -0,0 +1,7 @@
+language: go
+go:
+ - 1.3
+ - 1.4
+script:
+ - go test
+ - go build
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 00000000..7805d36d
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 00000000..0200f75b
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,121 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary data in your YAML files, though - just store it without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
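+
+For example, a minimal sketch of decoding the base64 yourself via a custom `UnmarshalJSON` on a byte-slice type:
+
+```go
+package main
+
+import (
+	"encoding/base64"
+	"encoding/json"
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+// Base64Bytes stores binary data as a plain base64 string in YAML/JSON and
+// decodes it itself, so no !!binary tag is needed.
+type Base64Bytes []byte
+
+func (b *Base64Bytes) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return err
+	}
+	decoded, err := base64.StdEncoding.DecodeString(s)
+	if err != nil {
+		return err
+	}
+	*b = decoded
+	return nil
+}
+
+type Config struct {
+	ExampleKey Base64Bytes `json:"exampleKey"`
+}
+
+func main() {
+	var c Config
+	if err := yaml.Unmarshal([]byte("exampleKey: gIGC\n"), &c); err != nil {
+		fmt.Printf("err: %v\n", err)
+		return
+	}
+	fmt.Println([]byte(c.ExampleKey)) // [128 129 130]
+}
+```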
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are themselves maps will result in an error, since JSON does not support such keys. The same error occurs in `Unmarshal`, because map keys cannot be unmarshaled into struct fields in any case.
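+
+As a sketch of that failure mode (the exact error text may differ):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/ghodss/yaml"
+)
+
+func main() {
+	// A YAML mapping whose key is itself a mapping - valid YAML, but not
+	// representable in JSON, so the conversion reports an error.
+	y := []byte("? {a: 1}\n: value\n")
+	if _, err := yaml.YAMLToJSON(y); err != nil {
+		fmt.Println("err:", err)
+	}
+}
+```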
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+ Age int `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ age: 30
+ name: John
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+ err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 00000000..58600740
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,501 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
+// indirect walks down v allocating pointers as needed,
+// until it gets to a non-pointer.
+// if it encounters an Unmarshaler, indirect stops and returns that.
+// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+ if v.IsNil() {
+ if v.CanSet() {
+ v.Set(reflect.New(v.Type().Elem()))
+ } else {
+ v = reflect.New(v.Type().Elem())
+ }
+ }
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(json.Unmarshaler); ok {
+ return u, nil, reflect.Value{}
+ }
+ if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
+ return nil, u, reflect.Value{}
+ }
+ }
+ v = v.Elem()
+ }
+ return nil, nil, v
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+ name string
+ nameBytes []byte // []byte(name)
+ equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+ tag bool
+ index []int
+ typ reflect.Type
+ omitEmpty bool
+ quoted bool
+}
+
+func fillField(f field) field {
+ f.nameBytes = []byte(f.name)
+ f.equalFold = foldFunc(f.nameBytes)
+ return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+ // Anonymous fields to explore at the current level and the next.
+ current := []field{}
+ next := []field{{typ: t}}
+
+ // Count of queued names for current level and the next.
+ count := map[reflect.Type]int{}
+ nextCount := map[reflect.Type]int{}
+
+ // Types already visited at an earlier level.
+ visited := map[reflect.Type]bool{}
+
+ // Fields found.
+ var fields []field
+
+ for len(next) > 0 {
+ current, next = next, current[:0]
+ count, nextCount = nextCount, map[reflect.Type]int{}
+
+ for _, f := range current {
+ if visited[f.typ] {
+ continue
+ }
+ visited[f.typ] = true
+
+ // Scan f.typ for fields to include.
+ for i := 0; i < f.typ.NumField(); i++ {
+ sf := f.typ.Field(i)
+ if sf.PkgPath != "" { // unexported
+ continue
+ }
+ tag := sf.Tag.Get("json")
+ if tag == "-" {
+ continue
+ }
+ name, opts := parseTag(tag)
+ if !isValidTag(name) {
+ name = ""
+ }
+ index := make([]int, len(f.index)+1)
+ copy(index, f.index)
+ index[len(f.index)] = i
+
+ ft := sf.Type
+ if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+ // Follow pointer.
+ ft = ft.Elem()
+ }
+
+ // Record found field and index sequence.
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+ tagged := name != ""
+ if name == "" {
+ name = sf.Name
+ }
+ fields = append(fields, fillField(field{
+ name: name,
+ tag: tagged,
+ index: index,
+ typ: ft,
+ omitEmpty: opts.Contains("omitempty"),
+ quoted: opts.Contains("string"),
+ }))
+ if count[f.typ] > 1 {
+ // If there were multiple instances, add a second,
+ // so that the annihilation code will see a duplicate.
+ // It only cares about the distinction between 1 or 2,
+ // so don't bother generating any more copies.
+ fields = append(fields, fields[len(fields)-1])
+ }
+ continue
+ }
+
+ // Record new anonymous struct to explore in next round.
+ nextCount[ft]++
+ if nextCount[ft] == 1 {
+ next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+ }
+ }
+ }
+ }
+
+ sort.Sort(byName(fields))
+
+ // Delete all fields that are hidden by the Go rules for embedded fields,
+ // except that fields with JSON tags are promoted.
+
+ // The fields are sorted in primary order of name, secondary order
+ // of field index length. Loop over names; for each name, delete
+ // hidden fields by choosing the one dominant field that survives.
+ out := fields[:0]
+ for advance, i := 0, 0; i < len(fields); i += advance {
+ // One iteration per name.
+ // Find the sequence of fields with the name of this first field.
+ fi := fields[i]
+ name := fi.name
+ for advance = 1; i+advance < len(fields); advance++ {
+ fj := fields[i+advance]
+ if fj.name != name {
+ break
+ }
+ }
+ if advance == 1 { // Only one field with this name
+ out = append(out, fi)
+ continue
+ }
+ dominant, ok := dominantField(fields[i : i+advance])
+ if ok {
+ out = append(out, dominant)
+ }
+ }
+
+ fields = out
+ sort.Sort(byIndex(fields))
+
+ return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
+var fieldCache struct {
+ sync.RWMutex
+ m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
+func isValidTag(s string) bool {
+ if s == "" {
+ return false
+ }
+ for _, c := range s {
+ switch {
+ case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+ // Backslash and quote chars are reserved, but
+ // otherwise any punctuation chars are allowed
+ // in a tag name.
+ default:
+ if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+const (
+ caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
+ kelvin = '\u212a'
+ smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+// * S maps to s and to U+017F 'ſ' Latin small letter long s
+// * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+ nonLetter := false
+ special := false // special letter
+ for _, b := range s {
+ if b >= utf8.RuneSelf {
+ return bytes.EqualFold
+ }
+ upper := b & caseMask
+ if upper < 'A' || upper > 'Z' {
+ nonLetter = true
+ } else if upper == 'K' || upper == 'S' {
+ // See above for why these letters are special.
+ special = true
+ }
+ }
+ if special {
+ return equalFoldRight
+ }
+ if nonLetter {
+ return asciiEqualFold
+ }
+ return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+ for _, sb := range s {
+ if len(t) == 0 {
+ return false
+ }
+ tb := t[0]
+ if tb < utf8.RuneSelf {
+ if sb != tb {
+ sbUpper := sb & caseMask
+ if 'A' <= sbUpper && sbUpper <= 'Z' {
+ if sbUpper != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ t = t[1:]
+ continue
+ }
+ // sb is ASCII and t is not. t must be either kelvin
+ // sign or long s; sb must be s, S, k, or K.
+ tr, size := utf8.DecodeRune(t)
+ switch sb {
+ case 's', 'S':
+ if tr != smallLongEss {
+ return false
+ }
+ case 'k', 'K':
+ if tr != kelvin {
+ return false
+ }
+ default:
+ return false
+ }
+ t = t[size:]
+
+ }
+ if len(t) > 0 {
+ return false
+ }
+ return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, sb := range s {
+ tb := t[i]
+ if sb == tb {
+ continue
+ }
+ if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+ if sb&caseMask != tb&caseMask {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i, b := range s {
+ if b&caseMask != t[i]&caseMask {
+ return false
+ }
+ }
+ return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 00000000..4fb4054a
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ "gopkg.in/yaml.v2"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: %v", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
+ }
+
+ return y, nil
+}
+
+// Converts YAML to JSON then uses JSON to unmarshal into an object.
+func Unmarshal(y []byte, o interface{}) error {
+ vo := reflect.ValueOf(o)
+ j, err := yamlToJSON(y, &vo)
+ if err != nil {
+ return fmt.Errorf("error converting YAML to JSON: %v", err)
+ }
+
+ err = json.Unmarshal(j, o)
+ if err != nil {
+ return fmt.Errorf("error unmarshaling JSON: %v", err)
+ }
+
+ return nil
+}
+
+// Convert JSON to YAML.
+func JSONToYAML(j []byte) ([]byte, error) {
+ // Convert the JSON to an object.
+ var jsonObj interface{}
+ // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
+ // Go JSON library doesn't try to pick the right number type (int, float,
+ // etc.) when unmarshalling to interface{}, it just picks float64
+ // universally. go-yaml does go through the effort of picking the right
+ // number type, so we can preserve number type throughout this process.
+ err := yaml.Unmarshal(j, &jsonObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // Marshal this object into YAML.
+ return yaml.Marshal(jsonObj)
+}
+
+// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
+// this method should be a no-op.
+//
+// Things YAML can do that are not supported by JSON:
+// * In YAML you can have binary and null keys in your maps. These are invalid
+// in JSON. (int and float keys are converted to strings.)
+// * Binary data in YAML with the !!binary tag is not supported. If you want to
+// use binary data with this library, encode the data as base64 as usual but do
+// not use the !!binary tag in your YAML. This will ensure the original base64
+// encoded data makes it all the way through to the JSON.
+func YAMLToJSON(y []byte) ([]byte, error) {
+ return yamlToJSON(y, nil)
+}
+
+func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
+ // Convert the YAML to an object.
+ var yamlObj interface{}
+ err := yaml.Unmarshal(y, &yamlObj)
+ if err != nil {
+ return nil, err
+ }
+
+ // YAML objects are not completely compatible with JSON objects (e.g. you
+ // can have non-string keys in YAML). So, convert the YAML-compatible object
+ // to a JSON-compatible object, failing with an error if irrecoverable
+	// incompatibilities happen along the way.
+ jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
+ if err != nil {
+ return nil, err
+ }
+
+ // Convert this object to JSON and return the data.
+ return json.Marshal(jsonObj)
+}
+
+func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
+ var err error
+
+ // Resolve jsonTarget to a concrete value (i.e. not a pointer or an
+ // interface). We pass decodingNull as false because we're not actually
+ // decoding into the value, we're just checking if the ultimate target is a
+ // string.
+ if jsonTarget != nil {
+ ju, tu, pv := indirect(*jsonTarget, false)
+	// We have a JSON or Text Unmarshaler at this level, so we can't be trying
+ // to decode into a string.
+ if ju != nil || tu != nil {
+ jsonTarget = nil
+ } else {
+ jsonTarget = &pv
+ }
+ }
+
+ // If yamlObj is a number or a boolean, check if jsonTarget is a string -
+ // if so, coerce. Else return normal.
+ // If yamlObj is a map or array, find the field that each key is
+ // unmarshaling to, and when you recurse pass the reflect.Value for that
+ // field back into this function.
+ switch typedYAMLObj := yamlObj.(type) {
+ case map[interface{}]interface{}:
+ // JSON does not support arbitrary keys in a map, so we must convert
+ // these keys to strings.
+ //
+ // From my reading of go-yaml v2 (specifically the resolve function),
+ // keys can only have the types string, int, int64, float64, binary
+ // (unsupported), or null (unsupported).
+ strMap := make(map[string]interface{})
+ for k, v := range typedYAMLObj {
+ // Resolve the key to a string first.
+ var keyString string
+ switch typedKey := k.(type) {
+ case string:
+ keyString = typedKey
+ case int:
+ keyString = strconv.Itoa(typedKey)
+ case int64:
+ // go-yaml will only return an int64 as a key if the system
+ // architecture is 32-bit and the key's value is between 32-bit
+ // and 64-bit. Otherwise the key type will simply be int.
+ keyString = strconv.FormatInt(typedKey, 10)
+ case float64:
+ // Stolen from go-yaml to use the same conversion to string as
+ // the go-yaml library uses to convert float to string when
+ // Marshaling.
+ s := strconv.FormatFloat(typedKey, 'g', -1, 32)
+ switch s {
+ case "+Inf":
+ s = ".inf"
+ case "-Inf":
+ s = "-.inf"
+ case "NaN":
+ s = ".nan"
+ }
+ keyString = s
+ case bool:
+ if typedKey {
+ keyString = "true"
+ } else {
+ keyString = "false"
+ }
+ default:
+ return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
+ reflect.TypeOf(k), k, v)
+ }
+
+ // jsonTarget should be a struct or a map. If it's a struct, find
+ // the field it's going to map to and pass its reflect.Value. If
+ // it's a map, find the element type of the map and pass the
+ // reflect.Value created from that type. If it's neither, just pass
+ // nil - JSON conversion will error for us if it's a real issue.
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Struct {
+ keyBytes := []byte(keyString)
+ // Find the field that the JSON library would use.
+ var f *field
+ fields := cachedTypeFields(t.Type())
+ for i := range fields {
+ ff := &fields[i]
+ if bytes.Equal(ff.nameBytes, keyBytes) {
+ f = ff
+ break
+ }
+ // Do case-insensitive comparison.
+ if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
+ f = ff
+ }
+ }
+ if f != nil {
+ // Find the reflect.Value of the most preferential
+ // struct field.
+ jtf := t.Field(f.index[0])
+ strMap[keyString], err = convertToJSONableObject(v, &jtf)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ } else if t.Kind() == reflect.Map {
+ // Create a zero value of the map's element type to use as
+ // the JSON target.
+ jtv := reflect.Zero(t.Type().Elem())
+ strMap[keyString], err = convertToJSONableObject(v, &jtv)
+ if err != nil {
+ return nil, err
+ }
+ continue
+ }
+ }
+ strMap[keyString], err = convertToJSONableObject(v, nil)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return strMap, nil
+ case []interface{}:
+ // We need to recurse into arrays in case there are any
+ // map[interface{}]interface{}'s inside and to convert any
+ // numbers to strings.
+
+ // If jsonTarget is a slice (which it really should be), find the
+ // thing it's going to map to. If it's not a slice, just pass nil
+ // - JSON conversion will error for us if it's a real issue.
+ var jsonSliceElemValue *reflect.Value
+ if jsonTarget != nil {
+ t := *jsonTarget
+ if t.Kind() == reflect.Slice {
+ // By default slices point to nil, but we need a reflect.Value
+ // pointing to a value of the slice type, so we create one here.
+ ev := reflect.Indirect(reflect.New(t.Type().Elem()))
+ jsonSliceElemValue = &ev
+ }
+ }
+
+ // Make and use a new array.
+ arr := make([]interface{}, len(typedYAMLObj))
+ for i, v := range typedYAMLObj {
+ arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return arr, nil
+ default:
+ // If the target type is a string and the YAML type is a number,
+ // convert the YAML type to a string.
+ if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
+ // Based on my reading of go-yaml, it may return int, int64,
+ // float64, or uint64.
+ var s string
+ switch typedVal := typedYAMLObj.(type) {
+ case int:
+ s = strconv.FormatInt(int64(typedVal), 10)
+ case int64:
+ s = strconv.FormatInt(typedVal, 10)
+ case float64:
+ s = strconv.FormatFloat(typedVal, 'g', -1, 32)
+ case uint64:
+ s = strconv.FormatUint(typedVal, 10)
+ case bool:
+ if typedVal {
+ s = "true"
+ } else {
+ s = "false"
+ }
+ }
+ if len(s) > 0 {
+ yamlObj = interface{}(s)
+ }
+ }
+ return yamlObj, nil
+ }
+
+ return nil, nil
+}
diff --git a/vendor/github.com/go-ole/go-ole/.travis.yml b/vendor/github.com/go-ole/go-ole/.travis.yml
new file mode 100644
index 00000000..28f740cd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/.travis.yml
@@ -0,0 +1,8 @@
+language: go
+sudo: false
+
+go:
+ - 1.9.x
+ - 1.10.x
+ - 1.11.x
+ - tip
diff --git a/vendor/github.com/go-ole/go-ole/ChangeLog.md b/vendor/github.com/go-ole/go-ole/ChangeLog.md
new file mode 100644
index 00000000..4ba6a8c6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ChangeLog.md
@@ -0,0 +1,49 @@
+# Version 1.x.x
+
+* **Add more test cases and reference new test COM server project.** (Placeholder for future additions)
+
+# Version 1.2.0-alphaX
+
+**Minimum supported version is now Go 1.4. Go 1.1 support is deprecated, but should still build.**
+
+ * Added CI configuration for Travis-CI and AppVeyor.
+ * Added test InterfaceID and ClassID for the COM Test Server project.
+ * Added more inline documentation (#83).
+ * Added IEnumVARIANT implementation (#88).
+ * Added IEnumVARIANT test cases (#99, #100, #101).
+ * Added support for retrieving `time.Time` from VARIANT (#92).
+ * Added test case for IUnknown (#64).
+ * Added test case for IDispatch (#64).
+ * Added test cases for scalar variants (#64, #76).
+
+# Version 1.1.1
+
+ * Fixes for Linux build.
+ * Fixes for Windows build.
+
+# Version 1.1.0
+
+The change to provide building on all platforms is a new feature. The increase in minor version reflects that and allows those who wish to stay on 1.0.x to continue to do so. Support for 1.0.x will be limited to bug fixes.
+
+ * Move GUID out of variables.go into its own file to make new documentation available.
+ * Move OleError out of ole.go into its own file to make new documentation available.
+ * Add documentation to utility functions.
+ * Add documentation to variant receiver functions.
+ * Add documentation to ole structures.
+ * Make variant available to other systems outside of Windows.
+ * Make OLE structures available to other systems outside of Windows.
+
+## New Features
+
+ * Library should now be built on all platforms supported by Go. Library will NOOP on any platform that is not Windows.
+ * More functions are now documented and available on godoc.org.
+
+# Version 1.0.1
+
+ 1. Fix package references from repository location change.
+
+# Version 1.0.0
+
+This version is stable enough for use. The COM API is still incomplete, but provides enough functionality for accessing COM servers using IDispatch interface.
+
+There is no changelog for this version. Check commits for history.
diff --git a/vendor/github.com/go-ole/go-ole/LICENSE b/vendor/github.com/go-ole/go-ole/LICENSE
new file mode 100644
index 00000000..623ec06f
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright © 2013-2017 Yasuhiro Matsumoto,
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the “Software”), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/go-ole/go-ole/README.md b/vendor/github.com/go-ole/go-ole/README.md
new file mode 100644
index 00000000..7b577558
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/README.md
@@ -0,0 +1,46 @@
+# Go OLE
+
+[![Build status](https://ci.appveyor.com/api/projects/status/qr0u2sf7q43us9fj?svg=true)](https://ci.appveyor.com/project/jacobsantos/go-ole-jgs28)
+[![Build Status](https://travis-ci.org/go-ole/go-ole.svg?branch=master)](https://travis-ci.org/go-ole/go-ole)
+[![GoDoc](https://godoc.org/github.com/go-ole/go-ole?status.svg)](https://godoc.org/github.com/go-ole/go-ole)
+
+Go bindings for Windows COM using shared libraries instead of cgo.
+
+By Yasuhiro Matsumoto.
+
+## Install
+
+To experiment with go-ole, you can just compile and run the example program:
+
+```
+go get github.com/go-ole/go-ole
+cd /path/to/go-ole/
+go test
+
+cd /path/to/go-ole/example/excel
+go run excel.go
+```
+
+## Continuous Integration
+
+Continuous integration configuration has been added for both Travis-CI and AppVeyor. You will have to add these to your own account for your fork in order for it to run.
+
+**Travis-CI**
+
+Travis-CI was added to check builds on Linux to ensure that `go get` works when cross-building. Currently, Travis-CI is not used to test cross-building, but this may be changed in the future. It is also not currently possible to test the library on Linux, since the COM API is specific to Windows and it is not possible to run a COM server on Linux or even connect to a remote COM server.
+
+**AppVeyor**
+
+AppVeyor is used to build on Windows using the (in-development) test COM server. It is currently only used to test the build and ensure that the code works on Windows. It will be used to register a COM server and then run the test cases based on the test COM server.
+
+The tests currently do run and do pass and this should be maintained with commits.
+
+## Versioning
+
+Go OLE uses [semantic versioning](http://semver.org) for version numbers, which is similar to the version contract of the Go language. This means that the major version will always maintain backwards compatibility with minor versions. Minor versions will only add new features and changes. Fixes will always be in patch releases.
+
+This contract should allow you to upgrade to new minor and patch versions without breakage or modifications to your existing code. If there is breakage, leave a ticket so that it can be fixed.
+
+## LICENSE
+
+Under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/go-ole/go-ole/appveyor.yml b/vendor/github.com/go-ole/go-ole/appveyor.yml
new file mode 100644
index 00000000..0d557ac2
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/appveyor.yml
@@ -0,0 +1,54 @@
+# Notes:
+# - Minimal appveyor.yml file is an empty file. All sections are optional.
+# - Indent each level of configuration with 2 spaces. Do not use tabs!
+# - All section names are case-sensitive.
+# - Section names should be unique on each level.
+
+version: "1.3.0.{build}-alpha-{branch}"
+
+os: Windows Server 2012 R2
+
+branches:
+ only:
+ - master
+ - v1.2
+ - v1.1
+ - v1.0
+
+skip_tags: true
+
+clone_folder: c:\gopath\src\github.com\go-ole\go-ole
+
+environment:
+ GOPATH: c:\gopath
+ matrix:
+ - GOARCH: amd64
+ GOVERSION: 1.5
+ GOROOT: c:\go
+ DOWNLOADPLATFORM: "x64"
+
+install:
+ - choco install mingw
+ - SET PATH=c:\tools\mingw64\bin;%PATH%
+ # - Download COM Server
+ - ps: Start-FileDownload "https://github.com/go-ole/test-com-server/releases/download/v1.0.2/test-com-server-${env:DOWNLOADPLATFORM}.zip"
+ - 7z e test-com-server-%DOWNLOADPLATFORM%.zip -oc:\gopath\src\github.com\go-ole\go-ole > NUL
+ - c:\gopath\src\github.com\go-ole\go-ole\build\register-assembly.bat
+ # - set
+ - go version
+ - go env
+ - go get -u golang.org/x/tools/cmd/cover
+ - go get -u golang.org/x/tools/cmd/godoc
+ - go get -u golang.org/x/tools/cmd/stringer
+
+build_script:
+ - cd c:\gopath\src\github.com\go-ole\go-ole
+ - go get -v -t ./...
+ - go build
+ - go test -v -cover ./...
+
+# disable automatic tests
+test: off
+
+# disable deployment
+deploy: off
diff --git a/vendor/github.com/go-ole/go-ole/com.go b/vendor/github.com/go-ole/go-ole/com.go
new file mode 100644
index 00000000..a9bef150
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/com.go
@@ -0,0 +1,344 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unicode/utf16"
+ "unsafe"
+)
+
+var (
+ procCoInitialize = modole32.NewProc("CoInitialize")
+ procCoInitializeEx = modole32.NewProc("CoInitializeEx")
+ procCoUninitialize = modole32.NewProc("CoUninitialize")
+ procCoCreateInstance = modole32.NewProc("CoCreateInstance")
+ procCoTaskMemFree = modole32.NewProc("CoTaskMemFree")
+ procCLSIDFromProgID = modole32.NewProc("CLSIDFromProgID")
+ procCLSIDFromString = modole32.NewProc("CLSIDFromString")
+ procStringFromCLSID = modole32.NewProc("StringFromCLSID")
+ procStringFromIID = modole32.NewProc("StringFromIID")
+ procIIDFromString = modole32.NewProc("IIDFromString")
+ procCoGetObject = modole32.NewProc("CoGetObject")
+ procGetUserDefaultLCID = modkernel32.NewProc("GetUserDefaultLCID")
+ procCopyMemory = modkernel32.NewProc("RtlMoveMemory")
+ procVariantInit = modoleaut32.NewProc("VariantInit")
+ procVariantClear = modoleaut32.NewProc("VariantClear")
+ procVariantTimeToSystemTime = modoleaut32.NewProc("VariantTimeToSystemTime")
+ procSysAllocString = modoleaut32.NewProc("SysAllocString")
+ procSysAllocStringLen = modoleaut32.NewProc("SysAllocStringLen")
+ procSysFreeString = modoleaut32.NewProc("SysFreeString")
+ procSysStringLen = modoleaut32.NewProc("SysStringLen")
+ procCreateDispTypeInfo = modoleaut32.NewProc("CreateDispTypeInfo")
+ procCreateStdDispatch = modoleaut32.NewProc("CreateStdDispatch")
+ procGetActiveObject = modoleaut32.NewProc("GetActiveObject")
+
+ procGetMessageW = moduser32.NewProc("GetMessageW")
+ procDispatchMessageW = moduser32.NewProc("DispatchMessageW")
+)
+
+// coInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() (err error) {
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms678543(v=vs.85).aspx
+ // Suggests that no value should be passed to CoInitialized.
+	// Suggests that no value should be passed to CoInitialize.
+ hr, _, _ := procCoInitialize.Call(uintptr(0))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) (err error) {
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/ms695279(v=vs.85).aspx
+ // Suggests that the first parameter is not only optional but should always be NULL.
+ hr, _, _ := procCoInitializeEx.Call(uintptr(0), uintptr(coinit))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) (err error) {
+ // p is ignored and won't be used.
+ // Avoid any variable not used errors.
+ p = uintptr(0)
+ return coInitialize()
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) (err error) {
+ // Avoid any variable not used errors.
+ p = uintptr(0)
+ return coInitializeEx(coinit)
+}
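+
+// A typical caller-side initialization sketch (pairing initialization with
+// CoUninitialize, as is conventional for COM):
+//
+//	if err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED); err != nil {
+//		return err
+//	}
+//	defer ole.CoUninitialize()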
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {
+ procCoUninitialize.Call()
+}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {
+ procCoTaskMemFree.Call(memptr)
+}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (clsid *GUID, err error) {
+ var guid GUID
+ lpszProgID := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+ hr, _, _ := procCLSIDFromProgID.Call(lpszProgID, uintptr(unsafe.Pointer(&guid)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ clsid = &guid
+ return
+}
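+
+// Caller-side sketch (the ProgID below is only illustrative; any registered
+// ProgID works):
+//
+//	clsid, err := ole.CLSIDFromProgID("Shell.Application")
+//	if err != nil {
+//		return err
+//	}
+//	unknown, err := ole.CreateInstance(clsid, ole.IID_IUnknown)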
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (clsid *GUID, err error) {
+ var guid GUID
+ lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(str)))
+ hr, _, _ := procCLSIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ clsid = &guid
+ return
+}
+
+// StringFromCLSID returns a GUID formatted string from a GUID object.
+func StringFromCLSID(clsid *GUID) (str string, err error) {
+ var p *uint16
+ hr, _, _ := procStringFromCLSID.Call(uintptr(unsafe.Pointer(clsid)), uintptr(unsafe.Pointer(&p)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ str = LpOleStrToString(p)
+ return
+}
+
+// IIDFromString returns GUID from program ID.
+func IIDFromString(progId string) (clsid *GUID, err error) {
+ var guid GUID
+ lpsz := uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(progId)))
+ hr, _, _ := procIIDFromString.Call(lpsz, uintptr(unsafe.Pointer(&guid)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ clsid = &guid
+ return
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (str string, err error) {
+ var p *uint16
+ hr, _, _ := procStringFromIID.Call(uintptr(unsafe.Pointer(iid)), uintptr(unsafe.Pointer(&p)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ str = LpOleStrToString(p)
+ return
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+ if iid == nil {
+ iid = IID_IUnknown
+ }
+ hr, _, _ := procCoCreateInstance.Call(
+ uintptr(unsafe.Pointer(clsid)),
+ 0,
+ CLSCTX_SERVER,
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&unk)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (unk *IUnknown, err error) {
+ if iid == nil {
+ iid = IID_IUnknown
+ }
+ hr, _, _ := procGetActiveObject.Call(
+ uintptr(unsafe.Pointer(clsid)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&unk)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+type BindOpts struct {
+ CbStruct uint32
+ GrfFlags uint32
+ GrfMode uint32
+ TickCountDeadline uint32
+}
+
+// GetObject retrieves pointer to active object.
+func GetObject(programID string, bindOpts *BindOpts, iid *GUID) (unk *IUnknown, err error) {
+ if bindOpts != nil {
+ bindOpts.CbStruct = uint32(unsafe.Sizeof(BindOpts{}))
+ }
+ if iid == nil {
+ iid = IID_IUnknown
+ }
+ hr, _, _ := procCoGetObject.Call(
+ uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(programID))),
+ uintptr(unsafe.Pointer(bindOpts)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&unk)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) (err error) {
+ hr, _, _ := procVariantInit.Call(uintptr(unsafe.Pointer(v)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// VariantClear clears value in Variant settings to VT_EMPTY.
+func VariantClear(v *VARIANT) (err error) {
+ hr, _, _ := procVariantClear.Call(uintptr(unsafe.Pointer(v)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
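+
+// Typical VARIANT lifecycle sketch for callers:
+//
+//	var v ole.VARIANT
+//	ole.VariantInit(&v)
+//	defer ole.VariantClear(&v)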
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) (ss *int16) {
+ pss, _, _ := procSysAllocString.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(v))))
+ ss = (*int16)(unsafe.Pointer(pss))
+ return
+}
+
+// SysAllocStringLen copies up to the length of the given string, returning a pointer.
+func SysAllocStringLen(v string) (ss *int16) {
+ utf16 := utf16.Encode([]rune(v + "\x00"))
+ ptr := &utf16[0]
+
+ pss, _, _ := procSysAllocStringLen.Call(uintptr(unsafe.Pointer(ptr)), uintptr(len(utf16)-1))
+ ss = (*int16)(unsafe.Pointer(pss))
+ return
+}
+
+// SysFreeString frees string system memory. This must be called for strings allocated with SysAllocString.
+func SysFreeString(v *int16) (err error) {
+ hr, _, _ := procSysFreeString.Call(uintptr(unsafe.Pointer(v)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
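+
+// Allocation/free pairing sketch for callers:
+//
+//	bstr := ole.SysAllocString("hello")
+//	defer ole.SysFreeString(bstr)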
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+ l, _, _ := procSysStringLen.Call(uintptr(unsafe.Pointer(v)))
+ return uint32(l)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles the default IDispatch implementation for objects. It has a few
+// limitations, such as only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (disp *IDispatch, err error) {
+ hr, _, _ := procCreateStdDispatch.Call(
+ uintptr(unsafe.Pointer(unk)),
+ v,
+ uintptr(unsafe.Pointer(ptinfo)),
+ uintptr(unsafe.Pointer(&disp)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (pptinfo *IUnknown, err error) {
+ hr, _, _ := procCreateDispTypeInfo.Call(
+ uintptr(unsafe.Pointer(idata)),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(unsafe.Pointer(&pptinfo)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// copyMemory moves location of a block of memory.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {
+ procCopyMemory.Call(uintptr(dest), uintptr(src), uintptr(length))
+}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() (lcid uint32) {
+ ret, _, _ := procGetUserDefaultLCID.Call()
+ lcid = uint32(ret)
+ return
+}
+
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (ret int32, err error) {
+ r0, _, err := procGetMessageW.Call(uintptr(unsafe.Pointer(msg)), uintptr(hwnd), uintptr(MsgFilterMin), uintptr(MsgFilterMax))
+ ret = int32(r0)
+ return
+}
+
+// DispatchMessage to window procedure.
+func DispatchMessage(msg *Msg) (ret int32) {
+ r0, _, _ := procDispatchMessageW.Call(uintptr(unsafe.Pointer(msg)))
+ ret = int32(r0)
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/com_func.go b/vendor/github.com/go-ole/go-ole/com_func.go
new file mode 100644
index 00000000..cef539d9
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/com_func.go
@@ -0,0 +1,174 @@
+// +build !windows
+
+package ole
+
+import (
+ "time"
+ "unsafe"
+)
+
+// coInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func coInitialize() error {
+ return NewError(E_NOTIMPL)
+}
+
+// coInitializeEx initializes COM library with concurrency model.
+func coInitializeEx(coinit uint32) error {
+ return NewError(E_NOTIMPL)
+}
+
+// CoInitialize initializes COM library on current thread.
+//
+// MSDN documentation suggests that this function should not be called. Call
+// CoInitializeEx() instead. The reason has to do with threading and this
+// function is only for single-threaded apartments.
+//
+// That said, most users of the library have gotten away with just this
+// function. If you are experiencing threading issues, then use
+// CoInitializeEx().
+func CoInitialize(p uintptr) error {
+ return NewError(E_NOTIMPL)
+}
+
+// CoInitializeEx initializes COM library with concurrency model.
+func CoInitializeEx(p uintptr, coinit uint32) error {
+ return NewError(E_NOTIMPL)
+}
+
+// CoUninitialize uninitializes COM Library.
+func CoUninitialize() {}
+
+// CoTaskMemFree frees memory pointer.
+func CoTaskMemFree(memptr uintptr) {}
+
+// CLSIDFromProgID retrieves Class Identifier with the given Program Identifier.
+//
+// The Programmatic Identifier must be registered, because it will be looked up
+// in the Windows Registry. The registry entry has the following keys: CLSID,
+// Insertable, Protocol and Shell
+// (https://msdn.microsoft.com/en-us/library/dd542719(v=vs.85).aspx).
+//
+// programID identifies the class id with less precision and is not guaranteed
+// to be unique. These are usually found in the registry under
+// HKEY_LOCAL_MACHINE\SOFTWARE\Classes, usually with the format of
+// "Program.Component.Version" with version being optional.
+//
+// CLSIDFromProgID in Windows API.
+func CLSIDFromProgID(progId string) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// CLSIDFromString retrieves Class ID from string representation.
+//
+// This is technically the string version of the GUID and will convert the
+// string to object.
+//
+// CLSIDFromString in Windows API.
+func CLSIDFromString(str string) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromCLSID returns a GUID formatted string from a GUID object.
+func StringFromCLSID(clsid *GUID) (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+// IIDFromString returns GUID from program ID.
+func IIDFromString(progId string) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// StringFromIID returns GUID formatted string from GUID object.
+func StringFromIID(iid *GUID) (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+// CreateInstance of single uninitialized object with GUID.
+func CreateInstance(clsid *GUID, iid *GUID) (*IUnknown, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// GetActiveObject retrieves pointer to active object.
+func GetActiveObject(clsid *GUID, iid *GUID) (*IUnknown, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// VariantInit initializes variant.
+func VariantInit(v *VARIANT) error {
+ return NewError(E_NOTIMPL)
+}
+
+// VariantClear clears value in Variant settings to VT_EMPTY.
+func VariantClear(v *VARIANT) error {
+ return NewError(E_NOTIMPL)
+}
+
+// SysAllocString allocates memory for string and copies string into memory.
+func SysAllocString(v string) *int16 {
+ u := int16(0)
+ return &u
+}
+
+// SysAllocStringLen copies up to the length of the given string, returning a pointer.
+func SysAllocStringLen(v string) *int16 {
+ u := int16(0)
+ return &u
+}
+
+// SysFreeString frees string system memory. This must be called for strings allocated with SysAllocString.
+func SysFreeString(v *int16) error {
+ return NewError(E_NOTIMPL)
+}
+
+// SysStringLen is the length of the system allocated string.
+func SysStringLen(v *int16) uint32 {
+ return uint32(0)
+}
+
+// CreateStdDispatch provides default IDispatch implementation for IUnknown.
+//
+// This handles the default IDispatch implementation for objects. It has a few
+// limitations, such as only supporting one language. It will also only return
+// default exception codes.
+func CreateStdDispatch(unk *IUnknown, v uintptr, ptinfo *IUnknown) (*IDispatch, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// CreateDispTypeInfo provides default ITypeInfo implementation for IDispatch.
+//
+// This will not handle the full implementation of the interface.
+func CreateDispTypeInfo(idata *INTERFACEDATA) (*IUnknown, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// copyMemory moves location of a block of memory.
+func copyMemory(dest unsafe.Pointer, src unsafe.Pointer, length uint32) {}
+
+// GetUserDefaultLCID retrieves current user default locale.
+func GetUserDefaultLCID() uint32 {
+ return uint32(0)
+}
+
+// GetMessage in message queue from runtime.
+//
+// This function appears to block. PeekMessage does not block.
+func GetMessage(msg *Msg, hwnd uint32, MsgFilterMin uint32, MsgFilterMax uint32) (int32, error) {
+ return int32(0), NewError(E_NOTIMPL)
+}
+
+// DispatchMessage to window procedure.
+func DispatchMessage(msg *Msg) int32 {
+ return int32(0)
+}
+
+func GetVariantDate(value uint64) (time.Time, error) {
+ return time.Now(), NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/connect.go b/vendor/github.com/go-ole/go-ole/connect.go
new file mode 100644
index 00000000..b2ac2ec6
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/connect.go
@@ -0,0 +1,192 @@
+package ole
+
+// Connection contains IUnknown for fluent interface interaction.
+//
+// Deprecated. Use oleutil package instead.
+type Connection struct {
+ Object *IUnknown // Access COM
+}
+
+// Initialize COM.
+func (*Connection) Initialize() (err error) {
+ return coInitialize()
+}
+
+// Uninitialize COM.
+func (*Connection) Uninitialize() {
+ CoUninitialize()
+}
+
+// Create IUnknown object based first on ProgId and then from String.
+func (c *Connection) Create(progId string) (err error) {
+ var clsid *GUID
+ clsid, err = CLSIDFromProgID(progId)
+ if err != nil {
+ clsid, err = CLSIDFromString(progId)
+ if err != nil {
+ return
+ }
+ }
+
+ unknown, err := CreateInstance(clsid, IID_IUnknown)
+ if err != nil {
+ return
+ }
+ c.Object = unknown
+
+ return
+}
+
+// Release IUnknown object.
+func (c *Connection) Release() {
+ c.Object.Release()
+}
+
+// Load COM object from list of programIDs or strings.
+func (c *Connection) Load(names ...string) (errors []error) {
+ var tempErrors []error = make([]error, len(names))
+ var numErrors int = 0
+ for _, name := range names {
+ err := c.Create(name)
+ if err != nil {
+ tempErrors = append(tempErrors, err)
+ numErrors += 1
+ continue
+ }
+ break
+ }
+
+ copy(errors, tempErrors[0:numErrors])
+ return
+}
+
+// Dispatch returns Dispatch object.
+func (c *Connection) Dispatch() (object *Dispatch, err error) {
+ dispatch, err := c.Object.QueryInterface(IID_IDispatch)
+ if err != nil {
+ return
+ }
+ object = &Dispatch{dispatch}
+ return
+}
+
+// Dispatch stores IDispatch object.
+type Dispatch struct {
+ Object *IDispatch // Dispatch object.
+}
+
+// Call method on IDispatch with parameters.
+func (d *Dispatch) Call(method string, params ...interface{}) (result *VARIANT, err error) {
+ id, err := d.GetId(method)
+ if err != nil {
+ return
+ }
+
+ result, err = d.Invoke(id, DISPATCH_METHOD, params)
+ return
+}
+
+// MustCall method on IDispatch with parameters.
+func (d *Dispatch) MustCall(method string, params ...interface{}) (result *VARIANT) {
+ id, err := d.GetId(method)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err = d.Invoke(id, DISPATCH_METHOD, params)
+ if err != nil {
+ panic(err)
+ }
+
+ return
+}
+
+// Get property on IDispatch with parameters.
+func (d *Dispatch) Get(name string, params ...interface{}) (result *VARIANT, err error) {
+ id, err := d.GetId(name)
+ if err != nil {
+ return
+ }
+ result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+ return
+}
+
+// MustGet property on IDispatch with parameters.
+func (d *Dispatch) MustGet(name string, params ...interface{}) (result *VARIANT) {
+ id, err := d.GetId(name)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err = d.Invoke(id, DISPATCH_PROPERTYGET, params)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+// Set property on IDispatch with parameters.
+func (d *Dispatch) Set(name string, params ...interface{}) (result *VARIANT, err error) {
+ id, err := d.GetId(name)
+ if err != nil {
+ return
+ }
+ result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+ return
+}
+
+// MustSet property on IDispatch with parameters.
+func (d *Dispatch) MustSet(name string, params ...interface{}) (result *VARIANT) {
+ id, err := d.GetId(name)
+ if err != nil {
+ panic(err)
+ }
+
+ result, err = d.Invoke(id, DISPATCH_PROPERTYPUT, params)
+ if err != nil {
+ panic(err)
+ }
+ return
+}
+
+// GetId retrieves ID of name on IDispatch.
+func (d *Dispatch) GetId(name string) (id int32, err error) {
+ var dispid []int32
+ dispid, err = d.Object.GetIDsOfName([]string{name})
+ if err != nil {
+ return
+ }
+ id = dispid[0]
+ return
+}
+
+// GetIds retrieves all IDs of names on IDispatch.
+func (d *Dispatch) GetIds(names ...string) (dispid []int32, err error) {
+ dispid, err = d.Object.GetIDsOfName(names)
+ return
+}
+
+// Invoke calls the IDispatch member with the given display ID and dispatch type, passing the
+// parameters through.
+//
+// Parameters are taken as a slice rather than variadically: forwarding a variadic ...interface{}
+// through another variadic call has caused errors when the parameter list was empty.
+func (d *Dispatch) Invoke(id int32, dispatch int16, params []interface{}) (result *VARIANT, err error) {
+ if len(params) < 1 {
+ result, err = d.Object.Invoke(id, dispatch)
+ } else {
+ result, err = d.Object.Invoke(id, dispatch, params...)
+ }
+ return
+}
+
+// Release IDispatch object.
+func (d *Dispatch) Release() {
+ d.Object.Release()
+}
+
+// Connect initializes COM and attempts to load an IUnknown based on the given names.
+func Connect(names ...string) (connection *Connection) {
+	connection = &Connection{}
+	connection.Initialize()
+	connection.Load(names...)
+	return
+}
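
A minimal sketch of how the (deprecated) `Connection`/`Dispatch` fluent wrapper above is meant to be used, assuming a Windows host with a registered COM server. The ProgID, property, and method names are illustrative only, and `ToString` comes from the package's variant helpers, which are outside this hunk:

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	conn := &ole.Connection{}
	if err := conn.Initialize(); err != nil { // initialize COM for this thread
		panic(err)
	}
	defer conn.Uninitialize()

	// "Excel.Application" is an illustrative ProgID; any registered COM server works.
	if err := conn.Create("Excel.Application"); err != nil {
		panic(err)
	}
	defer conn.Release()

	disp, err := conn.Dispatch() // query IDispatch from the loaded IUnknown
	if err != nil {
		panic(err)
	}
	defer disp.Release()

	name, err := disp.Get("Name") // property get by name; "Name" is illustrative
	if err != nil {
		panic(err)
	}
	fmt.Println(name.ToString()) // ToString is a variant helper defined elsewhere in the package

	_, _ = disp.Call("Quit") // method call by name; "Quit" is illustrative
}
```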
diff --git a/vendor/github.com/go-ole/go-ole/constants.go b/vendor/github.com/go-ole/go-ole/constants.go
new file mode 100644
index 00000000..fd0c6d74
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/constants.go
@@ -0,0 +1,153 @@
+package ole
+
+const (
+ CLSCTX_INPROC_SERVER = 1
+ CLSCTX_INPROC_HANDLER = 2
+ CLSCTX_LOCAL_SERVER = 4
+ CLSCTX_INPROC_SERVER16 = 8
+ CLSCTX_REMOTE_SERVER = 16
+ CLSCTX_ALL = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER | CLSCTX_LOCAL_SERVER
+ CLSCTX_INPROC = CLSCTX_INPROC_SERVER | CLSCTX_INPROC_HANDLER
+ CLSCTX_SERVER = CLSCTX_INPROC_SERVER | CLSCTX_LOCAL_SERVER | CLSCTX_REMOTE_SERVER
+)
+
+const (
+ COINIT_APARTMENTTHREADED = 0x2
+ COINIT_MULTITHREADED = 0x0
+ COINIT_DISABLE_OLE1DDE = 0x4
+ COINIT_SPEED_OVER_MEMORY = 0x8
+)
+
+const (
+ DISPATCH_METHOD = 1
+ DISPATCH_PROPERTYGET = 2
+ DISPATCH_PROPERTYPUT = 4
+ DISPATCH_PROPERTYPUTREF = 8
+)
+
+const (
+ S_OK = 0x00000000
+ E_UNEXPECTED = 0x8000FFFF
+ E_NOTIMPL = 0x80004001
+ E_OUTOFMEMORY = 0x8007000E
+ E_INVALIDARG = 0x80070057
+ E_NOINTERFACE = 0x80004002
+ E_POINTER = 0x80004003
+ E_HANDLE = 0x80070006
+ E_ABORT = 0x80004004
+ E_FAIL = 0x80004005
+ E_ACCESSDENIED = 0x80070005
+ E_PENDING = 0x8000000A
+
+ CO_E_CLASSSTRING = 0x800401F3
+)
+
+const (
+ CC_FASTCALL = iota
+ CC_CDECL
+ CC_MSCPASCAL
+ CC_PASCAL = CC_MSCPASCAL
+ CC_MACPASCAL
+ CC_STDCALL
+ CC_FPFASTCALL
+ CC_SYSCALL
+ CC_MPWCDECL
+ CC_MPWPASCAL
+ CC_MAX = CC_MPWPASCAL
+)
+
+type VT uint16
+
+const (
+ VT_EMPTY VT = 0x0
+ VT_NULL VT = 0x1
+ VT_I2 VT = 0x2
+ VT_I4 VT = 0x3
+ VT_R4 VT = 0x4
+ VT_R8 VT = 0x5
+ VT_CY VT = 0x6
+ VT_DATE VT = 0x7
+ VT_BSTR VT = 0x8
+ VT_DISPATCH VT = 0x9
+ VT_ERROR VT = 0xa
+ VT_BOOL VT = 0xb
+ VT_VARIANT VT = 0xc
+ VT_UNKNOWN VT = 0xd
+ VT_DECIMAL VT = 0xe
+ VT_I1 VT = 0x10
+ VT_UI1 VT = 0x11
+ VT_UI2 VT = 0x12
+ VT_UI4 VT = 0x13
+ VT_I8 VT = 0x14
+ VT_UI8 VT = 0x15
+ VT_INT VT = 0x16
+ VT_UINT VT = 0x17
+ VT_VOID VT = 0x18
+ VT_HRESULT VT = 0x19
+ VT_PTR VT = 0x1a
+ VT_SAFEARRAY VT = 0x1b
+ VT_CARRAY VT = 0x1c
+ VT_USERDEFINED VT = 0x1d
+ VT_LPSTR VT = 0x1e
+ VT_LPWSTR VT = 0x1f
+ VT_RECORD VT = 0x24
+ VT_INT_PTR VT = 0x25
+ VT_UINT_PTR VT = 0x26
+ VT_FILETIME VT = 0x40
+ VT_BLOB VT = 0x41
+ VT_STREAM VT = 0x42
+ VT_STORAGE VT = 0x43
+ VT_STREAMED_OBJECT VT = 0x44
+ VT_STORED_OBJECT VT = 0x45
+ VT_BLOB_OBJECT VT = 0x46
+ VT_CF VT = 0x47
+ VT_CLSID VT = 0x48
+ VT_BSTR_BLOB VT = 0xfff
+ VT_VECTOR VT = 0x1000
+ VT_ARRAY VT = 0x2000
+ VT_BYREF VT = 0x4000
+ VT_RESERVED VT = 0x8000
+ VT_ILLEGAL VT = 0xffff
+ VT_ILLEGALMASKED VT = 0xfff
+ VT_TYPEMASK VT = 0xfff
+)
+
+const (
+ DISPID_UNKNOWN = -1
+ DISPID_VALUE = 0
+ DISPID_PROPERTYPUT = -3
+ DISPID_NEWENUM = -4
+ DISPID_EVALUATE = -5
+ DISPID_CONSTRUCTOR = -6
+ DISPID_DESTRUCTOR = -7
+ DISPID_COLLECT = -8
+)
+
+const (
+ TKIND_ENUM = 1
+ TKIND_RECORD = 2
+ TKIND_MODULE = 3
+ TKIND_INTERFACE = 4
+ TKIND_DISPATCH = 5
+ TKIND_COCLASS = 6
+ TKIND_ALIAS = 7
+ TKIND_UNION = 8
+ TKIND_MAX = 9
+)
+
+// Safe Array Feature Flags
+
+const (
+ FADF_AUTO = 0x0001
+ FADF_STATIC = 0x0002
+ FADF_EMBEDDED = 0x0004
+ FADF_FIXEDSIZE = 0x0010
+ FADF_RECORD = 0x0020
+ FADF_HAVEIID = 0x0040
+ FADF_HAVEVARTYPE = 0x0080
+ FADF_BSTR = 0x0100
+ FADF_UNKNOWN = 0x0200
+ FADF_DISPATCH = 0x0400
+ FADF_VARIANT = 0x0800
+ FADF_RESERVED = 0xF008
+)
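
The VT values above combine with the modifier bits (`VT_BYREF`, `VT_ARRAY`), and masking with `VT_TYPEMASK` recovers the base type; this is how the invoke path later in this diff builds variants such as `VT_BSTR|VT_BYREF` and `VT_ARRAY|VT_UI1`. A small sketch of reading those bits:

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

// describe reports the base VT type and whether the ARRAY/BYREF modifier bits are set.
func describe(vt ole.VT) string {
	base := vt & ole.VT_TYPEMASK
	switch {
	case vt&ole.VT_ARRAY != 0:
		return fmt.Sprintf("SAFEARRAY of VT %#x", uint16(base))
	case vt&ole.VT_BYREF != 0:
		return fmt.Sprintf("by-ref VT %#x", uint16(base))
	default:
		return fmt.Sprintf("VT %#x", uint16(base))
	}
}

func main() {
	fmt.Println(describe(ole.VT_BSTR | ole.VT_BYREF)) // by-ref VT 0x8
	fmt.Println(describe(ole.VT_ARRAY | ole.VT_UI1))  // SAFEARRAY of VT 0x11
	fmt.Println(describe(ole.VT_I4))                  // VT 0x3
}
```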
diff --git a/vendor/github.com/go-ole/go-ole/error.go b/vendor/github.com/go-ole/go-ole/error.go
new file mode 100644
index 00000000..096b456d
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/error.go
@@ -0,0 +1,51 @@
+package ole
+
+// OleError stores COM errors.
+type OleError struct {
+ hr uintptr
+ description string
+ subError error
+}
+
+// NewError creates new error with HResult.
+func NewError(hr uintptr) *OleError {
+ return &OleError{hr: hr}
+}
+
+// NewErrorWithDescription creates new COM error with HResult and description.
+func NewErrorWithDescription(hr uintptr, description string) *OleError {
+ return &OleError{hr: hr, description: description}
+}
+
+// NewErrorWithSubError creates new COM error with parent error.
+func NewErrorWithSubError(hr uintptr, description string, err error) *OleError {
+ return &OleError{hr: hr, description: description, subError: err}
+}
+
+// Code is the HResult.
+func (v *OleError) Code() uintptr {
+ return uintptr(v.hr)
+}
+
+// String returns the description if one was set; otherwise it formats a message from the error code.
+func (v *OleError) String() string {
+ if v.description != "" {
+ return errstr(int(v.hr)) + " (" + v.description + ")"
+ }
+ return errstr(int(v.hr))
+}
+
+// Error implements error interface.
+func (v *OleError) Error() string {
+ return v.String()
+}
+
+// Description retrieves error summary, if there is one.
+func (v *OleError) Description() string {
+ return v.description
+}
+
+// SubError returns parent error, if there is one.
+func (v *OleError) SubError() error {
+ return v.subError
+}
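
A short sketch of how these constructors compose: a low-level HRESULT can be wrapped with a description and a parent error, and callers can later unwrap it with the accessors. The description text here is purely illustrative:

```go
package main

import (
	"errors"
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	cause := ole.NewError(ole.E_NOTIMPL)
	err := ole.NewErrorWithSubError(ole.E_FAIL, "querying device bus type", cause) // illustrative description

	fmt.Println(err.Error())           // HRESULT text plus the description
	fmt.Printf("hr=%#x\n", err.Code()) // raw HRESULT

	var oleErr *ole.OleError
	if errors.As(err.SubError(), &oleErr) {
		fmt.Printf("caused by hr=%#x\n", oleErr.Code())
	}
}
```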
diff --git a/vendor/github.com/go-ole/go-ole/error_func.go b/vendor/github.com/go-ole/go-ole/error_func.go
new file mode 100644
index 00000000..8a2ffaa2
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/error_func.go
@@ -0,0 +1,8 @@
+// +build !windows
+
+package ole
+
+// errstr converts error code to string.
+func errstr(errno int) string {
+ return ""
+}
diff --git a/vendor/github.com/go-ole/go-ole/error_windows.go b/vendor/github.com/go-ole/go-ole/error_windows.go
new file mode 100644
index 00000000..d0e8e685
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/error_windows.go
@@ -0,0 +1,24 @@
+// +build windows
+
+package ole
+
+import (
+ "fmt"
+ "syscall"
+ "unicode/utf16"
+)
+
+// errstr converts error code to string.
+func errstr(errno int) string {
+ // ask windows for the remaining errors
+ var flags uint32 = syscall.FORMAT_MESSAGE_FROM_SYSTEM | syscall.FORMAT_MESSAGE_ARGUMENT_ARRAY | syscall.FORMAT_MESSAGE_IGNORE_INSERTS
+ b := make([]uint16, 300)
+ n, err := syscall.FormatMessage(flags, 0, uint32(errno), 0, b, nil)
+ if err != nil {
+ return fmt.Sprintf("error %d (FormatMessage failed with: %v)", errno, err)
+ }
+ // trim terminating \r and \n
+ for ; n > 0 && (b[n-1] == '\n' || b[n-1] == '\r'); n-- {
+ }
+ return string(utf16.Decode(b[:n]))
+}
diff --git a/vendor/github.com/go-ole/go-ole/guid.go b/vendor/github.com/go-ole/go-ole/guid.go
new file mode 100644
index 00000000..8d20f68f
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/guid.go
@@ -0,0 +1,284 @@
+package ole
+
+var (
+ // IID_NULL is null Interface ID, used when no other Interface ID is known.
+ IID_NULL = NewGUID("{00000000-0000-0000-0000-000000000000}")
+
+ // IID_IUnknown is for IUnknown interfaces.
+ IID_IUnknown = NewGUID("{00000000-0000-0000-C000-000000000046}")
+
+ // IID_IDispatch is for IDispatch interfaces.
+ IID_IDispatch = NewGUID("{00020400-0000-0000-C000-000000000046}")
+
+ // IID_IEnumVariant is for IEnumVariant interfaces
+ IID_IEnumVariant = NewGUID("{00020404-0000-0000-C000-000000000046}")
+
+ // IID_IConnectionPointContainer is for IConnectionPointContainer interfaces.
+ IID_IConnectionPointContainer = NewGUID("{B196B284-BAB4-101A-B69C-00AA00341D07}")
+
+ // IID_IConnectionPoint is for IConnectionPoint interfaces.
+ IID_IConnectionPoint = NewGUID("{B196B286-BAB4-101A-B69C-00AA00341D07}")
+
+ // IID_IInspectable is for IInspectable interfaces.
+ IID_IInspectable = NewGUID("{AF86E2E0-B12D-4C6A-9C5A-D7AA65101E90}")
+
+ // IID_IProvideClassInfo is for IProvideClassInfo interfaces.
+ IID_IProvideClassInfo = NewGUID("{B196B283-BAB4-101A-B69C-00AA00341D07}")
+)
+
+// These are for testing and not part of any library.
+var (
+ // IID_ICOMTestString is for ICOMTestString interfaces.
+ //
+ // {E0133EB4-C36F-469A-9D3D-C66B84BE19ED}
+ IID_ICOMTestString = NewGUID("{E0133EB4-C36F-469A-9D3D-C66B84BE19ED}")
+
+ // IID_ICOMTestInt8 is for ICOMTestInt8 interfaces.
+ //
+ // {BEB06610-EB84-4155-AF58-E2BFF53680B4}
+ IID_ICOMTestInt8 = NewGUID("{BEB06610-EB84-4155-AF58-E2BFF53680B4}")
+
+ // IID_ICOMTestInt16 is for ICOMTestInt16 interfaces.
+ //
+ // {DAA3F9FA-761E-4976-A860-8364CE55F6FC}
+ IID_ICOMTestInt16 = NewGUID("{DAA3F9FA-761E-4976-A860-8364CE55F6FC}")
+
+ // IID_ICOMTestInt32 is for ICOMTestInt32 interfaces.
+ //
+ // {E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}
+ IID_ICOMTestInt32 = NewGUID("{E3DEDEE7-38A2-4540-91D1-2EEF1D8891B0}")
+
+ // IID_ICOMTestInt64 is for ICOMTestInt64 interfaces.
+ //
+ // {8D437CBC-B3ED-485C-BC32-C336432A1623}
+ IID_ICOMTestInt64 = NewGUID("{8D437CBC-B3ED-485C-BC32-C336432A1623}")
+
+ // IID_ICOMTestFloat is for ICOMTestFloat interfaces.
+ //
+ // {BF1ED004-EA02-456A-AA55-2AC8AC6B054C}
+ IID_ICOMTestFloat = NewGUID("{BF1ED004-EA02-456A-AA55-2AC8AC6B054C}")
+
+ // IID_ICOMTestDouble is for ICOMTestDouble interfaces.
+ //
+ // {BF908A81-8687-4E93-999F-D86FAB284BA0}
+ IID_ICOMTestDouble = NewGUID("{BF908A81-8687-4E93-999F-D86FAB284BA0}")
+
+ // IID_ICOMTestBoolean is for ICOMTestBoolean interfaces.
+ //
+ // {D530E7A6-4EE8-40D1-8931-3D63B8605010}
+ IID_ICOMTestBoolean = NewGUID("{D530E7A6-4EE8-40D1-8931-3D63B8605010}")
+
+ // IID_ICOMEchoTestObject is for ICOMEchoTestObject interfaces.
+ //
+ // {6485B1EF-D780-4834-A4FE-1EBB51746CA3}
+ IID_ICOMEchoTestObject = NewGUID("{6485B1EF-D780-4834-A4FE-1EBB51746CA3}")
+
+ // IID_ICOMTestTypes is for ICOMTestTypes interfaces.
+ //
+ // {CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}
+ IID_ICOMTestTypes = NewGUID("{CCA8D7AE-91C0-4277-A8B3-FF4EDF28D3C0}")
+
+ // CLSID_COMEchoTestObject is for COMEchoTestObject class.
+ //
+ // {3C24506A-AE9E-4D50-9157-EF317281F1B0}
+ CLSID_COMEchoTestObject = NewGUID("{3C24506A-AE9E-4D50-9157-EF317281F1B0}")
+
+ // CLSID_COMTestScalarClass is for COMTestScalarClass class.
+ //
+ // {865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}
+ CLSID_COMTestScalarClass = NewGUID("{865B85C5-0334-4AC6-9EF6-AACEC8FC5E86}")
+)
+
+const hextable = "0123456789ABCDEF"
+const emptyGUID = "{00000000-0000-0000-0000-000000000000}"
+
+// GUID is Windows API specific GUID type.
+//
+// This exists to match the Windows GUID type for direct passing to COM.
+// Format is xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
+type GUID struct {
+ Data1 uint32
+ Data2 uint16
+ Data3 uint16
+ Data4 [8]byte
+}
+
+// NewGUID converts the given string into a globally unique identifier that is
+// compliant with the Windows API.
+//
+// The supplied string may be in any of these formats:
+//
+// XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
+// XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX
+// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
+//
+// The conversion of the supplied string is not case-sensitive.
+func NewGUID(guid string) *GUID {
+ d := []byte(guid)
+ var d1, d2, d3, d4a, d4b []byte
+
+ switch len(d) {
+ case 38:
+ if d[0] != '{' || d[37] != '}' {
+ return nil
+ }
+ d = d[1:37]
+ fallthrough
+ case 36:
+ if d[8] != '-' || d[13] != '-' || d[18] != '-' || d[23] != '-' {
+ return nil
+ }
+ d1 = d[0:8]
+ d2 = d[9:13]
+ d3 = d[14:18]
+ d4a = d[19:23]
+ d4b = d[24:36]
+ case 32:
+ d1 = d[0:8]
+ d2 = d[8:12]
+ d3 = d[12:16]
+ d4a = d[16:20]
+ d4b = d[20:32]
+ default:
+ return nil
+ }
+
+ var g GUID
+ var ok1, ok2, ok3, ok4 bool
+ g.Data1, ok1 = decodeHexUint32(d1)
+ g.Data2, ok2 = decodeHexUint16(d2)
+ g.Data3, ok3 = decodeHexUint16(d3)
+ g.Data4, ok4 = decodeHexByte64(d4a, d4b)
+ if ok1 && ok2 && ok3 && ok4 {
+ return &g
+ }
+ return nil
+}
+
+func decodeHexUint32(src []byte) (value uint32, ok bool) {
+ var b1, b2, b3, b4 byte
+ var ok1, ok2, ok3, ok4 bool
+ b1, ok1 = decodeHexByte(src[0], src[1])
+ b2, ok2 = decodeHexByte(src[2], src[3])
+ b3, ok3 = decodeHexByte(src[4], src[5])
+ b4, ok4 = decodeHexByte(src[6], src[7])
+ value = (uint32(b1) << 24) | (uint32(b2) << 16) | (uint32(b3) << 8) | uint32(b4)
+ ok = ok1 && ok2 && ok3 && ok4
+ return
+}
+
+func decodeHexUint16(src []byte) (value uint16, ok bool) {
+ var b1, b2 byte
+ var ok1, ok2 bool
+ b1, ok1 = decodeHexByte(src[0], src[1])
+ b2, ok2 = decodeHexByte(src[2], src[3])
+ value = (uint16(b1) << 8) | uint16(b2)
+ ok = ok1 && ok2
+ return
+}
+
+func decodeHexByte64(s1 []byte, s2 []byte) (value [8]byte, ok bool) {
+ var ok1, ok2, ok3, ok4, ok5, ok6, ok7, ok8 bool
+ value[0], ok1 = decodeHexByte(s1[0], s1[1])
+ value[1], ok2 = decodeHexByte(s1[2], s1[3])
+ value[2], ok3 = decodeHexByte(s2[0], s2[1])
+ value[3], ok4 = decodeHexByte(s2[2], s2[3])
+ value[4], ok5 = decodeHexByte(s2[4], s2[5])
+ value[5], ok6 = decodeHexByte(s2[6], s2[7])
+ value[6], ok7 = decodeHexByte(s2[8], s2[9])
+ value[7], ok8 = decodeHexByte(s2[10], s2[11])
+ ok = ok1 && ok2 && ok3 && ok4 && ok5 && ok6 && ok7 && ok8
+ return
+}
+
+func decodeHexByte(c1, c2 byte) (value byte, ok bool) {
+ var n1, n2 byte
+ var ok1, ok2 bool
+ n1, ok1 = decodeHexChar(c1)
+ n2, ok2 = decodeHexChar(c2)
+ value = (n1 << 4) | n2
+ ok = ok1 && ok2
+ return
+}
+
+func decodeHexChar(c byte) (byte, bool) {
+ switch {
+ case '0' <= c && c <= '9':
+ return c - '0', true
+ case 'a' <= c && c <= 'f':
+ return c - 'a' + 10, true
+ case 'A' <= c && c <= 'F':
+ return c - 'A' + 10, true
+ }
+
+ return 0, false
+}
+
+// String converts the GUID to string form. It will adhere to this pattern:
+//
+// {XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}
+//
+// If the GUID is nil, the string representation of an empty GUID is returned:
+//
+// {00000000-0000-0000-0000-000000000000}
+func (guid *GUID) String() string {
+ if guid == nil {
+ return emptyGUID
+ }
+
+ var c [38]byte
+ c[0] = '{'
+ putUint32Hex(c[1:9], guid.Data1)
+ c[9] = '-'
+ putUint16Hex(c[10:14], guid.Data2)
+ c[14] = '-'
+ putUint16Hex(c[15:19], guid.Data3)
+ c[19] = '-'
+ putByteHex(c[20:24], guid.Data4[0:2])
+ c[24] = '-'
+ putByteHex(c[25:37], guid.Data4[2:8])
+ c[37] = '}'
+ return string(c[:])
+}
+
+func putUint32Hex(b []byte, v uint32) {
+ b[0] = hextable[byte(v>>24)>>4]
+ b[1] = hextable[byte(v>>24)&0x0f]
+ b[2] = hextable[byte(v>>16)>>4]
+ b[3] = hextable[byte(v>>16)&0x0f]
+ b[4] = hextable[byte(v>>8)>>4]
+ b[5] = hextable[byte(v>>8)&0x0f]
+ b[6] = hextable[byte(v)>>4]
+ b[7] = hextable[byte(v)&0x0f]
+}
+
+func putUint16Hex(b []byte, v uint16) {
+ b[0] = hextable[byte(v>>8)>>4]
+ b[1] = hextable[byte(v>>8)&0x0f]
+ b[2] = hextable[byte(v)>>4]
+ b[3] = hextable[byte(v)&0x0f]
+}
+
+func putByteHex(dst, src []byte) {
+ for i := 0; i < len(src); i++ {
+ dst[i*2] = hextable[src[i]>>4]
+ dst[i*2+1] = hextable[src[i]&0x0f]
+ }
+}
+
+// IsEqualGUID compares two GUID.
+//
+// Not constant time comparison.
+func IsEqualGUID(guid1 *GUID, guid2 *GUID) bool {
+ return guid1.Data1 == guid2.Data1 &&
+ guid1.Data2 == guid2.Data2 &&
+ guid1.Data3 == guid2.Data3 &&
+ guid1.Data4[0] == guid2.Data4[0] &&
+ guid1.Data4[1] == guid2.Data4[1] &&
+ guid1.Data4[2] == guid2.Data4[2] &&
+ guid1.Data4[3] == guid2.Data4[3] &&
+ guid1.Data4[4] == guid2.Data4[4] &&
+ guid1.Data4[5] == guid2.Data4[5] &&
+ guid1.Data4[6] == guid2.Data4[6] &&
+ guid1.Data4[7] == guid2.Data4[7]
+}
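
The three accepted spellings documented on `NewGUID` all parse to the same value, and `String` always renders the braced, upper-case form. A quick sketch, using the GUID of `IID_IDispatch` defined above:

```go
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

func main() {
	a := ole.NewGUID("00020400-0000-0000-C000-000000000046")
	b := ole.NewGUID("{00020400-0000-0000-c000-000000000046}") // case-insensitive
	c := ole.NewGUID("0002040000000000C000000000000046")       // bare 32-hex form

	fmt.Println(a.String())                                   // {00020400-0000-0000-C000-000000000046}
	fmt.Println(ole.IsEqualGUID(a, b), ole.IsEqualGUID(b, c)) // true true
	fmt.Println(ole.IsEqualGUID(a, ole.IID_IDispatch))        // true
}
```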
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go
new file mode 100644
index 00000000..9e6c49f4
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint.go
@@ -0,0 +1,20 @@
+package ole
+
+import "unsafe"
+
+type IConnectionPoint struct {
+ IUnknown
+}
+
+type IConnectionPointVtbl struct {
+ IUnknownVtbl
+ GetConnectionInterface uintptr
+ GetConnectionPointContainer uintptr
+ Advise uintptr
+ Unadvise uintptr
+ EnumConnections uintptr
+}
+
+func (v *IConnectionPoint) VTable() *IConnectionPointVtbl {
+ return (*IConnectionPointVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go
new file mode 100644
index 00000000..5414dc3c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_func.go
@@ -0,0 +1,21 @@
+// +build !windows
+
+package ole
+
+import "unsafe"
+
+func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
+ return int32(0)
+}
+
+func (v *IConnectionPoint) Advise(unknown *IUnknown) (uint32, error) {
+ return uint32(0), NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPoint) Unadvise(cookie uint32) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) (err error) {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go
new file mode 100644
index 00000000..32bc1832
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpoint_windows.go
@@ -0,0 +1,43 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (v *IConnectionPoint) GetConnectionInterface(piid **GUID) int32 {
+ // XXX: This doesn't look like it does what it's supposed to
+ return release((*IUnknown)(unsafe.Pointer(v)))
+}
+
+func (v *IConnectionPoint) Advise(unknown *IUnknown) (cookie uint32, err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().Advise,
+ 3,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(unknown)),
+ uintptr(unsafe.Pointer(&cookie)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (v *IConnectionPoint) Unadvise(cookie uint32) (err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().Unadvise,
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(cookie),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (v *IConnectionPoint) EnumConnections(p *unsafe.Pointer) error {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go
new file mode 100644
index 00000000..165860d1
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer.go
@@ -0,0 +1,17 @@
+package ole
+
+import "unsafe"
+
+type IConnectionPointContainer struct {
+ IUnknown
+}
+
+type IConnectionPointContainerVtbl struct {
+ IUnknownVtbl
+ EnumConnectionPoints uintptr
+ FindConnectionPoint uintptr
+}
+
+func (v *IConnectionPointContainer) VTable() *IConnectionPointContainerVtbl {
+ return (*IConnectionPointContainerVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go
new file mode 100644
index 00000000..5dfa42aa
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_func.go
@@ -0,0 +1,11 @@
+// +build !windows
+
+package ole
+
+func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) error {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go
new file mode 100644
index 00000000..ad30d79e
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iconnectionpointcontainer_windows.go
@@ -0,0 +1,25 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (v *IConnectionPointContainer) EnumConnectionPoints(points interface{}) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (v *IConnectionPointContainer) FindConnectionPoint(iid *GUID, point **IConnectionPoint) (err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().FindConnectionPoint,
+ 3,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(point)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/idispatch.go b/vendor/github.com/go-ole/go-ole/idispatch.go
new file mode 100644
index 00000000..d4af1240
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/idispatch.go
@@ -0,0 +1,94 @@
+package ole
+
+import "unsafe"
+
+type IDispatch struct {
+ IUnknown
+}
+
+type IDispatchVtbl struct {
+ IUnknownVtbl
+ GetTypeInfoCount uintptr
+ GetTypeInfo uintptr
+ GetIDsOfNames uintptr
+ Invoke uintptr
+}
+
+func (v *IDispatch) VTable() *IDispatchVtbl {
+ return (*IDispatchVtbl)(unsafe.Pointer(v.RawVTable))
+}
+
+func (v *IDispatch) GetIDsOfName(names []string) (dispid []int32, err error) {
+ dispid, err = getIDsOfName(v, names)
+ return
+}
+
+func (v *IDispatch) Invoke(dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) {
+ result, err = invoke(v, dispid, dispatch, params...)
+ return
+}
+
+func (v *IDispatch) GetTypeInfoCount() (c uint32, err error) {
+ c, err = getTypeInfoCount(v)
+ return
+}
+
+func (v *IDispatch) GetTypeInfo() (tinfo *ITypeInfo, err error) {
+ tinfo, err = getTypeInfo(v)
+ return
+}
+
+// GetSingleIDOfName is a helper that returns a single display ID for an IDispatch name.
+//
+// It replaces the common pattern of requesting the list of IDs for a single name and returns the
+// first ID, if one is available.
+func (v *IDispatch) GetSingleIDOfName(name string) (displayID int32, err error) {
+ var displayIDs []int32
+ displayIDs, err = v.GetIDsOfName([]string{name})
+ if err != nil {
+ return
+ }
+ displayID = displayIDs[0]
+ return
+}
+
+// InvokeWithOptionalArgs accepts arguments as an array, works like Invoke.
+//
+// Accepts name and will attempt to retrieve Display ID to pass to Invoke.
+//
+// Passing params as a slice is a workaround: forwarding an empty variadic ...interface{} through
+// another variadic call has caused errors, and taking a plain slice avoids the problem.
+func (v *IDispatch) InvokeWithOptionalArgs(name string, dispatch int16, params []interface{}) (result *VARIANT, err error) {
+ displayID, err := v.GetSingleIDOfName(name)
+ if err != nil {
+ return
+ }
+
+ if len(params) < 1 {
+ result, err = v.Invoke(displayID, dispatch)
+ } else {
+ result, err = v.Invoke(displayID, dispatch, params...)
+ }
+
+ return
+}
+
+// CallMethod invokes the named method on the object with the given arguments.
+func (v *IDispatch) CallMethod(name string, params ...interface{}) (*VARIANT, error) {
+ return v.InvokeWithOptionalArgs(name, DISPATCH_METHOD, params)
+}
+
+// GetProperty retrieves the named property, with the ability to pass arguments.
+//
+// Most of the time you will not need to pass arguments, because most objects do not (and arguably
+// should not) accept them for property gets. Some COM servers do, however, and this is provided
+// for those edge cases.
+func (v *IDispatch) GetProperty(name string, params ...interface{}) (*VARIANT, error) {
+ return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYGET, params)
+}
+
+// PutProperty attempts to mutate a property in the object.
+func (v *IDispatch) PutProperty(name string, params ...interface{}) (*VARIANT, error) {
+ return v.InvokeWithOptionalArgs(name, DISPATCH_PROPERTYPUT, params)
+}
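
A sketch of the `CallMethod`/`GetProperty`/`PutProperty` helpers, assuming an `*ole.IDispatch` already obtained from a QueryInterface call. The member names are illustrative, and `Clear`/`ToString` come from the package's variant helpers outside this hunk:

```go
package example

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
)

// inspect reads one property, mutates another, and invokes a method on disp.
// "Name", "Visible", and "Refresh" are illustrative member names.
func inspect(disp *ole.IDispatch) error {
	v, err := disp.GetProperty("Name")
	if err != nil {
		return err
	}
	defer v.Clear()
	fmt.Println(v.ToString())

	if _, err := disp.PutProperty("Visible", true); err != nil {
		return err
	}

	_, err = disp.CallMethod("Refresh")
	return err
}
```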
diff --git a/vendor/github.com/go-ole/go-ole/idispatch_func.go b/vendor/github.com/go-ole/go-ole/idispatch_func.go
new file mode 100644
index 00000000..b8fbbe31
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/idispatch_func.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package ole
+
+func getIDsOfName(disp *IDispatch, names []string) ([]int32, error) {
+ return []int32{}, NewError(E_NOTIMPL)
+}
+
+func getTypeInfoCount(disp *IDispatch) (uint32, error) {
+ return uint32(0), NewError(E_NOTIMPL)
+}
+
+func getTypeInfo(disp *IDispatch) (*ITypeInfo, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (*VARIANT, error) {
+ return nil, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/idispatch_windows.go b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
new file mode 100644
index 00000000..b399f047
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/idispatch_windows.go
@@ -0,0 +1,202 @@
+// +build windows
+
+package ole
+
+import (
+ "math/big"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+func getIDsOfName(disp *IDispatch, names []string) (dispid []int32, err error) {
+ wnames := make([]*uint16, len(names))
+ for i := 0; i < len(names); i++ {
+ wnames[i] = syscall.StringToUTF16Ptr(names[i])
+ }
+ dispid = make([]int32, len(names))
+ namelen := uint32(len(names))
+ hr, _, _ := syscall.Syscall6(
+ disp.VTable().GetIDsOfNames,
+ 6,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(unsafe.Pointer(IID_NULL)),
+ uintptr(unsafe.Pointer(&wnames[0])),
+ uintptr(namelen),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(unsafe.Pointer(&dispid[0])))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func getTypeInfoCount(disp *IDispatch) (c uint32, err error) {
+ hr, _, _ := syscall.Syscall(
+ disp.VTable().GetTypeInfoCount,
+ 2,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(unsafe.Pointer(&c)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func getTypeInfo(disp *IDispatch) (tinfo *ITypeInfo, err error) {
+ hr, _, _ := syscall.Syscall(
+ disp.VTable().GetTypeInfo,
+ 3,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(unsafe.Pointer(&tinfo)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func invoke(disp *IDispatch, dispid int32, dispatch int16, params ...interface{}) (result *VARIANT, err error) {
+ var dispparams DISPPARAMS
+
+ if dispatch&DISPATCH_PROPERTYPUT != 0 {
+ dispnames := [1]int32{DISPID_PROPERTYPUT}
+ dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
+ dispparams.cNamedArgs = 1
+ } else if dispatch&DISPATCH_PROPERTYPUTREF != 0 {
+ dispnames := [1]int32{DISPID_PROPERTYPUT}
+ dispparams.rgdispidNamedArgs = uintptr(unsafe.Pointer(&dispnames[0]))
+ dispparams.cNamedArgs = 1
+ }
+ var vargs []VARIANT
+ if len(params) > 0 {
+ vargs = make([]VARIANT, len(params))
+ for i, v := range params {
+			n := len(params) - i - 1
+ VariantInit(&vargs[n])
+ switch vv := v.(type) {
+ case bool:
+ if vv {
+ vargs[n] = NewVariant(VT_BOOL, 0xffff)
+ } else {
+ vargs[n] = NewVariant(VT_BOOL, 0)
+ }
+ case *bool:
+ vargs[n] = NewVariant(VT_BOOL|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*bool)))))
+ case uint8:
+ vargs[n] = NewVariant(VT_I1, int64(v.(uint8)))
+ case *uint8:
+ vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint8)))))
+ case int8:
+ vargs[n] = NewVariant(VT_I1, int64(v.(int8)))
+ case *int8:
+			vargs[n] = NewVariant(VT_I1|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int8)))))
+ case int16:
+ vargs[n] = NewVariant(VT_I2, int64(v.(int16)))
+ case *int16:
+ vargs[n] = NewVariant(VT_I2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int16)))))
+ case uint16:
+ vargs[n] = NewVariant(VT_UI2, int64(v.(uint16)))
+ case *uint16:
+ vargs[n] = NewVariant(VT_UI2|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint16)))))
+ case int32:
+ vargs[n] = NewVariant(VT_I4, int64(v.(int32)))
+ case *int32:
+ vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int32)))))
+ case uint32:
+ vargs[n] = NewVariant(VT_UI4, int64(v.(uint32)))
+ case *uint32:
+ vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint32)))))
+ case int64:
+ vargs[n] = NewVariant(VT_I8, int64(v.(int64)))
+ case *int64:
+ vargs[n] = NewVariant(VT_I8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int64)))))
+ case uint64:
+ vargs[n] = NewVariant(VT_UI8, int64(uintptr(v.(uint64))))
+ case *uint64:
+ vargs[n] = NewVariant(VT_UI8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint64)))))
+ case int:
+ vargs[n] = NewVariant(VT_I4, int64(v.(int)))
+ case *int:
+ vargs[n] = NewVariant(VT_I4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*int)))))
+ case uint:
+ vargs[n] = NewVariant(VT_UI4, int64(v.(uint)))
+ case *uint:
+ vargs[n] = NewVariant(VT_UI4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*uint)))))
+ case float32:
+ vargs[n] = NewVariant(VT_R4, *(*int64)(unsafe.Pointer(&vv)))
+ case *float32:
+ vargs[n] = NewVariant(VT_R4|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float32)))))
+ case float64:
+ vargs[n] = NewVariant(VT_R8, *(*int64)(unsafe.Pointer(&vv)))
+ case *float64:
+ vargs[n] = NewVariant(VT_R8|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*float64)))))
+ case *big.Int:
+ vargs[n] = NewVariant(VT_DECIMAL, v.(*big.Int).Int64())
+ case string:
+ vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(v.(string))))))
+ case *string:
+ vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*string)))))
+ case time.Time:
+ s := vv.Format("2006-01-02 15:04:05")
+ vargs[n] = NewVariant(VT_BSTR, int64(uintptr(unsafe.Pointer(SysAllocStringLen(s)))))
+ case *time.Time:
+ s := vv.Format("2006-01-02 15:04:05")
+ vargs[n] = NewVariant(VT_BSTR|VT_BYREF, int64(uintptr(unsafe.Pointer(&s))))
+ case *IDispatch:
+ vargs[n] = NewVariant(VT_DISPATCH, int64(uintptr(unsafe.Pointer(v.(*IDispatch)))))
+ case **IDispatch:
+ vargs[n] = NewVariant(VT_DISPATCH|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(**IDispatch)))))
+ case nil:
+ vargs[n] = NewVariant(VT_NULL, 0)
+ case *VARIANT:
+ vargs[n] = NewVariant(VT_VARIANT|VT_BYREF, int64(uintptr(unsafe.Pointer(v.(*VARIANT)))))
+ case []byte:
+ safeByteArray := safeArrayFromByteSlice(v.([]byte))
+ vargs[n] = NewVariant(VT_ARRAY|VT_UI1, int64(uintptr(unsafe.Pointer(safeByteArray))))
+ defer VariantClear(&vargs[n])
+ case []string:
+ safeByteArray := safeArrayFromStringSlice(v.([]string))
+ vargs[n] = NewVariant(VT_ARRAY|VT_BSTR, int64(uintptr(unsafe.Pointer(safeByteArray))))
+ defer VariantClear(&vargs[n])
+ default:
+ panic("unknown type")
+ }
+ }
+ dispparams.rgvarg = uintptr(unsafe.Pointer(&vargs[0]))
+ dispparams.cArgs = uint32(len(params))
+ }
+
+ result = new(VARIANT)
+ var excepInfo EXCEPINFO
+ VariantInit(result)
+ hr, _, _ := syscall.Syscall9(
+ disp.VTable().Invoke,
+ 9,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(dispid),
+ uintptr(unsafe.Pointer(IID_NULL)),
+ uintptr(GetUserDefaultLCID()),
+ uintptr(dispatch),
+ uintptr(unsafe.Pointer(&dispparams)),
+ uintptr(unsafe.Pointer(result)),
+ uintptr(unsafe.Pointer(&excepInfo)),
+ 0)
+ if hr != 0 {
+ excepInfo.renderStrings()
+ excepInfo.Clear()
+ err = NewErrorWithSubError(hr, excepInfo.description, excepInfo)
+ }
+ for i, varg := range vargs {
+ n := len(params) - i - 1
+ if varg.VT == VT_BSTR && varg.Val != 0 {
+ SysFreeString(((*int16)(unsafe.Pointer(uintptr(varg.Val)))))
+ }
+ if varg.VT == (VT_BSTR|VT_BYREF) && varg.Val != 0 {
+ *(params[n].(*string)) = LpOleStrToString(*(**uint16)(unsafe.Pointer(uintptr(varg.Val))))
+ }
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant.go b/vendor/github.com/go-ole/go-ole/ienumvariant.go
new file mode 100644
index 00000000..24338975
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ienumvariant.go
@@ -0,0 +1,19 @@
+package ole
+
+import "unsafe"
+
+type IEnumVARIANT struct {
+ IUnknown
+}
+
+type IEnumVARIANTVtbl struct {
+ IUnknownVtbl
+ Next uintptr
+ Skip uintptr
+ Reset uintptr
+ Clone uintptr
+}
+
+func (v *IEnumVARIANT) VTable() *IEnumVARIANTVtbl {
+ return (*IEnumVARIANTVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_func.go b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go
new file mode 100644
index 00000000..c1484819
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ienumvariant_func.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package ole
+
+func (enum *IEnumVARIANT) Clone() (*IEnumVARIANT, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+func (enum *IEnumVARIANT) Reset() error {
+ return NewError(E_NOTIMPL)
+}
+
+func (enum *IEnumVARIANT) Skip(celt uint) error {
+ return NewError(E_NOTIMPL)
+}
+
+func (enum *IEnumVARIANT) Next(celt uint) (VARIANT, uint, error) {
+ return NewVariant(VT_NULL, int64(0)), 0, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go
new file mode 100644
index 00000000..4781f3b8
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ienumvariant_windows.go
@@ -0,0 +1,63 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (enum *IEnumVARIANT) Clone() (cloned *IEnumVARIANT, err error) {
+ hr, _, _ := syscall.Syscall(
+ enum.VTable().Clone,
+ 2,
+ uintptr(unsafe.Pointer(enum)),
+ uintptr(unsafe.Pointer(&cloned)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (enum *IEnumVARIANT) Reset() (err error) {
+ hr, _, _ := syscall.Syscall(
+ enum.VTable().Reset,
+ 1,
+ uintptr(unsafe.Pointer(enum)),
+ 0,
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (enum *IEnumVARIANT) Skip(celt uint) (err error) {
+ hr, _, _ := syscall.Syscall(
+ enum.VTable().Skip,
+ 2,
+ uintptr(unsafe.Pointer(enum)),
+ uintptr(celt),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func (enum *IEnumVARIANT) Next(celt uint) (array VARIANT, length uint, err error) {
+ hr, _, _ := syscall.Syscall6(
+ enum.VTable().Next,
+ 4,
+ uintptr(unsafe.Pointer(enum)),
+ uintptr(celt),
+ uintptr(unsafe.Pointer(&array)),
+ uintptr(unsafe.Pointer(&length)),
+ 0,
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iinspectable.go b/vendor/github.com/go-ole/go-ole/iinspectable.go
new file mode 100644
index 00000000..f4a19e25
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iinspectable.go
@@ -0,0 +1,18 @@
+package ole
+
+import "unsafe"
+
+type IInspectable struct {
+ IUnknown
+}
+
+type IInspectableVtbl struct {
+ IUnknownVtbl
+ GetIIds uintptr
+ GetRuntimeClassName uintptr
+ GetTrustLevel uintptr
+}
+
+func (v *IInspectable) VTable() *IInspectableVtbl {
+ return (*IInspectableVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_func.go b/vendor/github.com/go-ole/go-ole/iinspectable_func.go
new file mode 100644
index 00000000..348829bf
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iinspectable_func.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package ole
+
+func (v *IInspectable) GetIids() ([]*GUID, error) {
+ return []*GUID{}, NewError(E_NOTIMPL)
+}
+
+func (v *IInspectable) GetRuntimeClassName() (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+func (v *IInspectable) GetTrustLevel() (uint32, error) {
+ return uint32(0), NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iinspectable_windows.go b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go
new file mode 100644
index 00000000..4519a4aa
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iinspectable_windows.go
@@ -0,0 +1,72 @@
+// +build windows
+
+package ole
+
+import (
+ "bytes"
+ "encoding/binary"
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+func (v *IInspectable) GetIids() (iids []*GUID, err error) {
+ var count uint32
+ var array uintptr
+ hr, _, _ := syscall.Syscall(
+ v.VTable().GetIIds,
+ 3,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&count)),
+ uintptr(unsafe.Pointer(&array)))
+ if hr != 0 {
+ err = NewError(hr)
+ return
+ }
+ defer CoTaskMemFree(array)
+
+ iids = make([]*GUID, count)
+ byteCount := count * uint32(unsafe.Sizeof(GUID{}))
+ slicehdr := reflect.SliceHeader{Data: array, Len: int(byteCount), Cap: int(byteCount)}
+ byteSlice := *(*[]byte)(unsafe.Pointer(&slicehdr))
+ reader := bytes.NewReader(byteSlice)
+ for i := range iids {
+ guid := GUID{}
+ err = binary.Read(reader, binary.LittleEndian, &guid)
+ if err != nil {
+ return
+ }
+ iids[i] = &guid
+ }
+ return
+}
+
+func (v *IInspectable) GetRuntimeClassName() (s string, err error) {
+ var hstring HString
+ hr, _, _ := syscall.Syscall(
+ v.VTable().GetRuntimeClassName,
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&hstring)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ return
+ }
+ s = hstring.String()
+ DeleteHString(hstring)
+ return
+}
+
+func (v *IInspectable) GetTrustLevel() (level uint32, err error) {
+ hr, _, _ := syscall.Syscall(
+ v.VTable().GetTrustLevel,
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&level)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go
new file mode 100644
index 00000000..25f3a6f2
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo.go
@@ -0,0 +1,21 @@
+package ole
+
+import "unsafe"
+
+type IProvideClassInfo struct {
+ IUnknown
+}
+
+type IProvideClassInfoVtbl struct {
+ IUnknownVtbl
+ GetClassInfo uintptr
+}
+
+func (v *IProvideClassInfo) VTable() *IProvideClassInfoVtbl {
+ return (*IProvideClassInfoVtbl)(unsafe.Pointer(v.RawVTable))
+}
+
+func (v *IProvideClassInfo) GetClassInfo() (cinfo *ITypeInfo, err error) {
+ cinfo, err = getClassInfo(v)
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go
new file mode 100644
index 00000000..7e3cb63e
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_func.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package ole
+
+func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go
new file mode 100644
index 00000000..2ad01639
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iprovideclassinfo_windows.go
@@ -0,0 +1,21 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func getClassInfo(disp *IProvideClassInfo) (tinfo *ITypeInfo, err error) {
+ hr, _, _ := syscall.Syscall(
+ disp.VTable().GetClassInfo,
+ 2,
+ uintptr(unsafe.Pointer(disp)),
+ uintptr(unsafe.Pointer(&tinfo)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo.go b/vendor/github.com/go-ole/go-ole/itypeinfo.go
new file mode 100644
index 00000000..dd3c5e21
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/itypeinfo.go
@@ -0,0 +1,34 @@
+package ole
+
+import "unsafe"
+
+type ITypeInfo struct {
+ IUnknown
+}
+
+type ITypeInfoVtbl struct {
+ IUnknownVtbl
+ GetTypeAttr uintptr
+ GetTypeComp uintptr
+ GetFuncDesc uintptr
+ GetVarDesc uintptr
+ GetNames uintptr
+ GetRefTypeOfImplType uintptr
+ GetImplTypeFlags uintptr
+ GetIDsOfNames uintptr
+ Invoke uintptr
+ GetDocumentation uintptr
+ GetDllEntry uintptr
+ GetRefTypeInfo uintptr
+ AddressOfMember uintptr
+ CreateInstance uintptr
+ GetMops uintptr
+ GetContainingTypeLib uintptr
+ ReleaseTypeAttr uintptr
+ ReleaseFuncDesc uintptr
+ ReleaseVarDesc uintptr
+}
+
+func (v *ITypeInfo) VTable() *ITypeInfoVtbl {
+ return (*ITypeInfoVtbl)(unsafe.Pointer(v.RawVTable))
+}
diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_func.go b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go
new file mode 100644
index 00000000..8364a659
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/itypeinfo_func.go
@@ -0,0 +1,7 @@
+// +build !windows
+
+package ole
+
+func (v *ITypeInfo) GetTypeAttr() (*TYPEATTR, error) {
+ return nil, NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go
new file mode 100644
index 00000000..54782b3d
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/itypeinfo_windows.go
@@ -0,0 +1,21 @@
+// +build windows
+
+package ole
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+func (v *ITypeInfo) GetTypeAttr() (tattr *TYPEATTR, err error) {
+ hr, _, _ := syscall.Syscall(
+ uintptr(v.VTable().GetTypeAttr),
+ 2,
+ uintptr(unsafe.Pointer(v)),
+ uintptr(unsafe.Pointer(&tattr)),
+ 0)
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/iunknown.go b/vendor/github.com/go-ole/go-ole/iunknown.go
new file mode 100644
index 00000000..108f28ea
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iunknown.go
@@ -0,0 +1,57 @@
+package ole
+
+import "unsafe"
+
+type IUnknown struct {
+ RawVTable *interface{}
+}
+
+type IUnknownVtbl struct {
+ QueryInterface uintptr
+ AddRef uintptr
+ Release uintptr
+}
+
+type UnknownLike interface {
+ QueryInterface(iid *GUID) (disp *IDispatch, err error)
+ AddRef() int32
+ Release() int32
+}
+
+func (v *IUnknown) VTable() *IUnknownVtbl {
+ return (*IUnknownVtbl)(unsafe.Pointer(v.RawVTable))
+}
+
+func (v *IUnknown) PutQueryInterface(interfaceID *GUID, obj interface{}) error {
+ return reflectQueryInterface(v, v.VTable().QueryInterface, interfaceID, obj)
+}
+
+func (v *IUnknown) IDispatch(interfaceID *GUID) (dispatch *IDispatch, err error) {
+ err = v.PutQueryInterface(interfaceID, &dispatch)
+ return
+}
+
+func (v *IUnknown) IEnumVARIANT(interfaceID *GUID) (enum *IEnumVARIANT, err error) {
+ err = v.PutQueryInterface(interfaceID, &enum)
+ return
+}
+
+func (v *IUnknown) QueryInterface(iid *GUID) (*IDispatch, error) {
+ return queryInterface(v, iid)
+}
+
+func (v *IUnknown) MustQueryInterface(iid *GUID) (disp *IDispatch) {
+ unk, err := queryInterface(v, iid)
+ if err != nil {
+ panic(err)
+ }
+ return unk
+}
+
+func (v *IUnknown) AddRef() int32 {
+ return addRef(v)
+}
+
+func (v *IUnknown) Release() int32 {
+ return release(v)
+}
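
The usual reference-counting discipline with this type: every successful `QueryInterface` adds a reference that the caller must `Release`, and the original `IUnknown` must be released separately by whoever created it. A sketch, assuming `unknown` came from a creation call such as `oleutil.CreateObject`:

```go
package example

import (
	ole "github.com/go-ole/go-ole"
)

// withDispatch queries IDispatch from unknown, runs f, and releases the
// reference acquired by QueryInterface. The caller keeps ownership of unknown.
func withDispatch(unknown *ole.IUnknown, f func(*ole.IDispatch) error) error {
	disp, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		return err
	}
	defer disp.Release()
	return f(disp)
}
```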
diff --git a/vendor/github.com/go-ole/go-ole/iunknown_func.go b/vendor/github.com/go-ole/go-ole/iunknown_func.go
new file mode 100644
index 00000000..d0a62cfd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iunknown_func.go
@@ -0,0 +1,19 @@
+// +build !windows
+
+package ole
+
+func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) {
+ return NewError(E_NOTIMPL)
+}
+
+func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+func addRef(unk *IUnknown) int32 {
+ return 0
+}
+
+func release(unk *IUnknown) int32 {
+ return 0
+}
diff --git a/vendor/github.com/go-ole/go-ole/iunknown_windows.go b/vendor/github.com/go-ole/go-ole/iunknown_windows.go
new file mode 100644
index 00000000..ede5bb8c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/iunknown_windows.go
@@ -0,0 +1,58 @@
+// +build windows
+
+package ole
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+func reflectQueryInterface(self interface{}, method uintptr, interfaceID *GUID, obj interface{}) (err error) {
+ selfValue := reflect.ValueOf(self).Elem()
+ objValue := reflect.ValueOf(obj).Elem()
+
+ hr, _, _ := syscall.Syscall(
+ method,
+ 3,
+ selfValue.UnsafeAddr(),
+ uintptr(unsafe.Pointer(interfaceID)),
+ objValue.Addr().Pointer())
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func queryInterface(unk *IUnknown, iid *GUID) (disp *IDispatch, err error) {
+ hr, _, _ := syscall.Syscall(
+ unk.VTable().QueryInterface,
+ 3,
+ uintptr(unsafe.Pointer(unk)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&disp)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func addRef(unk *IUnknown) int32 {
+ ret, _, _ := syscall.Syscall(
+ unk.VTable().AddRef,
+ 1,
+ uintptr(unsafe.Pointer(unk)),
+ 0,
+ 0)
+ return int32(ret)
+}
+
+func release(unk *IUnknown) int32 {
+ ret, _, _ := syscall.Syscall(
+ unk.VTable().Release,
+ 1,
+ uintptr(unsafe.Pointer(unk)),
+ 0,
+ 0)
+ return int32(ret)
+}
diff --git a/vendor/github.com/go-ole/go-ole/ole.go b/vendor/github.com/go-ole/go-ole/ole.go
new file mode 100644
index 00000000..dbd132bb
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/ole.go
@@ -0,0 +1,190 @@
+package ole
+
+import (
+ "fmt"
+ "strings"
+ "unsafe"
+)
+
+// DISPPARAMS holds the arguments passed to a method or property.
+type DISPPARAMS struct {
+ rgvarg uintptr
+ rgdispidNamedArgs uintptr
+ cArgs uint32
+ cNamedArgs uint32
+}
+
+// EXCEPINFO defines exception info.
+type EXCEPINFO struct {
+ wCode uint16
+ wReserved uint16
+ bstrSource *uint16
+ bstrDescription *uint16
+ bstrHelpFile *uint16
+ dwHelpContext uint32
+ pvReserved uintptr
+ pfnDeferredFillIn uintptr
+ scode uint32
+
+	// Go-specific part. Do not move these fields above the native ones; doing so would break the structure layout expected by native code.
+ rendered bool
+ source string
+ description string
+ helpFile string
+}
+
+// renderStrings translates the BSTR strings to Go strings so that `.Error` and `.String`
+// can be called safely after `.Clear`. This is needed when we cannot rely on the caller
+// to call `.Clear`.
+func (e *EXCEPINFO) renderStrings() {
+ e.rendered = true
+ if e.bstrSource == nil {
+ e.source = ""
+ } else {
+ e.source = BstrToString(e.bstrSource)
+ }
+ if e.bstrDescription == nil {
+ e.description = ""
+ } else {
+ e.description = BstrToString(e.bstrDescription)
+ }
+ if e.bstrHelpFile == nil {
+ e.helpFile = ""
+ } else {
+ e.helpFile = BstrToString(e.bstrHelpFile)
+ }
+}
+
+// Clear frees the BSTR strings inside an EXCEPINFO and sets them to nil.
+func (e *EXCEPINFO) Clear() {
+ freeBSTR := func(s *uint16) {
+		// SysFreeString doesn't return errors and is safe to call on NULL.
+ // https://docs.microsoft.com/en-us/windows/win32/api/oleauto/nf-oleauto-sysfreestring
+ _ = SysFreeString((*int16)(unsafe.Pointer(s)))
+ }
+
+ if e.bstrSource != nil {
+ freeBSTR(e.bstrSource)
+ e.bstrSource = nil
+ }
+ if e.bstrDescription != nil {
+ freeBSTR(e.bstrDescription)
+ e.bstrDescription = nil
+ }
+ if e.bstrHelpFile != nil {
+ freeBSTR(e.bstrHelpFile)
+ e.bstrHelpFile = nil
+ }
+}
+
+// WCode returns the wCode field of the EXCEPINFO.
+func (e EXCEPINFO) WCode() uint16 {
+ return e.wCode
+}
+
+// SCODE returns the scode field of the EXCEPINFO.
+func (e EXCEPINFO) SCODE() uint32 {
+ return e.scode
+}
+
+// String converts the EXCEPINFO to a string.
+func (e EXCEPINFO) String() string {
+ if !e.rendered {
+ e.renderStrings()
+ }
+ return fmt.Sprintf(
+ "wCode: %#x, bstrSource: %v, bstrDescription: %v, bstrHelpFile: %v, dwHelpContext: %#x, scode: %#x",
+ e.wCode, e.source, e.description, e.helpFile, e.dwHelpContext, e.scode,
+ )
+}
+
+// Error implements the error interface and returns the error string.
+func (e EXCEPINFO) Error() string {
+ if !e.rendered {
+ e.renderStrings()
+ }
+
+ if e.description != "" {
+ return strings.TrimSpace(e.description)
+ }
+
+ code := e.scode
+ if e.wCode != 0 {
+ code = uint32(e.wCode)
+ }
+ return fmt.Sprintf("%v: %#x", e.source, code)
+}
+
+// PARAMDATA defines parameter data type.
+type PARAMDATA struct {
+ Name *int16
+ Vt uint16
+}
+
+// METHODDATA defines method info.
+type METHODDATA struct {
+ Name *uint16
+ Data *PARAMDATA
+ Dispid int32
+ Meth uint32
+ CC int32
+ CArgs uint32
+ Flags uint16
+ VtReturn uint32
+}
+
+// INTERFACEDATA defines interface info.
+type INTERFACEDATA struct {
+ MethodData *METHODDATA
+ CMembers uint32
+}
+
+// Point is a 2D vector type.
+type Point struct {
+ X int32
+ Y int32
+}
+
+// Msg is a message passed between processes.
+type Msg struct {
+ Hwnd uint32
+ Message uint32
+ Wparam int32
+ Lparam int32
+ Time uint32
+ Pt Point
+}
+
+// TYPEDESC defines data type.
+type TYPEDESC struct {
+ Hreftype uint32
+ VT uint16
+}
+
+// IDLDESC defines IDL info.
+type IDLDESC struct {
+ DwReserved uint32
+ WIDLFlags uint16
+}
+
+// TYPEATTR defines type info.
+type TYPEATTR struct {
+ Guid GUID
+ Lcid uint32
+ dwReserved uint32
+ MemidConstructor int32
+ MemidDestructor int32
+ LpstrSchema *uint16
+ CbSizeInstance uint32
+ Typekind int32
+ CFuncs uint16
+ CVars uint16
+ CImplTypes uint16
+ CbSizeVft uint16
+ CbAlignment uint16
+ WTypeFlags uint16
+ WMajorVerNum uint16
+ WMinorVerNum uint16
+ TdescAlias TYPEDESC
+ IdldescType IDLDESC
+}
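
When an Invoke call fails, the Windows implementation earlier in this diff renders and clears the `EXCEPINFO` and wraps it as the `OleError`'s sub-error, so callers can recover the server-supplied description and codes. A sketch of unpacking that, assuming `err` came from an `IDispatch` call:

```go
package example

import (
	"errors"
	"fmt"

	ole "github.com/go-ole/go-ole"
)

// describeComError pulls the EXCEPINFO details out of an OleError, if present.
func describeComError(err error) string {
	var oleErr *ole.OleError
	if !errors.As(err, &oleErr) {
		return err.Error()
	}
	if excep, ok := oleErr.SubError().(ole.EXCEPINFO); ok {
		return fmt.Sprintf("hr=%#x wCode=%#x scode=%#x: %s",
			oleErr.Code(), excep.WCode(), excep.SCODE(), excep.Error())
	}
	return oleErr.Error()
}
```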
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection.go b/vendor/github.com/go-ole/go-ole/oleutil/connection.go
new file mode 100644
index 00000000..60df73cd
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/connection.go
@@ -0,0 +1,100 @@
+// +build windows
+
+package oleutil
+
+import (
+ "reflect"
+ "unsafe"
+
+ ole "github.com/go-ole/go-ole"
+)
+
+type stdDispatch struct {
+ lpVtbl *stdDispatchVtbl
+ ref int32
+ iid *ole.GUID
+ iface interface{}
+ funcMap map[string]int32
+}
+
+type stdDispatchVtbl struct {
+ pQueryInterface uintptr
+ pAddRef uintptr
+ pRelease uintptr
+ pGetTypeInfoCount uintptr
+ pGetTypeInfo uintptr
+ pGetIDsOfNames uintptr
+ pInvoke uintptr
+}
+
+func dispQueryInterface(this *ole.IUnknown, iid *ole.GUID, punk **ole.IUnknown) uint32 {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ *punk = nil
+ if ole.IsEqualGUID(iid, ole.IID_IUnknown) ||
+ ole.IsEqualGUID(iid, ole.IID_IDispatch) {
+ dispAddRef(this)
+ *punk = this
+ return ole.S_OK
+ }
+ if ole.IsEqualGUID(iid, pthis.iid) {
+ dispAddRef(this)
+ *punk = this
+ return ole.S_OK
+ }
+ return ole.E_NOINTERFACE
+}
+
+func dispAddRef(this *ole.IUnknown) int32 {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ pthis.ref++
+ return pthis.ref
+}
+
+func dispRelease(this *ole.IUnknown) int32 {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ pthis.ref--
+ return pthis.ref
+}
+
+func dispGetIDsOfNames(this *ole.IUnknown, iid *ole.GUID, wnames []*uint16, namelen int, lcid int, pdisp []int32) uintptr {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ names := make([]string, len(wnames))
+ for i := 0; i < len(names); i++ {
+ names[i] = ole.LpOleStrToString(wnames[i])
+ }
+ for n := 0; n < namelen; n++ {
+ if id, ok := pthis.funcMap[names[n]]; ok {
+ pdisp[n] = id
+ }
+ }
+ return ole.S_OK
+}
+
+func dispGetTypeInfoCount(pcount *int) uintptr {
+ if pcount != nil {
+ *pcount = 0
+ }
+ return ole.S_OK
+}
+
+func dispGetTypeInfo(ptypeif *uintptr) uintptr {
+ return ole.E_NOTIMPL
+}
+
+func dispInvoke(this *ole.IDispatch, dispid int32, riid *ole.GUID, lcid int, flags int16, dispparams *ole.DISPPARAMS, result *ole.VARIANT, pexcepinfo *ole.EXCEPINFO, nerr *uint) uintptr {
+ pthis := (*stdDispatch)(unsafe.Pointer(this))
+ found := ""
+ for name, id := range pthis.funcMap {
+ if id == dispid {
+ found = name
+ }
+ }
+ if found != "" {
+ rv := reflect.ValueOf(pthis.iface).Elem()
+ rm := rv.MethodByName(found)
+ rr := rm.Call([]reflect.Value{})
+ println(len(rr))
+ return ole.S_OK
+ }
+ return ole.E_NOTIMPL
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go
new file mode 100644
index 00000000..8818fb82
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_func.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package oleutil
+
+import ole "github.com/go-ole/go-ole"
+
+// ConnectObject creates a connection point between two services for communication.
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (uint32, error) {
+ return 0, ole.NewError(ole.E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go
new file mode 100644
index 00000000..ab9c0d8d
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/connection_windows.go
@@ -0,0 +1,58 @@
+// +build windows
+
+package oleutil
+
+import (
+ "reflect"
+ "syscall"
+ "unsafe"
+
+ ole "github.com/go-ole/go-ole"
+)
+
+// ConnectObject creates a connection point between two services for communication.
+func ConnectObject(disp *ole.IDispatch, iid *ole.GUID, idisp interface{}) (cookie uint32, err error) {
+ unknown, err := disp.QueryInterface(ole.IID_IConnectionPointContainer)
+ if err != nil {
+ return
+ }
+
+ container := (*ole.IConnectionPointContainer)(unsafe.Pointer(unknown))
+ var point *ole.IConnectionPoint
+ err = container.FindConnectionPoint(iid, &point)
+ if err != nil {
+ return
+ }
+ if edisp, ok := idisp.(*ole.IUnknown); ok {
+ cookie, err = point.Advise(edisp)
+ container.Release()
+ if err != nil {
+ return
+ }
+ }
+ rv := reflect.ValueOf(disp).Elem()
+ if rv.Type().Kind() == reflect.Struct {
+ dest := &stdDispatch{}
+ dest.lpVtbl = &stdDispatchVtbl{}
+ dest.lpVtbl.pQueryInterface = syscall.NewCallback(dispQueryInterface)
+ dest.lpVtbl.pAddRef = syscall.NewCallback(dispAddRef)
+ dest.lpVtbl.pRelease = syscall.NewCallback(dispRelease)
+ dest.lpVtbl.pGetTypeInfoCount = syscall.NewCallback(dispGetTypeInfoCount)
+ dest.lpVtbl.pGetTypeInfo = syscall.NewCallback(dispGetTypeInfo)
+ dest.lpVtbl.pGetIDsOfNames = syscall.NewCallback(dispGetIDsOfNames)
+ dest.lpVtbl.pInvoke = syscall.NewCallback(dispInvoke)
+ dest.iface = disp
+ dest.iid = iid
+ cookie, err = point.Advise((*ole.IUnknown)(unsafe.Pointer(dest)))
+ container.Release()
+ if err != nil {
+ point.Release()
+ return
+ }
+ return
+ }
+
+ container.Release()
+
+ return 0, ole.NewError(ole.E_INVALIDARG)
+}
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/go-get.go b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go
new file mode 100644
index 00000000..58347628
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/go-get.go
@@ -0,0 +1,6 @@
+// This file is here so that `go get` succeeds; without it, the build errors with:
+// no buildable Go source files in ...
+//
+// +build !windows
+
+package oleutil
diff --git a/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go
new file mode 100644
index 00000000..f7803c1e
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/oleutil/oleutil.go
@@ -0,0 +1,127 @@
+package oleutil
+
+import ole "github.com/go-ole/go-ole"
+
+// ClassIDFrom retrieves the class ID, whether the argument is a program ID or an application string.
+func ClassIDFrom(programID string) (classID *ole.GUID, err error) {
+ return ole.ClassIDFrom(programID)
+}
+
+// CreateObject creates a COM object from the given program ID and returns its IUnknown interface.
+//
+// Only IUnknown is supported.
+//
+// The argument can be either a program ID or an application string.
+func CreateObject(programID string) (unknown *ole.IUnknown, err error) {
+ classID, err := ole.ClassIDFrom(programID)
+ if err != nil {
+ return
+ }
+
+ unknown, err = ole.CreateInstance(classID, ole.IID_IUnknown)
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+// GetActiveObject retrieves a running (active) COM object for the given program ID and returns
+// its IUnknown interface.
+//
+// Only IUnknown is supported.
+//
+// The argument can be either a program ID or an application string.
+func GetActiveObject(programID string) (unknown *ole.IUnknown, err error) {
+ classID, err := ole.ClassIDFrom(programID)
+ if err != nil {
+ return
+ }
+
+ unknown, err = ole.GetActiveObject(classID, ole.IID_IUnknown)
+ if err != nil {
+ return
+ }
+
+ return
+}
+
+// CallMethod calls method on IDispatch with parameters.
+func CallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_METHOD, params)
+}
+
+// MustCallMethod calls method on IDispatch with parameters or panics.
+func MustCallMethod(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := CallMethod(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
+
+// GetProperty retrieves property from IDispatch.
+func GetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYGET, params)
+}
+
+// MustGetProperty retrieves property from IDispatch or panics.
+func MustGetProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := GetProperty(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
+
+// PutProperty mutates property.
+func PutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUT, params)
+}
+
+// MustPutProperty mutates property or panics.
+func MustPutProperty(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := PutProperty(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
+
+// PutPropertyRef mutates property reference.
+func PutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT, err error) {
+ return disp.InvokeWithOptionalArgs(name, ole.DISPATCH_PROPERTYPUTREF, params)
+}
+
+// MustPutPropertyRef mutates property reference or panics.
+func MustPutPropertyRef(disp *ole.IDispatch, name string, params ...interface{}) (result *ole.VARIANT) {
+ r, err := PutPropertyRef(disp, name, params...)
+ if err != nil {
+ panic(err.Error())
+ }
+ return r
+}
+
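+// ForEach iterates over a COM collection by retrieving its _NewEnum
+// enumerator and invoking f for each VARIANT; iteration stops at the first
+// error returned by the enumerator or by f.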
+func ForEach(disp *ole.IDispatch, f func(v *ole.VARIANT) error) error {
+ newEnum, err := disp.GetProperty("_NewEnum")
+ if err != nil {
+ return err
+ }
+ defer newEnum.Clear()
+
+ enum, err := newEnum.ToIUnknown().IEnumVARIANT(ole.IID_IEnumVariant)
+ if err != nil {
+ return err
+ }
+ defer enum.Release()
+
+ for item, length, err := enum.Next(1); length > 0; item, length, err = enum.Next(1) {
+ if err != nil {
+ return err
+ }
+ if ferr := f(&item); ferr != nil {
+ return ferr
+ }
+ }
+ return nil
+}
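For reviewers unfamiliar with go-ole, the sketch below (not part of the vendored file) shows the call pattern these `oleutil` helpers enable on Windows. The `WbemScripting.SWbemLocator` ProgID and the `Win32_DiskDrive` query are illustrative assumptions, roughly mirroring how WMI-backed collectors such as the `StackExchange/wmi` dependency drive go-ole; treat it as a sketch, not code from this patch.

```go
// Sketch only: enumerate disk drives through WMI using the oleutil helpers.
package main

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
	"github.com/go-ole/go-ole/oleutil"
)

func main() {
	// Initialize COM for this thread; must be paired with CoUninitialize.
	if err := ole.CoInitialize(0); err != nil {
		panic(err)
	}
	defer ole.CoUninitialize()

	// Create the WMI locator object and obtain its IDispatch interface.
	unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
	if err != nil {
		panic(err)
	}
	defer unknown.Release()

	wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		panic(err)
	}
	defer wmi.Release()

	// Connect to the local WMI service and run a query.
	service := oleutil.MustCallMethod(wmi, "ConnectServer").ToIDispatch()
	defer service.Release()

	result := oleutil.MustCallMethod(service, "ExecQuery",
		"SELECT Name FROM Win32_DiskDrive").ToIDispatch()
	defer result.Release()

	// ForEach walks the collection's _NewEnum enumerator for us.
	_ = oleutil.ForEach(result, func(v *ole.VARIANT) error {
		item := v.ToIDispatch()
		defer item.Release()
		fmt.Println(oleutil.MustGetProperty(item, "Name").ToString())
		return nil
	})
}
```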
diff --git a/vendor/github.com/go-ole/go-ole/safearray.go b/vendor/github.com/go-ole/go-ole/safearray.go
new file mode 100644
index 00000000..a5201b56
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearray.go
@@ -0,0 +1,27 @@
+// Package is meant to retrieve and process safe array data returned from COM.
+
+package ole
+
+// SafeArrayBound defines the SafeArray boundaries.
+type SafeArrayBound struct {
+ Elements uint32
+ LowerBound int32
+}
+
+// SafeArray is how COM handles arrays.
+type SafeArray struct {
+ Dimensions uint16
+ FeaturesFlag uint16
+ ElementsSize uint32
+ LocksAmount uint32
+ Data uint32
+ Bounds [16]byte
+}
+
+// SAFEARRAY is obsolete, exists for backwards compatibility.
+// Use SafeArray
+type SAFEARRAY SafeArray
+
+// SAFEARRAYBOUND is obsolete, exists for backwards compatibility.
+// Use SafeArrayBound
+type SAFEARRAYBOUND SafeArrayBound
diff --git a/vendor/github.com/go-ole/go-ole/safearray_func.go b/vendor/github.com/go-ole/go-ole/safearray_func.go
new file mode 100644
index 00000000..0dee670c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearray_func.go
@@ -0,0 +1,211 @@
+// +build !windows
+
+package ole
+
+import (
+ "unsafe"
+)
+
+// safeArrayAccessData returns raw array pointer.
+//
+// AKA: SafeArrayAccessData in Windows API.
+func safeArrayAccessData(safearray *SafeArray) (uintptr, error) {
+ return uintptr(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayUnaccessData releases raw array.
+//
+// AKA: SafeArrayUnaccessData in Windows API.
+func safeArrayUnaccessData(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayAllocData allocates SafeArray.
+//
+// AKA: SafeArrayAllocData in Windows API.
+func safeArrayAllocData(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayAllocDescriptor allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptor in Windows API.
+func safeArrayAllocDescriptor(dimensions uint32) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayAllocDescriptorEx allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptorEx in Windows API.
+func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCopy returns copy of SafeArray.
+//
+// AKA: SafeArrayCopy in Windows API.
+func safeArrayCopy(original *SafeArray) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCopyData duplicates SafeArray into another SafeArray object.
+//
+// AKA: SafeArrayCopyData in Windows API.
+func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayCreate creates SafeArray.
+//
+// AKA: SafeArrayCreate in Windows API.
+func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCreateEx creates SafeArray.
+//
+// AKA: SafeArrayCreateEx in Windows API.
+func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCreateVector creates SafeArray.
+//
+// AKA: SafeArrayCreateVector in Windows API.
+func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayCreateVectorEx creates SafeArray.
+//
+// AKA: SafeArrayCreateVectorEx in Windows API.
+func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (*SafeArray, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayDestroy destroys SafeArray object.
+//
+// AKA: SafeArrayDestroy in Windows API.
+func safeArrayDestroy(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayDestroyData destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyData in Windows API.
+func safeArrayDestroyData(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayDestroyDescriptor destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyDescriptor in Windows API.
+func safeArrayDestroyDescriptor(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayGetDim returns the number of dimensions in the SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetDim in Windows API.
+func safeArrayGetDim(safearray *SafeArray) (*uint32, error) {
+ u := uint32(0)
+ return &u, NewError(E_NOTIMPL)
+}
+
+// safeArrayGetElementSize is the element size in bytes.
+//
+// AKA: SafeArrayGetElemsize in Windows API.
+func safeArrayGetElementSize(safearray *SafeArray) (*uint32, error) {
+ u := uint32(0)
+ return &u, NewError(E_NOTIMPL)
+}
+
+// safeArrayGetElement retrieves element at given index.
+func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayGetElementString retrieves the element at the given index and converts it to a string.
+func safeArrayGetElementString(safearray *SafeArray, index int32) (string, error) {
+ return "", NewError(E_NOTIMPL)
+}
+
+// safeArrayGetIID is the InterfaceID of the elements in the SafeArray.
+//
+// AKA: SafeArrayGetIID in Windows API.
+func safeArrayGetIID(safearray *SafeArray) (*GUID, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArrayGetLBound returns lower bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetLBound in Windows API.
+func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (int32, error) {
+ return int32(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayGetUBound returns upper bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetUBound in Windows API.
+func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (int32, error) {
+ return int32(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayGetVartype returns data type of SafeArray.
+//
+// AKA: SafeArrayGetVartype in Windows API.
+func safeArrayGetVartype(safearray *SafeArray) (uint16, error) {
+ return uint16(0), NewError(E_NOTIMPL)
+}
+
+// safeArrayLock locks SafeArray for reading to modify SafeArray.
+//
+// This must be called during some calls to ensure that another process does not
+// read or write to the SafeArray during editing.
+//
+// AKA: SafeArrayLock in Windows API.
+func safeArrayLock(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayUnlock unlocks SafeArray for reading.
+//
+// AKA: SafeArrayUnlock in Windows API.
+func safeArrayUnlock(safearray *SafeArray) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayPutElement stores the data element at the specified location in the
+// array.
+//
+// AKA: SafeArrayPutElement in Windows API.
+func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) error {
+ return NewError(E_NOTIMPL)
+}
+
+// safeArrayGetRecordInfo accesses IRecordInfo info for custom types.
+//
+// AKA: SafeArrayGetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArrayGetRecordInfo(safearray *SafeArray) (interface{}, error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// safeArraySetRecordInfo mutates IRecordInfo info for custom types.
+//
+// AKA: SafeArraySetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) error {
+ return NewError(E_NOTIMPL)
+}
diff --git a/vendor/github.com/go-ole/go-ole/safearray_windows.go b/vendor/github.com/go-ole/go-ole/safearray_windows.go
new file mode 100644
index 00000000..0c1b3a10
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearray_windows.go
@@ -0,0 +1,337 @@
+// +build windows
+
+package ole
+
+import (
+ "unsafe"
+)
+
+var (
+ procSafeArrayAccessData = modoleaut32.NewProc("SafeArrayAccessData")
+ procSafeArrayAllocData = modoleaut32.NewProc("SafeArrayAllocData")
+ procSafeArrayAllocDescriptor = modoleaut32.NewProc("SafeArrayAllocDescriptor")
+ procSafeArrayAllocDescriptorEx = modoleaut32.NewProc("SafeArrayAllocDescriptorEx")
+ procSafeArrayCopy = modoleaut32.NewProc("SafeArrayCopy")
+ procSafeArrayCopyData = modoleaut32.NewProc("SafeArrayCopyData")
+ procSafeArrayCreate = modoleaut32.NewProc("SafeArrayCreate")
+ procSafeArrayCreateEx = modoleaut32.NewProc("SafeArrayCreateEx")
+ procSafeArrayCreateVector = modoleaut32.NewProc("SafeArrayCreateVector")
+ procSafeArrayCreateVectorEx = modoleaut32.NewProc("SafeArrayCreateVectorEx")
+ procSafeArrayDestroy = modoleaut32.NewProc("SafeArrayDestroy")
+ procSafeArrayDestroyData = modoleaut32.NewProc("SafeArrayDestroyData")
+ procSafeArrayDestroyDescriptor = modoleaut32.NewProc("SafeArrayDestroyDescriptor")
+ procSafeArrayGetDim = modoleaut32.NewProc("SafeArrayGetDim")
+ procSafeArrayGetElement = modoleaut32.NewProc("SafeArrayGetElement")
+ procSafeArrayGetElemsize = modoleaut32.NewProc("SafeArrayGetElemsize")
+ procSafeArrayGetIID = modoleaut32.NewProc("SafeArrayGetIID")
+ procSafeArrayGetLBound = modoleaut32.NewProc("SafeArrayGetLBound")
+ procSafeArrayGetUBound = modoleaut32.NewProc("SafeArrayGetUBound")
+ procSafeArrayGetVartype = modoleaut32.NewProc("SafeArrayGetVartype")
+ procSafeArrayLock = modoleaut32.NewProc("SafeArrayLock")
+ procSafeArrayPtrOfIndex = modoleaut32.NewProc("SafeArrayPtrOfIndex")
+ procSafeArrayUnaccessData = modoleaut32.NewProc("SafeArrayUnaccessData")
+ procSafeArrayUnlock = modoleaut32.NewProc("SafeArrayUnlock")
+ procSafeArrayPutElement = modoleaut32.NewProc("SafeArrayPutElement")
+ //procSafeArrayRedim = modoleaut32.NewProc("SafeArrayRedim") // TODO
+ //procSafeArraySetIID = modoleaut32.NewProc("SafeArraySetIID") // TODO
+ procSafeArrayGetRecordInfo = modoleaut32.NewProc("SafeArrayGetRecordInfo")
+ procSafeArraySetRecordInfo = modoleaut32.NewProc("SafeArraySetRecordInfo")
+)
+
+// safeArrayAccessData returns raw array pointer.
+//
+// AKA: SafeArrayAccessData in Windows API.
+// Todo: Test
+func safeArrayAccessData(safearray *SafeArray) (element uintptr, err error) {
+ err = convertHresultToError(
+ procSafeArrayAccessData.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&element))))
+ return
+}
+
+// safeArrayUnaccessData releases raw array.
+//
+// AKA: SafeArrayUnaccessData in Windows API.
+func safeArrayUnaccessData(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayUnaccessData.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayAllocData allocates SafeArray.
+//
+// AKA: SafeArrayAllocData in Windows API.
+func safeArrayAllocData(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayAllocData.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayAllocDescriptor allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptor in Windows API.
+func safeArrayAllocDescriptor(dimensions uint32) (safearray *SafeArray, err error) {
+ err = convertHresultToError(
+ procSafeArrayAllocDescriptor.Call(uintptr(dimensions), uintptr(unsafe.Pointer(&safearray))))
+ return
+}
+
+// safeArrayAllocDescriptorEx allocates SafeArray.
+//
+// AKA: SafeArrayAllocDescriptorEx in Windows API.
+func safeArrayAllocDescriptorEx(variantType VT, dimensions uint32) (safearray *SafeArray, err error) {
+ err = convertHresultToError(
+ procSafeArrayAllocDescriptorEx.Call(
+ uintptr(variantType),
+ uintptr(dimensions),
+ uintptr(unsafe.Pointer(&safearray))))
+ return
+}
+
+// safeArrayCopy returns copy of SafeArray.
+//
+// AKA: SafeArrayCopy in Windows API.
+func safeArrayCopy(original *SafeArray) (safearray *SafeArray, err error) {
+ err = convertHresultToError(
+ procSafeArrayCopy.Call(
+ uintptr(unsafe.Pointer(original)),
+ uintptr(unsafe.Pointer(&safearray))))
+ return
+}
+
+// safeArrayCopyData duplicates SafeArray into another SafeArray object.
+//
+// AKA: SafeArrayCopyData in Windows API.
+func safeArrayCopyData(original *SafeArray, duplicate *SafeArray) (err error) {
+ err = convertHresultToError(
+ procSafeArrayCopyData.Call(
+ uintptr(unsafe.Pointer(original)),
+ uintptr(unsafe.Pointer(duplicate))))
+ return
+}
+
+// safeArrayCreate creates SafeArray.
+//
+// AKA: SafeArrayCreate in Windows API.
+func safeArrayCreate(variantType VT, dimensions uint32, bounds *SafeArrayBound) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreate.Call(
+ uintptr(variantType),
+ uintptr(dimensions),
+ uintptr(unsafe.Pointer(bounds)))
+ safearray = (*SafeArray)(unsafe.Pointer(&sa))
+ return
+}
+
+// safeArrayCreateEx creates SafeArray.
+//
+// AKA: SafeArrayCreateEx in Windows API.
+func safeArrayCreateEx(variantType VT, dimensions uint32, bounds *SafeArrayBound, extra uintptr) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreateEx.Call(
+ uintptr(variantType),
+ uintptr(dimensions),
+ uintptr(unsafe.Pointer(bounds)),
+ extra)
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayCreateVector creates SafeArray.
+//
+// AKA: SafeArrayCreateVector in Windows API.
+func safeArrayCreateVector(variantType VT, lowerBound int32, length uint32) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreateVector.Call(
+ uintptr(variantType),
+ uintptr(lowerBound),
+ uintptr(length))
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayCreateVectorEx creates SafeArray.
+//
+// AKA: SafeArrayCreateVectorEx in Windows API.
+func safeArrayCreateVectorEx(variantType VT, lowerBound int32, length uint32, extra uintptr) (safearray *SafeArray, err error) {
+ sa, _, err := procSafeArrayCreateVectorEx.Call(
+ uintptr(variantType),
+ uintptr(lowerBound),
+ uintptr(length),
+ extra)
+ safearray = (*SafeArray)(unsafe.Pointer(sa))
+ return
+}
+
+// safeArrayDestroy destroys SafeArray object.
+//
+// AKA: SafeArrayDestroy in Windows API.
+func safeArrayDestroy(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayDestroy.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayDestroyData destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyData in Windows API.
+func safeArrayDestroyData(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayDestroyData.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayDestroyDescriptor destroys SafeArray object.
+//
+// AKA: SafeArrayDestroyDescriptor in Windows API.
+func safeArrayDestroyDescriptor(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayDestroyDescriptor.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayGetDim returns the number of dimensions in the SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetDim in Windows API.
+func safeArrayGetDim(safearray *SafeArray) (dimensions *uint32, err error) {
+ l, _, err := procSafeArrayGetDim.Call(uintptr(unsafe.Pointer(safearray)))
+ dimensions = (*uint32)(unsafe.Pointer(l))
+ return
+}
+
+// safeArrayGetElementSize is the element size in bytes.
+//
+// AKA: SafeArrayGetElemsize in Windows API.
+func safeArrayGetElementSize(safearray *SafeArray) (length *uint32, err error) {
+ l, _, err := procSafeArrayGetElemsize.Call(uintptr(unsafe.Pointer(safearray)))
+ length = (*uint32)(unsafe.Pointer(l))
+ return
+}
+
+// safeArrayGetElement retrieves element at given index.
+func safeArrayGetElement(safearray *SafeArray, index int32, pv unsafe.Pointer) error {
+ return convertHresultToError(
+ procSafeArrayGetElement.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&index)),
+ uintptr(pv)))
+}
+
+// safeArrayGetElementString retrieves element at given index and converts to string.
+func safeArrayGetElementString(safearray *SafeArray, index int32) (str string, err error) {
+ var element *int16
+ err = convertHresultToError(
+ procSafeArrayGetElement.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&index)),
+ uintptr(unsafe.Pointer(&element))))
+ str = BstrToString(*(**uint16)(unsafe.Pointer(&element)))
+ SysFreeString(element)
+ return
+}
+
+// safeArrayGetIID is the InterfaceID of the elements in the SafeArray.
+//
+// AKA: SafeArrayGetIID in Windows API.
+func safeArrayGetIID(safearray *SafeArray) (guid *GUID, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetIID.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&guid))))
+ return
+}
+
+// safeArrayGetLBound returns lower bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetLBound in Windows API.
+func safeArrayGetLBound(safearray *SafeArray, dimension uint32) (lowerBound int32, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetLBound.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(dimension),
+ uintptr(unsafe.Pointer(&lowerBound))))
+ return
+}
+
+// safeArrayGetUBound returns upper bounds of SafeArray.
+//
+// SafeArrays may have multiple dimensions, meaning they can be
+// multidimensional arrays.
+//
+// AKA: SafeArrayGetUBound in Windows API.
+func safeArrayGetUBound(safearray *SafeArray, dimension uint32) (upperBound int32, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetUBound.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(dimension),
+ uintptr(unsafe.Pointer(&upperBound))))
+ return
+}
+
+// safeArrayGetVartype returns data type of SafeArray.
+//
+// AKA: SafeArrayGetVartype in Windows API.
+func safeArrayGetVartype(safearray *SafeArray) (varType uint16, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetVartype.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&varType))))
+ return
+}
+
+// safeArrayLock locks SafeArray for reading to modify SafeArray.
+//
+// This must be called during some calls to ensure that another process does not
+// read or write to the SafeArray during editing.
+//
+// AKA: SafeArrayLock in Windows API.
+func safeArrayLock(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayLock.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayUnlock unlocks SafeArray for reading.
+//
+// AKA: SafeArrayUnlock in Windows API.
+func safeArrayUnlock(safearray *SafeArray) (err error) {
+ err = convertHresultToError(procSafeArrayUnlock.Call(uintptr(unsafe.Pointer(safearray))))
+ return
+}
+
+// safeArrayPutElement stores the data element at the specified location in the
+// array.
+//
+// AKA: SafeArrayPutElement in Windows API.
+func safeArrayPutElement(safearray *SafeArray, index int64, element uintptr) (err error) {
+ err = convertHresultToError(
+ procSafeArrayPutElement.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&index)),
+ uintptr(unsafe.Pointer(element))))
+ return
+}
+
+// safeArrayGetRecordInfo accesses IRecordInfo info for custom types.
+//
+// AKA: SafeArrayGetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArrayGetRecordInfo(safearray *SafeArray) (recordInfo interface{}, err error) {
+ err = convertHresultToError(
+ procSafeArrayGetRecordInfo.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&recordInfo))))
+ return
+}
+
+// safeArraySetRecordInfo mutates IRecordInfo info for custom types.
+//
+// AKA: SafeArraySetRecordInfo in Windows API.
+//
+// XXX: Must implement IRecordInfo interface for this to return.
+func safeArraySetRecordInfo(safearray *SafeArray, recordInfo interface{}) (err error) {
+ err = convertHresultToError(
+ procSafeArraySetRecordInfo.Call(
+ uintptr(unsafe.Pointer(safearray)),
+ uintptr(unsafe.Pointer(&recordInfo))))
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/safearrayconversion.go b/vendor/github.com/go-ole/go-ole/safearrayconversion.go
new file mode 100644
index 00000000..da737293
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearrayconversion.go
@@ -0,0 +1,140 @@
+// Helper for converting SafeArray to array of objects.
+
+package ole
+
+import (
+ "unsafe"
+)
+
+type SafeArrayConversion struct {
+ Array *SafeArray
+}
+
+func (sac *SafeArrayConversion) ToStringArray() (strings []string) {
+ totalElements, _ := sac.TotalElements(0)
+ strings = make([]string, totalElements)
+
+ for i := int32(0); i < totalElements; i++ {
+ strings[int32(i)], _ = safeArrayGetElementString(sac.Array, i)
+ }
+
+ return
+}
+
+func (sac *SafeArrayConversion) ToByteArray() (bytes []byte) {
+ totalElements, _ := sac.TotalElements(0)
+ bytes = make([]byte, totalElements)
+
+ for i := int32(0); i < totalElements; i++ {
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&bytes[int32(i)]))
+ }
+
+ return
+}
+
+func (sac *SafeArrayConversion) ToValueArray() (values []interface{}) {
+ totalElements, _ := sac.TotalElements(0)
+ values = make([]interface{}, totalElements)
+ vt, _ := safeArrayGetVartype(sac.Array)
+
+ for i := int32(0); i < totalElements; i++ {
+ switch VT(vt) {
+ case VT_BOOL:
+ var v bool
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I1:
+ var v int8
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I2:
+ var v int16
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I4:
+ var v int32
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_I8:
+ var v int64
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI1:
+ var v uint8
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI2:
+ var v uint16
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI4:
+ var v uint32
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_UI8:
+ var v uint64
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_R4:
+ var v float32
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_R8:
+ var v float64
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v
+ case VT_BSTR:
+			v, _ := safeArrayGetElementString(sac.Array, i)
+ values[i] = v
+ case VT_VARIANT:
+ var v VARIANT
+ safeArrayGetElement(sac.Array, i, unsafe.Pointer(&v))
+ values[i] = v.Value()
+ v.Clear()
+ default:
+ // TODO
+ }
+ }
+
+ return
+}
+
+func (sac *SafeArrayConversion) GetType() (varType uint16, err error) {
+ return safeArrayGetVartype(sac.Array)
+}
+
+func (sac *SafeArrayConversion) GetDimensions() (dimensions *uint32, err error) {
+ return safeArrayGetDim(sac.Array)
+}
+
+func (sac *SafeArrayConversion) GetSize() (length *uint32, err error) {
+ return safeArrayGetElementSize(sac.Array)
+}
+
+func (sac *SafeArrayConversion) TotalElements(index uint32) (totalElements int32, err error) {
+ if index < 1 {
+ index = 1
+ }
+
+ // Get array bounds
+ var LowerBounds int32
+ var UpperBounds int32
+
+ LowerBounds, err = safeArrayGetLBound(sac.Array, index)
+ if err != nil {
+ return
+ }
+
+ UpperBounds, err = safeArrayGetUBound(sac.Array, index)
+ if err != nil {
+ return
+ }
+
+ totalElements = UpperBounds - LowerBounds + 1
+ return
+}
+
+// Release Safe Array memory
+func (sac *SafeArrayConversion) Release() {
+ safeArrayDestroy(sac.Array)
+}
diff --git a/vendor/github.com/go-ole/go-ole/safearrayslices.go b/vendor/github.com/go-ole/go-ole/safearrayslices.go
new file mode 100644
index 00000000..a9fa885f
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/safearrayslices.go
@@ -0,0 +1,33 @@
+// +build windows
+
+package ole
+
+import (
+ "unsafe"
+)
+
+func safeArrayFromByteSlice(slice []byte) *SafeArray {
+ array, _ := safeArrayCreateVector(VT_UI1, 0, uint32(len(slice)))
+
+ if array == nil {
+ panic("Could not convert []byte to SAFEARRAY")
+ }
+
+ for i, v := range slice {
+ safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(&v)))
+ }
+ return array
+}
+
+func safeArrayFromStringSlice(slice []string) *SafeArray {
+ array, _ := safeArrayCreateVector(VT_BSTR, 0, uint32(len(slice)))
+
+ if array == nil {
+ panic("Could not convert []string to SAFEARRAY")
+ }
+ // SysAllocStringLen(s)
+ for i, v := range slice {
+ safeArrayPutElement(array, int64(i), uintptr(unsafe.Pointer(SysAllocStringLen(v))))
+ }
+ return array
+}
diff --git a/vendor/github.com/go-ole/go-ole/utility.go b/vendor/github.com/go-ole/go-ole/utility.go
new file mode 100644
index 00000000..99ee82dc
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/utility.go
@@ -0,0 +1,101 @@
+package ole
+
+import (
+ "unicode/utf16"
+ "unsafe"
+)
+
+// ClassIDFrom retrieves the class ID, whether the given value is a program ID or an application string.
+//
+// This helper checks both CLSIDFromProgID and CLSIDFromString. If you know
+// which form you have, it is faster to call the corresponding function
+// directly, but this helper will try both for you.
+func ClassIDFrom(programID string) (classID *GUID, err error) {
+ classID, err = CLSIDFromProgID(programID)
+ if err != nil {
+ classID, err = CLSIDFromString(programID)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// BytePtrToString converts byte pointer to a Go string.
+func BytePtrToString(p *byte) string {
+ a := (*[10000]uint8)(unsafe.Pointer(p))
+ i := 0
+ for a[i] != 0 {
+ i++
+ }
+ return string(a[:i])
+}
+
+// UTF16PtrToString is alias for LpOleStrToString.
+//
+// Kept for compatibility reasons.
+func UTF16PtrToString(p *uint16) string {
+ return LpOleStrToString(p)
+}
+
+// LpOleStrToString converts COM Unicode to Go string.
+func LpOleStrToString(p *uint16) string {
+ if p == nil {
+ return ""
+ }
+
+ length := lpOleStrLen(p)
+ a := make([]uint16, length)
+
+ ptr := unsafe.Pointer(p)
+
+ for i := 0; i < int(length); i++ {
+ a[i] = *(*uint16)(ptr)
+ ptr = unsafe.Pointer(uintptr(ptr) + 2)
+ }
+
+ return string(utf16.Decode(a))
+}
+
+// BstrToString converts COM binary string to Go string.
+func BstrToString(p *uint16) string {
+ if p == nil {
+ return ""
+ }
+ length := SysStringLen((*int16)(unsafe.Pointer(p)))
+ a := make([]uint16, length)
+
+ ptr := unsafe.Pointer(p)
+
+ for i := 0; i < int(length); i++ {
+ a[i] = *(*uint16)(ptr)
+ ptr = unsafe.Pointer(uintptr(ptr) + 2)
+ }
+ return string(utf16.Decode(a))
+}
+
+// lpOleStrLen returns the length of Unicode string.
+func lpOleStrLen(p *uint16) (length int64) {
+ if p == nil {
+ return 0
+ }
+
+ ptr := unsafe.Pointer(p)
+
+ for i := 0; ; i++ {
+ if 0 == *(*uint16)(ptr) {
+ length = int64(i)
+ break
+ }
+ ptr = unsafe.Pointer(uintptr(ptr) + 2)
+ }
+ return
+}
+
+// convertHresultToError converts the HRESULT returned by a syscall into an error if the call was unsuccessful.
+func convertHresultToError(hr uintptr, r2 uintptr, ignore error) (err error) {
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
diff --git a/vendor/github.com/go-ole/go-ole/variables.go b/vendor/github.com/go-ole/go-ole/variables.go
new file mode 100644
index 00000000..a6add1b0
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variables.go
@@ -0,0 +1,15 @@
+// +build windows
+
+package ole
+
+import (
+ "golang.org/x/sys/windows"
+)
+
+var (
+ modcombase = windows.NewLazySystemDLL("combase.dll")
+ modkernel32 = windows.NewLazySystemDLL("kernel32.dll")
+ modole32 = windows.NewLazySystemDLL("ole32.dll")
+ modoleaut32 = windows.NewLazySystemDLL("oleaut32.dll")
+ moduser32 = windows.NewLazySystemDLL("user32.dll")
+)
diff --git a/vendor/github.com/go-ole/go-ole/variant.go b/vendor/github.com/go-ole/go-ole/variant.go
new file mode 100644
index 00000000..967a23fe
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant.go
@@ -0,0 +1,105 @@
+package ole
+
+import "unsafe"
+
+// NewVariant returns new variant based on type and value.
+func NewVariant(vt VT, val int64) VARIANT {
+ return VARIANT{VT: vt, Val: val}
+}
+
+// ToIUnknown converts Variant to Unknown object.
+func (v *VARIANT) ToIUnknown() *IUnknown {
+ if v.VT != VT_UNKNOWN {
+ return nil
+ }
+ return (*IUnknown)(unsafe.Pointer(uintptr(v.Val)))
+}
+
+// ToIDispatch converts variant to dispatch object.
+func (v *VARIANT) ToIDispatch() *IDispatch {
+ if v.VT != VT_DISPATCH {
+ return nil
+ }
+ return (*IDispatch)(unsafe.Pointer(uintptr(v.Val)))
+}
+
+// ToArray converts variant to SafeArray helper.
+func (v *VARIANT) ToArray() *SafeArrayConversion {
+ if v.VT != VT_SAFEARRAY {
+ if v.VT&VT_ARRAY == 0 {
+ return nil
+ }
+ }
+ var safeArray *SafeArray = (*SafeArray)(unsafe.Pointer(uintptr(v.Val)))
+ return &SafeArrayConversion{safeArray}
+}
+
+// ToString converts variant to Go string.
+func (v *VARIANT) ToString() string {
+ if v.VT != VT_BSTR {
+ return ""
+ }
+ return BstrToString(*(**uint16)(unsafe.Pointer(&v.Val)))
+}
+
+// Clear the memory of variant object.
+func (v *VARIANT) Clear() error {
+ return VariantClear(v)
+}
+
+// Value returns the variant's value converted to a native Go type based on its VT tag.
+//
+// Supported types include 1-, 2-, 4- and 8-byte signed and unsigned integers,
+// floats, strings (BSTR), dates, booleans, and IUnknown/IDispatch pointers;
+// unsupported types return nil.
+//
+// The result is an interface{}, so the caller needs a further type assertion.
+func (v *VARIANT) Value() interface{} {
+ switch v.VT {
+ case VT_I1:
+ return int8(v.Val)
+ case VT_UI1:
+ return uint8(v.Val)
+ case VT_I2:
+ return int16(v.Val)
+ case VT_UI2:
+ return uint16(v.Val)
+ case VT_I4:
+ return int32(v.Val)
+ case VT_UI4:
+ return uint32(v.Val)
+ case VT_I8:
+ return int64(v.Val)
+ case VT_UI8:
+ return uint64(v.Val)
+ case VT_INT:
+ return int(v.Val)
+ case VT_UINT:
+ return uint(v.Val)
+ case VT_INT_PTR:
+ return uintptr(v.Val) // TODO
+ case VT_UINT_PTR:
+ return uintptr(v.Val)
+ case VT_R4:
+ return *(*float32)(unsafe.Pointer(&v.Val))
+ case VT_R8:
+ return *(*float64)(unsafe.Pointer(&v.Val))
+ case VT_BSTR:
+ return v.ToString()
+ case VT_DATE:
+ // VT_DATE type will either return float64 or time.Time.
+ d := uint64(v.Val)
+ date, err := GetVariantDate(d)
+ if err != nil {
+ return float64(v.Val)
+ }
+ return date
+ case VT_UNKNOWN:
+ return v.ToIUnknown()
+ case VT_DISPATCH:
+ return v.ToIDispatch()
+ case VT_BOOL:
+ return v.Val != 0
+ }
+ return nil
+}
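As a further hedged illustration, this is roughly how callers consume `VARIANT.Value()`; the helper name `propertyAsGo` and the use of `oleutil.GetProperty` on an already-obtained `*ole.IDispatch` are assumptions for the sketch, not code from the vendored files.

```go
// Sketch only: convert a COM property into a native Go value.
package example

import (
	"fmt"

	ole "github.com/go-ole/go-ole"
	"github.com/go-ole/go-ole/oleutil"
)

func propertyAsGo(disp *ole.IDispatch, name string) (interface{}, error) {
	v, err := oleutil.GetProperty(disp, name)
	if err != nil {
		return nil, err
	}
	// Clear releases any memory owned by the VARIANT once we are done.
	defer v.Clear()

	// Value() maps the VARIANT onto a native Go type based on its VT tag;
	// the caller recovers the concrete type with a type switch or assertion.
	switch val := v.Value().(type) {
	case string, int32, int64, uint32, uint64, float32, float64, bool:
		return val, nil
	default:
		return nil, fmt.Errorf("unhandled VARIANT type %s", v.VT)
	}
}
```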
diff --git a/vendor/github.com/go-ole/go-ole/variant_386.go b/vendor/github.com/go-ole/go-ole/variant_386.go
new file mode 100644
index 00000000..e73736bf
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_386.go
@@ -0,0 +1,11 @@
+// +build 386
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_amd64.go b/vendor/github.com/go-ole/go-ole/variant_amd64.go
new file mode 100644
index 00000000..dccdde13
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_amd64.go
@@ -0,0 +1,12 @@
+// +build amd64
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+ _ [8]byte // 24
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_arm.go b/vendor/github.com/go-ole/go-ole/variant_arm.go
new file mode 100644
index 00000000..d4724544
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_arm.go
@@ -0,0 +1,11 @@
+// +build arm
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_arm64.go b/vendor/github.com/go-ole/go-ole/variant_arm64.go
new file mode 100644
index 00000000..78473cec
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_arm64.go
@@ -0,0 +1,13 @@
+//go:build arm64
+// +build arm64
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+ _ [8]byte // 24
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_date_386.go b/vendor/github.com/go-ole/go-ole/variant_date_386.go
new file mode 100644
index 00000000..1b970f63
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_date_386.go
@@ -0,0 +1,22 @@
+// +build windows,386
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value uint64) (time.Time, error) {
+ var st syscall.Systemtime
+ v1 := uint32(value)
+ v2 := uint32(value >> 32)
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+ return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
+ }
+ return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_date_amd64.go b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go
new file mode 100644
index 00000000..6952f1f0
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_date_amd64.go
@@ -0,0 +1,20 @@
+// +build windows,amd64
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value uint64) (time.Time, error) {
+ var st syscall.Systemtime
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(value), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+ return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
+ }
+ return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm.go b/vendor/github.com/go-ole/go-ole/variant_date_arm.go
new file mode 100644
index 00000000..09ec7b5c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_date_arm.go
@@ -0,0 +1,22 @@
+// +build windows,arm
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value uint64) (time.Time, error) {
+ var st syscall.Systemtime
+ v1 := uint32(value)
+ v2 := uint32(value >> 32)
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+ return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
+ }
+ return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_date_arm64.go b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go
new file mode 100644
index 00000000..02b04a0d
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_date_arm64.go
@@ -0,0 +1,23 @@
+//go:build windows && arm64
+// +build windows,arm64
+
+package ole
+
+import (
+ "errors"
+ "syscall"
+ "time"
+ "unsafe"
+)
+
+// GetVariantDate converts COM Variant Time value to Go time.Time.
+func GetVariantDate(value uint64) (time.Time, error) {
+ var st syscall.Systemtime
+ v1 := uint32(value)
+ v2 := uint32(value >> 32)
+ r, _, _ := procVariantTimeToSystemTime.Call(uintptr(v1), uintptr(v2), uintptr(unsafe.Pointer(&st)))
+ if r != 0 {
+ return time.Date(int(st.Year), time.Month(st.Month), int(st.Day), int(st.Hour), int(st.Minute), int(st.Second), int(st.Milliseconds/1000), time.UTC), nil
+ }
+ return time.Now(), errors.New("Could not convert to time, passing current time.")
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_ppc64le.go b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go
new file mode 100644
index 00000000..326427a7
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_ppc64le.go
@@ -0,0 +1,12 @@
+// +build ppc64le
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+ _ [8]byte // 24
+}
diff --git a/vendor/github.com/go-ole/go-ole/variant_s390x.go b/vendor/github.com/go-ole/go-ole/variant_s390x.go
new file mode 100644
index 00000000..9874ca66
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/variant_s390x.go
@@ -0,0 +1,12 @@
+// +build s390x
+
+package ole
+
+type VARIANT struct {
+ VT VT // 2
+ wReserved1 uint16 // 4
+ wReserved2 uint16 // 6
+ wReserved3 uint16 // 8
+ Val int64 // 16
+ _ [8]byte // 24
+}
diff --git a/vendor/github.com/go-ole/go-ole/vt_string.go b/vendor/github.com/go-ole/go-ole/vt_string.go
new file mode 100644
index 00000000..729b4a04
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/vt_string.go
@@ -0,0 +1,58 @@
+// generated by stringer -output vt_string.go -type VT; DO NOT EDIT
+
+package ole
+
+import "fmt"
+
+const (
+ _VT_name_0 = "VT_EMPTYVT_NULLVT_I2VT_I4VT_R4VT_R8VT_CYVT_DATEVT_BSTRVT_DISPATCHVT_ERRORVT_BOOLVT_VARIANTVT_UNKNOWNVT_DECIMAL"
+ _VT_name_1 = "VT_I1VT_UI1VT_UI2VT_UI4VT_I8VT_UI8VT_INTVT_UINTVT_VOIDVT_HRESULTVT_PTRVT_SAFEARRAYVT_CARRAYVT_USERDEFINEDVT_LPSTRVT_LPWSTR"
+ _VT_name_2 = "VT_RECORDVT_INT_PTRVT_UINT_PTR"
+ _VT_name_3 = "VT_FILETIMEVT_BLOBVT_STREAMVT_STORAGEVT_STREAMED_OBJECTVT_STORED_OBJECTVT_BLOB_OBJECTVT_CFVT_CLSID"
+ _VT_name_4 = "VT_BSTR_BLOBVT_VECTOR"
+ _VT_name_5 = "VT_ARRAY"
+ _VT_name_6 = "VT_BYREF"
+ _VT_name_7 = "VT_RESERVED"
+ _VT_name_8 = "VT_ILLEGAL"
+)
+
+var (
+ _VT_index_0 = [...]uint8{0, 8, 15, 20, 25, 30, 35, 40, 47, 54, 65, 73, 80, 90, 100, 110}
+ _VT_index_1 = [...]uint8{0, 5, 11, 17, 23, 28, 34, 40, 47, 54, 64, 70, 82, 91, 105, 113, 122}
+ _VT_index_2 = [...]uint8{0, 9, 19, 30}
+ _VT_index_3 = [...]uint8{0, 11, 18, 27, 37, 55, 71, 85, 90, 98}
+ _VT_index_4 = [...]uint8{0, 12, 21}
+ _VT_index_5 = [...]uint8{0, 8}
+ _VT_index_6 = [...]uint8{0, 8}
+ _VT_index_7 = [...]uint8{0, 11}
+ _VT_index_8 = [...]uint8{0, 10}
+)
+
+func (i VT) String() string {
+ switch {
+ case 0 <= i && i <= 14:
+ return _VT_name_0[_VT_index_0[i]:_VT_index_0[i+1]]
+ case 16 <= i && i <= 31:
+ i -= 16
+ return _VT_name_1[_VT_index_1[i]:_VT_index_1[i+1]]
+ case 36 <= i && i <= 38:
+ i -= 36
+ return _VT_name_2[_VT_index_2[i]:_VT_index_2[i+1]]
+ case 64 <= i && i <= 72:
+ i -= 64
+ return _VT_name_3[_VT_index_3[i]:_VT_index_3[i+1]]
+ case 4095 <= i && i <= 4096:
+ i -= 4095
+ return _VT_name_4[_VT_index_4[i]:_VT_index_4[i+1]]
+ case i == 8192:
+ return _VT_name_5
+ case i == 16384:
+ return _VT_name_6
+ case i == 32768:
+ return _VT_name_7
+ case i == 65535:
+ return _VT_name_8
+ default:
+ return fmt.Sprintf("VT(%d)", i)
+ }
+}
diff --git a/vendor/github.com/go-ole/go-ole/winrt.go b/vendor/github.com/go-ole/go-ole/winrt.go
new file mode 100644
index 00000000..4e9eca73
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/winrt.go
@@ -0,0 +1,99 @@
+// +build windows
+
+package ole
+
+import (
+ "reflect"
+ "syscall"
+ "unicode/utf8"
+ "unsafe"
+)
+
+var (
+ procRoInitialize = modcombase.NewProc("RoInitialize")
+ procRoActivateInstance = modcombase.NewProc("RoActivateInstance")
+ procRoGetActivationFactory = modcombase.NewProc("RoGetActivationFactory")
+ procWindowsCreateString = modcombase.NewProc("WindowsCreateString")
+ procWindowsDeleteString = modcombase.NewProc("WindowsDeleteString")
+ procWindowsGetStringRawBuffer = modcombase.NewProc("WindowsGetStringRawBuffer")
+)
+
+func RoInitialize(thread_type uint32) (err error) {
+ hr, _, _ := procRoInitialize.Call(uintptr(thread_type))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func RoActivateInstance(clsid string) (ins *IInspectable, err error) {
+ hClsid, err := NewHString(clsid)
+ if err != nil {
+ return nil, err
+ }
+ defer DeleteHString(hClsid)
+
+ hr, _, _ := procRoActivateInstance.Call(
+ uintptr(unsafe.Pointer(hClsid)),
+ uintptr(unsafe.Pointer(&ins)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) {
+ hClsid, err := NewHString(clsid)
+ if err != nil {
+ return nil, err
+ }
+ defer DeleteHString(hClsid)
+
+ hr, _, _ := procRoGetActivationFactory.Call(
+ uintptr(unsafe.Pointer(hClsid)),
+ uintptr(unsafe.Pointer(iid)),
+ uintptr(unsafe.Pointer(&ins)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// HString is a handle to a WinRT (HSTRING) string.
+type HString uintptr
+
+// NewHString returns a new HString for Go string.
+func NewHString(s string) (hstring HString, err error) {
+ u16 := syscall.StringToUTF16Ptr(s)
+ len := uint32(utf8.RuneCountInString(s))
+ hr, _, _ := procWindowsCreateString.Call(
+ uintptr(unsafe.Pointer(u16)),
+ uintptr(len),
+ uintptr(unsafe.Pointer(&hstring)))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// DeleteHString deletes HString.
+func DeleteHString(hstring HString) (err error) {
+ hr, _, _ := procWindowsDeleteString.Call(uintptr(hstring))
+ if hr != 0 {
+ err = NewError(hr)
+ }
+ return
+}
+
+// String returns Go string value of HString.
+func (h HString) String() string {
+ var u16buf uintptr
+ var u16len uint32
+ u16buf, _, _ = procWindowsGetStringRawBuffer.Call(
+ uintptr(h),
+ uintptr(unsafe.Pointer(&u16len)))
+
+ u16hdr := reflect.SliceHeader{Data: u16buf, Len: int(u16len), Cap: int(u16len)}
+ u16 := *(*[]uint16)(unsafe.Pointer(&u16hdr))
+ return syscall.UTF16ToString(u16)
+}
diff --git a/vendor/github.com/go-ole/go-ole/winrt_doc.go b/vendor/github.com/go-ole/go-ole/winrt_doc.go
new file mode 100644
index 00000000..52e6d74c
--- /dev/null
+++ b/vendor/github.com/go-ole/go-ole/winrt_doc.go
@@ -0,0 +1,36 @@
+// +build !windows
+
+package ole
+
+// RoInitialize
+func RoInitialize(thread_type uint32) (err error) {
+ return NewError(E_NOTIMPL)
+}
+
+// RoActivateInstance
+func RoActivateInstance(clsid string) (ins *IInspectable, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// RoGetActivationFactory
+func RoGetActivationFactory(clsid string, iid *GUID) (ins *IInspectable, err error) {
+ return nil, NewError(E_NOTIMPL)
+}
+
+// HString is a handle to a WinRT (HSTRING) string.
+type HString uintptr
+
+// NewHString returns a new HString for Go string.
+func NewHString(s string) (hstring HString, err error) {
+ return HString(uintptr(0)), NewError(E_NOTIMPL)
+}
+
+// DeleteHString deletes HString.
+func DeleteHString(hstring HString) (err error) {
+ return NewError(E_NOTIMPL)
+}
+
+// String returns Go string value of HString.
+func (h HString) String() string {
+ return ""
+}
diff --git a/vendor/github.com/jaypipes/ghw/.gitignore b/vendor/github.com/jaypipes/ghw/.gitignore
new file mode 100644
index 00000000..34d0d840
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/.gitignore
@@ -0,0 +1,3 @@
+vendor/
+coverage*.*
+*~
diff --git a/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md b/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md
new file mode 100644
index 00000000..a4b37714
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/CODE_OF_CONDUCT.md
@@ -0,0 +1,134 @@
+
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+We as members, contributors, and leaders pledge to make participation in our
+community a harassment-free experience for everyone, regardless of age, body
+size, visible or invisible disability, ethnicity, sex characteristics, gender
+identity and expression, level of experience, education, socio-economic status,
+nationality, personal appearance, race, caste, color, religion, or sexual identity
+and orientation.
+
+We pledge to act and interact in ways that contribute to an open, welcoming,
+diverse, inclusive, and healthy community.
+
+## Our Standards
+
+Examples of behavior that contributes to a positive environment for our
+community include:
+
+* Demonstrating empathy and kindness toward other people
+* Being respectful of differing opinions, viewpoints, and experiences
+* Giving and gracefully accepting constructive feedback
+* Accepting responsibility and apologizing to those affected by our mistakes,
+ and learning from the experience
+* Focusing on what is best not just for us as individuals, but for the
+ overall community
+
+Examples of unacceptable behavior include:
+
+* The use of sexualized language or imagery, and sexual attention or
+ advances of any kind
+* Trolling, insulting or derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or email
+ address, without their explicit permission
+* Other conduct which could reasonably be considered inappropriate in a
+ professional setting
+
+## Enforcement Responsibilities
+
+Community leaders are responsible for clarifying and enforcing our standards of
+acceptable behavior and will take appropriate and fair corrective action in
+response to any behavior that they deem inappropriate, threatening, offensive,
+or harmful.
+
+Community leaders have the right and responsibility to remove, edit, or reject
+comments, commits, code, wiki edits, issues, and other contributions that are
+not aligned to this Code of Conduct, and will communicate reasons for moderation
+decisions when appropriate.
+
+## Scope
+
+This Code of Conduct applies within all community spaces, and also applies when
+an individual is officially representing the community in public spaces.
+Examples of representing our community include using an official e-mail address,
+posting via an official social media account, or acting as an appointed
+representative at an online or offline event.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be
+reported to the community leaders responsible for enforcement at
+[INSERT CONTACT METHOD].
+All complaints will be reviewed and investigated promptly and fairly.
+
+All community leaders are obligated to respect the privacy and security of the
+reporter of any incident.
+
+## Enforcement Guidelines
+
+Community leaders will follow these Community Impact Guidelines in determining
+the consequences for any action they deem in violation of this Code of Conduct:
+
+### 1. Correction
+
+**Community Impact**: Use of inappropriate language or other behavior deemed
+unprofessional or unwelcome in the community.
+
+**Consequence**: A private, written warning from community leaders, providing
+clarity around the nature of the violation and an explanation of why the
+behavior was inappropriate. A public apology may be requested.
+
+### 2. Warning
+
+**Community Impact**: A violation through a single incident or series
+of actions.
+
+**Consequence**: A warning with consequences for continued behavior. No
+interaction with the people involved, including unsolicited interaction with
+those enforcing the Code of Conduct, for a specified period of time. This
+includes avoiding interactions in community spaces as well as external channels
+like social media. Violating these terms may lead to a temporary or
+permanent ban.
+
+### 3. Temporary Ban
+
+**Community Impact**: A serious violation of community standards, including
+sustained inappropriate behavior.
+
+**Consequence**: A temporary ban from any sort of interaction or public
+communication with the community for a specified period of time. No public or
+private interaction with the people involved, including unsolicited interaction
+with those enforcing the Code of Conduct, is allowed during this period.
+Violating these terms may lead to a permanent ban.
+
+### 4. Permanent Ban
+
+**Community Impact**: Demonstrating a pattern of violation of community
+standards, including sustained inappropriate behavior, harassment of an
+individual, or aggression toward or disparagement of classes of individuals.
+
+**Consequence**: A permanent ban from any sort of public interaction within
+the community.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage],
+version 2.1, available at
+[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1].
+
+Community Impact Guidelines were inspired by
+[Mozilla's code of conduct enforcement ladder][Mozilla CoC].
+
+For answers to common questions about this code of conduct, see the FAQ at
+[https://www.contributor-covenant.org/faq][FAQ]. Translations are available
+at [https://www.contributor-covenant.org/translations][translations].
+
+[homepage]: https://www.contributor-covenant.org
+[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html
+[Mozilla CoC]: https://github.com/mozilla/diversity
+[FAQ]: https://www.contributor-covenant.org/faq
+[translations]: https://www.contributor-covenant.org/translations
+
diff --git a/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md b/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md
new file mode 100644
index 00000000..b790517b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/CONTRIBUTING.md
@@ -0,0 +1,54 @@
+# How to Contribute
+
+We welcome any and all contributions to `ghw`! Filing [bug reports][gh-issues],
+asking questions and submitting patches are all encouraged.
+
+[gh-issues]: https://github.com/jaypipes/ghw/issues
+
+## Submitting patches via pull requests
+
+We use GitHub pull requests to review code submissions.
+
+Consult [GitHub Help][pr-help] for more information on using pull requests.
+
+[pr-help]: https://help.github.com/articles/about-pull-requests/
+
+We ask that contributors submitting a pull request sign their commits and
+attest to the Developer Certificate of Origin (DCO).
+
+## Developer Certificate of Origin
+
+The DCO is a lightweight way for contributors to certify that they wrote or
+otherwise have the right to submit the code they are contributing to the
+project. Here is the [full text of the DCO][dco], reformatted for readability:
+
+> By making a contribution to this project, I certify that:
+>
+> a. The contribution was created in whole or in part by me and I have the
+> right to submit it under the open source license indicated in the file; or
+>
+> b. The contribution is based upon previous work that, to the best of my
+> knowledge, is covered under an appropriate open source license and I have the
+> right under that license to submit that work with modifications, whether
+> created in whole or in part by me, under the same open source license (unless
+> I am permitted to submit under a different license), as indicated in the
+> file; or
+>
+> c. The contribution was provided directly to me by some other person who
+> certified (a), (b) or (c) and I have not modified it.
+>
+> d. I understand and agree that this project and the contribution are public
+> and that a record of the contribution (including all personal information I
+> submit with it, including my sign-off) is maintained indefinitely and may be
+> redistributed consistent with this project or the open source license(s)
+> involved.
+
+[dco]: https://developercertificate.org/
+
+You can sign your commits using `git commit -s` before pushing them to
+GitHub and creating a pull request.
+
+## Community Guidelines
+
+1. Be kind.
+2. Seriously, that's it.
diff --git a/vendor/github.com/jaypipes/ghw/COPYING b/vendor/github.com/jaypipes/ghw/COPYING
new file mode 100644
index 00000000..68c771a0
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/COPYING
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/vendor/github.com/jaypipes/ghw/Dockerfile b/vendor/github.com/jaypipes/ghw/Dockerfile
new file mode 100644
index 00000000..cbd587d6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/Dockerfile
@@ -0,0 +1,26 @@
+FROM golang:1.15-buster as builder
+WORKDIR /go/src/github.com/jaypipes/ghw
+
+# Force the go compiler to use modules.
+ENV GO111MODULE=on
+ENV GOPROXY=direct
+
+# go.mod and go.sum go into their own layers.
+COPY go.mod .
+COPY go.sum .
+
+# This ensures `go mod download` happens only when go.mod and go.sum change.
+RUN go mod download
+
+COPY . .
+
+RUN CGO_ENABLED=0 go build -o ghwc ./cmd/ghwc/
+
+FROM alpine:3.7
+RUN apk add --no-cache ethtool
+
+WORKDIR /bin
+
+COPY --from=builder /go/src/github.com/jaypipes/ghw/ghwc /bin
+
+CMD ghwc
diff --git a/vendor/github.com/jaypipes/ghw/Makefile b/vendor/github.com/jaypipes/ghw/Makefile
new file mode 100644
index 00000000..c7e0db40
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/Makefile
@@ -0,0 +1,39 @@
+VENDOR := vendor
+PKGS := $(shell go list ./... | grep -v /$(VENDOR)/)
+SRC = $(shell find . -type f -name '*.go' -not -path "*/$(VENDOR)/*")
+BIN_DIR := $(GOPATH)/bin
+GOMETALINTER := $(BIN_DIR)/gometalinter
+
+.PHONY: test
+test: vet
+ go test $(PKGS)
+
+$(GOMETALINTER):
+ go get -u github.com/alecthomas/gometalinter
+ $(GOMETALINTER) --install &> /dev/null
+
+.PHONY: lint
+lint: $(GOMETALINTER)
+ $(GOMETALINTER) ./... --vendor
+
+.PHONY: fmt
+fmt:
+ @echo "Running gofmt on all sources..."
+ @gofmt -s -l -w $(SRC)
+
+.PHONY: fmtcheck
+fmtcheck:
+ @bash -c "diff -u <(echo -n) <(gofmt -d $(SRC))"
+
+.PHONY: vet
+vet:
+ go vet $(PKGS)
+
+.PHONY: cover
+cover:
+ $(shell [ -e coverage.out ] && rm coverage.out)
+ @echo "mode: count" > coverage-all.out
+ $(foreach pkg,$(PKGS),\
+ go test -coverprofile=coverage.out -covermode=count $(pkg);\
+ tail -n +2 coverage.out >> coverage-all.out;)
+ go tool cover -html=coverage-all.out -o=coverage-all.html
diff --git a/vendor/github.com/jaypipes/ghw/README.md b/vendor/github.com/jaypipes/ghw/README.md
new file mode 100644
index 00000000..498a0f78
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/README.md
@@ -0,0 +1,1399 @@
+# `ghw` - Golang HardWare discovery/inspection library
+
+[![Build Status](https://github.com/jaypipes/ghw/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/jaypipes/ghw/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/jaypipes/ghw)](https://goreportcard.com/report/github.com/jaypipes/ghw)
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)
+
+![ghw mascot](images/ghw-gopher.png)
+
+`ghw` is a small Golang library providing hardware inspection and discovery
+for Linux and Windows, with partial support currently available for macOS.
+
+## Design Principles
+
+* No root privileges needed for discovery
+
+  `ghw` goes the extra mile to be useful without root privileges. We query for
+ host hardware information as directly as possible without relying on shellouts
+ to programs like `dmidecode` that require root privileges to execute.
+
+ Elevated privileges are indeed required to query for some information, but
+ `ghw` will never error out if blocked from reading that information. Instead,
+ `ghw` will print a warning message about the information that could not be
+  retrieved. You may disable these warning messages with the
+  `GHW_DISABLE_WARNINGS` environment variable.
+
+* Well-documented code and plenty of example code
+
+ The code itself should be well-documented with lots of usage
+ examples.
+
+* Interfaces should be consistent across modules
+
+ Each module in the library should be structured in a consistent fashion, and
+ the structs returned by various library functions should have consistent
+ attribute and method names.
+
+## Inspecting != Monitoring
+
+`ghw` is a tool for gathering information about your hardware's **capacity**
+and **capabilities**.
+
+It is important to point out that `ghw` does **NOT** report information that is
+temporary or variable. It is **NOT** a system monitor nor is it an appropriate
+tool for gathering data points for metrics that change over time. If you are
+looking for a system that tracks usage of CPU, memory, network I/O or disk I/O,
+there are plenty of great open source tools that do this! Check out the
+[Prometheus project](https://prometheus.io/) for a great example.
+
+## Usage
+
+You can use the functions in `ghw` to determine various hardware-related
+information about the host computer:
+
+* [Memory](#memory)
+* [CPU](#cpu)
+* [Block storage](#block-storage)
+* [Topology](#topology)
+* [Network](#network)
+* [PCI](#pci)
+* [GPU](#gpu)
+* [Chassis](#chassis)
+* [BIOS](#bios)
+* [Baseboard](#baseboard)
+* [Product](#product)
+* [YAML and JSON serialization](#serialization)
+
+### Overriding the root mountpoint `ghw` uses
+
+The default root mountpoint that `ghw` uses when looking for information about
+the host system is `/`. So, for example, when looking up CPU information on a
+Linux system, `ghw.CPU()` will use the path `/proc/cpuinfo`.
+
+If you are calling `ghw` from a system that has an alternate root mountpoint,
+you can either set the `GHW_CHROOT` environment variable to that alternate
+path, or call the module constructor function with the `ghw.WithChroot()`
+modifier.
+
+For example, if you are executing from within an application container that has
+bind-mounted the root host filesystem to the mount point `/host`, you would set
+`GHW_CHROOT` to `/host` so that `ghw` can find `/proc/cpuinfo` at
+`/host/proc/cpuinfo`.
+
+Alternately, you can use the `ghw.WithChroot()` function like so:
+
+```go
+cpu, err := ghw.CPU(ghw.WithChroot("/host"))
+```
+
+### Overriding the per-mountpoint paths `ghw` uses
+
+When running inside containers, overriding just the root mountpoint can be
+cumbersome. When a container is granted access to the host file systems, it is
+more common to bind-mount them in non-standard locations, like `/sys` on
+`/host-sys` or `/proc` on `/host-proc`. It is rarer to mount them under a
+common subtree (e.g. `/sys` on `/host/sys` and `/proc` on `/host/proc`).
+
+To better cover this use case, `ghw` allows you to *programmatically* override
+the initial component of individual filesystem subtrees, giving access to
+`sysfs` (or `procfs`, etc.) mounted in non-standard locations.
+
+
+```go
+cpu, err := ghw.CPU(ghw.WithPathOverrides(ghw.PathOverrides{
+ "/proc": "/host-proc",
+ "/sys": "/host-sys",
+}))
+```
+
+Please note:
+- this feature works in addition to, and is composable with, the
+  `WithChroot`/`GHW_CHROOT` feature.
+- `ghw` does not yet support environment variables to override individual
+  mountpoints, because this could lead to a significant proliferation of
+  environment variables.
+
+### Consuming snapshots
+
+You can make `ghw` read from snapshots (created with `ghw-snapshot`) using
+environment variables or programmatically.
+Please check `SNAPSHOT.md` to learn more about how ghw creates and consumes
+snapshots.
+
+The environment variable `GHW_SNAPSHOT_PATH` lets users specify a snapshot
+that `ghw` will automatically consume. All the needed chroot changes are
+performed automatically. By default, the snapshot is unpacked into a temporary
+directory managed by `ghw` and cleaned up when no longer needed, avoiding
+leftovers.
+
+The remaining environment variables are relevant only if `GHW_SNAPSHOT_PATH`
+is given. `GHW_SNAPSHOT_ROOT` lets users specify the directory into which the
+snapshot should be unpacked. This moves the ownership of that directory from
+`ghw` to the user; for this reason, `ghw` will *not* automatically clean up
+the content unpacked into `GHW_SNAPSHOT_ROOT`.
+
+`GHW_SNAPSHOT_EXCLUSIVE` is relevant only if `GHW_SNAPSHOT_ROOT` is given.
+Set it to any value to enable it. This tells `ghw` that the directory is meant
+to contain only the given snapshot, so `ghw` will *not* attempt to unpack it
+(and will silently go ahead!) unless the directory is empty.
+You can use both `GHW_SNAPSHOT_ROOT` and `GHW_SNAPSHOT_EXCLUSIVE` to make sure
+`ghw` unpacks the snapshot only once, regardless of how many `ghw` packages
+(e.g. cpu, memory) access it.
+
+Set `GHW_SNAPSHOT_PRESERVE` to any value to enable it. If set, `ghw` will *not*
+clean up the unpacked snapshot once done, leaving it on the system.
+
+```go
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+ Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+}))
+
+
+myRoot := "/my/safe/directory"
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+ Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+ Root: &myRoot,
+}))
+
+myOtherRoot := "/my/other/safe/directory"
+cpu, err := ghw.CPU(ghw.WithSnapshot(ghw.SnapshotOptions{
+ Path: "/path/to/linux-amd64-d4771ed3300339bc75f856be09fc6430.tar.gz",
+ Root: &myOtherRoot,
+ Exclusive: true,
+}))
+```
+
+### Creating snapshots
+
+You can create ghw snapshots in two ways: you can use the `ghw-snapshot` tool,
+or you can create them programmatically from your Go code. We explore the
+latter case here.
+
+Snapshotting takes two phases:
+1. clone the relevant pseudofiles/pseudodirectories into a temporary tree;
+   this tree is usually deleted once the packing is successful.
+2. pack the cloned tree into a tar.gz.
+
+```go
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/jaypipes/ghw/pkg/snapshot"
+)
+
+// ...
+
+scratchDir, err := ioutil.TempDir("", "ghw-snapshot-*")
+if err != nil {
+ fmt.Printf("Error creating clone directory: %v", err)
+}
+defer os.RemoveAll(scratchDir)
+
+// this step clones all the files and directories ghw cares about
+if err := snapshot.CloneTreeInto(scratchDir); err != nil {
+ fmt.Printf("error cloning into %q: %v", scratchDir, err)
+}
+
+// optionally, you may add extra content into your snapshot.
+// ghw will ignore the extra content.
+// Glob patterns like `filepath.Glob` are supported.
+fileSpecs := []string{
+ "/proc/cmdline",
+}
+
+// opts allows the client code to optionally dereference symlinks, or copy
+// them into the cloned tree as symlinks
+var opts *snapshot.CopyFileOptions
+if err := snapshot.CopyFilesInto(fileSpecs, scratchDir, opts); err != nil {
+ fmt.Printf("error cloning extra files into %q: %v", scratchDir, err)
+}
+
+// automates the creation of the gzipped tarball out of the given tree.
+if err := snapshot.PackFrom("my-snapshot.tgz", scratchDir); err != nil {
+    fmt.Printf("error packing %q into %q: %v", scratchDir, "my-snapshot.tgz", err)
+}
+```
+
+### Disabling warning messages
+
+When `ghw` isn't able to retrieve some information, it may print certain
+warning messages to `stderr`. To disable these warnings, simply set the
+`GHW_DISABLE_WARNINGS` environment variable:
+
+```
+$ ghwc memory
+WARNING:
+Could not determine total physical bytes of memory. This may
+be due to the host being a virtual machine or container with no
+/var/log/syslog file, or the current user may not have necessary
+privileges to read the syslog. We are falling back to setting the
+total physical amount of memory to the total usable amount of memory
+memory (24GB physical, 24GB usable)
+```
+
+```
+$ GHW_DISABLE_WARNINGS=1 ghwc memory
+memory (24GB physical, 24GB usable)
+```
+
+You can disable warnings programmatically using the `WithDisableWarnings` option:
+
+```go
+
+import (
+ "github.com/jaypipes/ghw"
+)
+
+mem, err := ghw.Memory(ghw.WithDisableWarnings())
+```
+
+`WithDisableWarnings` is an alias for the `WithNullAlerter` option, which in
+turn leverages the more general `Alerter` feature of ghw.
+
+You may supply an `Alerter` to ghw to redirect all warnings to it, for example
+a logger object (such as Go's stdlib `log.Logger`).
+`Alerter` is in fact the minimal logging interface `ghw` needs.
+To learn more, please check the `option.Alerter` interface and the
+`ghw.WithAlerter()` function.
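+
+A minimal sketch of redirecting warnings to a logger, assuming (as suggested
+above) that a stdlib `log.Logger` satisfies the `option.Alerter` interface via
+its `Printf` method:
+
+```go
+package main
+
+import (
+    "log"
+    "os"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    // Assumption: log.Logger implements the option.Alerter interface (it
+    // provides Printf), so ghw warnings are routed to this logger.
+    logger := log.New(os.Stderr, "ghw: ", log.LstdFlags)
+
+    mem, err := ghw.Memory(ghw.WithAlerter(logger))
+    if err != nil {
+        logger.Printf("error getting memory info: %v", err)
+        return
+    }
+
+    logger.Printf("%s", mem.String())
+}
+```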
+
+### Memory
+
+The basic building block of the memory support in ghw is the `ghw.MemoryArea` struct.
+A "memory area" is a block of memory which share common properties. In the simplest
+case, the whole system memory fits in a single memory area; in more complex scenarios,
+like multi-NUMA systems, many memory areas may be present in the system (e.g. one for
+each NUMA cell).
+
+The `ghw.MemoryArea` struct contains the following fields:
+
+* `ghw.MemoryInfo.TotalPhysicalBytes` contains the amount of physical memory on
+ the host
+* `ghw.MemoryInfo.TotalUsableBytes` contains the amount of memory the
+ system can actually use. Usable memory accounts for things like the kernel's
+ resident memory size and some reserved system bits
+
+Information about the host computer's memory can be retrieved using the
+`ghw.Memory()` function which returns a pointer to a `ghw.MemoryInfo` struct.
+`ghw.MemoryInfo` is a superset of `ghw.MemoryArea`. Thus, it contains all the
+fields found in the `ghw.MemoryArea` (replicated for clarity) plus some:
+
+* `ghw.MemoryInfo.TotalPhysicalBytes` contains the amount of physical memory on
+ the host
+* `ghw.MemoryInfo.TotalUsableBytes` contains the amount of memory the
+ system can actually use. Usable memory accounts for things like the kernel's
+ resident memory size and some reserved system bits
+* `ghw.MemoryInfo.SupportedPageSizes` is an array of integers representing the
+ size, in bytes, of memory pages the system supports
+* `ghw.MemoryInfo.Modules` is an array of pointers to `ghw.MemoryModule`
+ structs, one for each physical [DIMM](https://en.wikipedia.org/wiki/DIMM).
+ Currently, this information is only included on Windows, with Linux support
+ [planned](https://github.com/jaypipes/ghw/pull/171#issuecomment-597082409).
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ memory, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ fmt.Println(memory.String())
+}
+```
+
+Example output from my personal workstation:
+
+```
+memory (24GB physical, 24GB usable)
+```
+
+#### Physical versus Usable Memory
+
+There has been [some](https://github.com/jaypipes/ghw/pull/171)
+[confusion](https://github.com/jaypipes/ghw/issues/183) regarding the
+difference between the total physical bytes versus total usable bytes of
+memory.
+
+Some of this confusion has been due to a misunderstanding of the term "usable".
+As mentioned [above](#inspecting--monitoring), `ghw` does inspection of the
+system's capacity.
+
+A host computer has two capacities when it comes to RAM. The first capacity is
+the amount of RAM that is contained in all memory banks (DIMMs) that are
+attached to the motherboard. `ghw.MemoryInfo.TotalPhysicalBytes` refers to this
+first capacity.
+
+There is a (usually small) amount of RAM that is consumed by the bootloader
+before the operating system is started (booted). Once the bootloader has booted
+the operating system, the amount of RAM that may be used by the operating
+system and its applications is fixed. `ghw.MemoryInfo.TotalUsableBytes` refers
+to this second capacity.
+
+You can determine the amount of RAM that the bootloader used (that is not made
+available to the operating system) by subtracting
+`ghw.MemoryInfo.TotalUsableBytes` from `ghw.MemoryInfo.TotalPhysicalBytes`:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ memory, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ phys := memory.TotalPhysicalBytes
+ usable := memory.TotalUsableBytes
+
+ fmt.Printf("The bootloader consumes %d bytes of RAM\n", phys - usable)
+}
+```
+
+Example output from my personal workstation booted into a Windows10 operating
+system with a Linux GRUB bootloader:
+
+```
+The bootloader consumes 3832720 bytes of RAM
+```
+
+### CPU
+
+The `ghw.CPU()` function returns a `ghw.CPUInfo` struct that contains
+information about the CPUs on the host system.
+
+`ghw.CPUInfo` contains the following fields:
+
+* `ghw.CPUInfo.TotalCores` has the total number of physical cores the host
+ system contains
+* `ghw.CPUInfo.TotalThreads` has the total number of hardware threads the
+ host system contains
+* `ghw.CPUInfo.Processors` is an array of `ghw.Processor` structs, one for each
+ physical processor package contained in the host
+
+Each `ghw.Processor` struct contains a number of fields:
+
+* `ghw.Processor.ID` is the physical processor `uint32` ID according to the
+ system
+* `ghw.Processor.NumCores` is the number of physical cores in the processor
+ package
+* `ghw.Processor.NumThreads` is the number of hardware threads in the processor
+ package
+* `ghw.Processor.Vendor` is a string containing the vendor name
+* `ghw.Processor.Model` is a string containing the vendor's model name
+* `ghw.Processor.Capabilities` is an array of strings indicating the features
+ the processor has enabled
+* `ghw.Processor.Cores` is an array of `ghw.ProcessorCore` structs that are
+ packed onto this physical processor
+
+A `ghw.ProcessorCore` has the following fields:
+
+* `ghw.ProcessorCore.ID` is the `uint32` identifier that the host gave this
+ core. Note that this does *not* necessarily equate to a zero-based index of
+ the core within a physical package. For example, the core IDs for an Intel Core
+ i7 are 0, 1, 2, 8, 9, and 10
+* `ghw.ProcessorCore.Index` is the zero-based index of the core on the physical
+ processor package
+* `ghw.ProcessorCore.NumThreads` is the number of hardware threads associated
+ with the core
+* `ghw.ProcessorCore.LogicalProcessors` is an array of logical processor IDs
+ assigned to any processing unit for the core
+
+```go
+package main
+
+import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ cpu, err := ghw.CPU()
+ if err != nil {
+ fmt.Printf("Error getting CPU info: %v", err)
+ }
+
+ fmt.Printf("%v\n", cpu)
+
+ for _, proc := range cpu.Processors {
+ fmt.Printf(" %v\n", proc)
+ for _, core := range proc.Cores {
+ fmt.Printf(" %v\n", core)
+ }
+ if len(proc.Capabilities) > 0 {
+ // pretty-print the (large) block of capability strings into rows
+ // of 6 capability strings
+ rows := int(math.Ceil(float64(len(proc.Capabilities)) / float64(6)))
+ for row := 1; row < rows; row = row + 1 {
+ rowStart := (row * 6) - 1
+ rowEnd := int(math.Min(float64(rowStart+6), float64(len(proc.Capabilities))))
+ rowElems := proc.Capabilities[rowStart:rowEnd]
+ capStr := strings.Join(rowElems, " ")
+ if row == 1 {
+ fmt.Printf(" capabilities: [%s\n", capStr)
+ } else if rowEnd < len(proc.Capabilities) {
+ fmt.Printf(" %s\n", capStr)
+ } else {
+ fmt.Printf(" %s]\n", capStr)
+ }
+ }
+ }
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+cpu (1 physical package, 6 cores, 12 hardware threads)
+ physical package #0 (6 cores, 12 hardware threads)
+ processor core #0 (2 threads), logical processors [0 6]
+ processor core #1 (2 threads), logical processors [1 7]
+ processor core #2 (2 threads), logical processors [2 8]
+ processor core #3 (2 threads), logical processors [3 9]
+ processor core #4 (2 threads), logical processors [4 10]
+ processor core #5 (2 threads), logical processors [5 11]
+ capabilities: [msr pae mce cx8 apic sep
+ mtrr pge mca cmov pat pse36
+ clflush dts acpi mmx fxsr sse
+ sse2 ss ht tm pbe syscall
+ nx pdpe1gb rdtscp lm constant_tsc arch_perfmon
+ pebs bts rep_good nopl xtopology nonstop_tsc
+ cpuid aperfmperf pni pclmulqdq dtes64 monitor
+ ds_cpl vmx est tm2 ssse3 cx16
+ xtpr pdcm pcid sse4_1 sse4_2 popcnt
+ aes lahf_lm pti retpoline tpr_shadow vnmi
+ flexpriority ept vpid dtherm ida arat]
+```
+
+### Block storage
+
+Information about the host computer's local block storage is returned from the
+`ghw.Block()` function. This function returns a pointer to a `ghw.BlockInfo`
+struct.
+
+The `ghw.BlockInfo` struct contains two fields:
+
+* `ghw.BlockInfo.TotalPhysicalBytes` contains the amount of physical block
+ storage on the host
+* `ghw.BlockInfo.Disks` is an array of pointers to `ghw.Disk` structs, one for
+ each disk drive found by the system
+
+Each `ghw.Disk` struct contains the following fields:
+
+* `ghw.Disk.Name` contains a string with the short name of the disk, e.g. "sda"
+* `ghw.Disk.SizeBytes` contains the amount of storage the disk provides
+* `ghw.Disk.PhysicalBlockSizeBytes` contains the size of the physical blocks
+ used on the disk, in bytes
+* `ghw.Disk.IsRemovable` contains a boolean indicating if the disk drive is
+ removable
+* `ghw.Disk.DriveType` is the type of drive. It is of type `ghw.DriveType`,
+  which has a `ghw.DriveType.String()` method that can be called to return a
+  string representation of the drive type. This string will be "HDD", "FDD",
+  "ODD", or "SSD", which correspond to a hard disk drive (rotational), floppy
+  drive, optical (CD/DVD) drive and solid-state drive, respectively.
+* `ghw.Disk.StorageController` is the type of storage controller/drive. It is
+  of type `ghw.StorageController`, which has a `ghw.StorageController.String()`
+  method that can be called to return a string representation of the storage
+  controller. This string will be "SCSI", "IDE", "virtio", "MMC", or "NVMe"
+* `ghw.Disk.NUMANodeID` is the numeric index of the NUMA node this disk is
+ local to, or -1
+* `ghw.Disk.Vendor` contains a string with the name of the hardware vendor for
+ the disk drive
+* `ghw.Disk.Model` contains a string with the vendor-assigned disk model name
+* `ghw.Disk.SerialNumber` contains a string with the disk's serial number
+* `ghw.Disk.WWN` contains a string with the disk's
+ [World Wide Name](https://en.wikipedia.org/wiki/World_Wide_Name)
+* `ghw.Disk.Partitions` contains an array of pointers to `ghw.Partition`
+ structs, one for each partition on the disk
+
+Each `ghw.Partition` struct contains these fields:
+
+* `ghw.Partition.Name` contains a string with the short name of the partition,
+ e.g. "sda1"
+* `ghw.Partition.Label` contains the label for the partition itself. On Linux
+ systems, this is derived from the `ID_PART_ENTRY_NAME` udev entry for the
+ partition.
+* `ghw.Partition.FilesystemLabel` contains the label for the filesystem housed
+ on the partition. On Linux systems, this is derived from the `ID_FS_NAME`
+ udev entry for the partition.
+* `ghw.Partition.SizeBytes` contains the amount of storage the partition
+ provides
+* `ghw.Partition.MountPoint` contains a string with the partition's mount
+ point, or "" if no mount point was discovered
+* `ghw.Partition.Type` contains a string indicating the filesystem type for the
+  partition, or "" if the system could not determine the type
+* `ghw.Partition.IsReadOnly` is a bool indicating whether the partition is
+  read-only
+* `ghw.Partition.Disk` is a pointer to the `ghw.Disk` object associated with
+ the partition. This will be `nil` if the `ghw.Partition` struct was returned
+ by the `ghw.DiskPartitions()` library function.
+* `ghw.Partition.UUID` is a string containing the partition UUID on Linux and
+  MacOS, and is empty on Windows. On Linux systems, this is derived from the
+  `ID_PART_ENTRY_UUID` udev entry for the partition.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ block, err := ghw.Block()
+ if err != nil {
+ fmt.Printf("Error getting block storage info: %v", err)
+ }
+
+ fmt.Printf("%v\n", block)
+
+ for _, disk := range block.Disks {
+ fmt.Printf(" %v\n", disk)
+ for _, part := range disk.Partitions {
+ fmt.Printf(" %v\n", part)
+ }
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+block storage (1 disk, 2TB physical storage)
+ sda HDD (2TB) SCSI [@pci-0000:04:00.0-scsi-0:1:0:0 (node #0)] vendor=LSI model=Logical_Volume serial=600508e000000000f8253aac9a1abd0c WWN=0x600508e000000000f8253aac9a1abd0c
+ /dev/sda1 (100MB)
+ /dev/sda2 (187GB)
+ /dev/sda3 (449MB)
+ /dev/sda4 (1KB)
+ /dev/sda5 (15GB)
+ /dev/sda6 (2TB) [ext4] mounted@/
+```
+
+> Note that `ghw` looks in the udev runtime database for some information. If
+> you are using `ghw` in a container, remember to bind mount `/dev/disk` and
+> `/run` into your container, otherwise `ghw` won't be able to query the udev
+> DB or sysfs paths for information.
+
+### Topology
+
+> **NOTE**: Topology support is currently Linux-only. Windows support is
+> [planned](https://github.com/jaypipes/ghw/issues/166).
+
+Information about the host computer's architecture (NUMA vs. SMP), the host's
+node layout and processor caches can be retrieved from the `ghw.Topology()`
+function. This function returns a pointer to a `ghw.TopologyInfo` struct.
+
+The `ghw.TopologyInfo` struct contains two fields:
+
+* `ghw.TopologyInfo.Architecture` contains an enum with the value `ghw.NUMA` or
+ `ghw.SMP` depending on what the topology of the system is
+* `ghw.TopologyInfo.Nodes` is an array of pointers to `ghw.TopologyNode`
+ structs, one for each topology node (typically physical processor package)
+ found by the system
+
+Each `ghw.TopologyNode` struct contains the following fields:
+
+* `ghw.TopologyNode.ID` is the system's `uint32` identifier for the node
+* `ghw.TopologyNode.Cores` is an array of pointers to `ghw.ProcessorCore` structs that
+ are contained in this node
+* `ghw.TopologyNode.Caches` is an array of pointers to `ghw.MemoryCache` structs that
+ represent the low-level caches associated with processors and cores on the
+ system
+* `ghw.TopologyNode.Distance` is an array of distances between NUMA nodes as reported
+ by the system.
+* `ghw.TopologyNode.Memory` is a struct describing the memory attached to this node.
+ Please refer to the documentation of `ghw.MemoryArea`.
+
+See above in the [CPU](#cpu) section for information about the
+`ghw.ProcessorCore` struct and how to use and query it.
+
+Each `ghw.MemoryCache` struct contains the following fields:
+
+* `ghw.MemoryCache.Type` is an enum that contains one of `ghw.DATA`,
+ `ghw.INSTRUCTION` or `ghw.UNIFIED` depending on whether the cache stores CPU
+ instructions, program data, or both
+* `ghw.MemoryCache.Level` is a positive integer indicating how close the cache
+ is to the processor
+* `ghw.MemoryCache.SizeBytes` is an integer containing the number of bytes the
+ cache can contain
+* `ghw.MemoryCache.LogicalProcessors` is an array of integers representing the
+ logical processors that use the cache
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ topology, err := ghw.Topology()
+ if err != nil {
+ fmt.Printf("Error getting topology info: %v", err)
+ }
+
+ fmt.Printf("%v\n", topology)
+
+ for _, node := range topology.Nodes {
+ fmt.Printf(" %v\n", node)
+ for _, cache := range node.Caches {
+ fmt.Printf(" %v\n", cache)
+ }
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+topology SMP (1 nodes)
+ node #0 (6 cores)
+ L1i cache (32 KB) shared with logical processors: 3,9
+ L1i cache (32 KB) shared with logical processors: 2,8
+ L1i cache (32 KB) shared with logical processors: 11,5
+ L1i cache (32 KB) shared with logical processors: 10,4
+ L1i cache (32 KB) shared with logical processors: 0,6
+ L1i cache (32 KB) shared with logical processors: 1,7
+ L1d cache (32 KB) shared with logical processors: 11,5
+ L1d cache (32 KB) shared with logical processors: 10,4
+ L1d cache (32 KB) shared with logical processors: 3,9
+ L1d cache (32 KB) shared with logical processors: 1,7
+ L1d cache (32 KB) shared with logical processors: 0,6
+ L1d cache (32 KB) shared with logical processors: 2,8
+ L2 cache (256 KB) shared with logical processors: 2,8
+ L2 cache (256 KB) shared with logical processors: 3,9
+ L2 cache (256 KB) shared with logical processors: 0,6
+ L2 cache (256 KB) shared with logical processors: 10,4
+ L2 cache (256 KB) shared with logical processors: 1,7
+ L2 cache (256 KB) shared with logical processors: 11,5
+ L3 cache (12288 KB) shared with logical processors: 0,1,10,11,2,3,4,5,6,7,8,9
+```
+
+### Network
+
+Information about the host computer's networking hardware is returned from the
+`ghw.Network()` function. This function returns a pointer to a
+`ghw.NetworkInfo` struct.
+
+The `ghw.NetworkInfo` struct contains one field:
+
+* `ghw.NetworkInfo.NICs` is an array of pointers to `ghw.NIC` structs, one
+  for each network interface controller found for the system
+
+Each `ghw.NIC` struct contains the following fields:
+
+* `ghw.NIC.Name` is the system's identifier for the NIC
+* `ghw.NIC.MacAddress` is the MAC address for the NIC, if any
+* `ghw.NIC.IsVirtual` is a boolean indicating if the NIC is a virtualized
+ device
+* `ghw.NIC.Capabilities` is an array of pointers to `ghw.NICCapability` structs
+  that describe the things the NIC supports. These capabilities match the
+  values returned by the `ethtool -k <device>` call on Linux
+* `ghw.NIC.PCIAddress` is the PCI device address of the device backing the NIC.
+  This is non-nil only if the backing device is indeed a PCI device; more
+  backing device types (e.g. USB) will be added in future versions.
+
+The `ghw.NICCapability` struct contains the following fields:
+
+* `ghw.NICCapability.Name` is the string name of the capability (e.g.
+ "tcp-segmentation-offload")
+* `ghw.NICCapability.IsEnabled` is a boolean indicating whether the capability
+ is currently enabled/active on the NIC
+* `ghw.NICCapability.CanEnable` is a boolean indicating whether the capability
+ may be enabled
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ net, err := ghw.Network()
+ if err != nil {
+ fmt.Printf("Error getting network info: %v", err)
+ }
+
+ fmt.Printf("%v\n", net)
+
+ for _, nic := range net.NICs {
+ fmt.Printf(" %v\n", nic)
+
+ enabledCaps := make([]int, 0)
+ for x, cap := range nic.Capabilities {
+ if cap.IsEnabled {
+ enabledCaps = append(enabledCaps, x)
+ }
+ }
+ if len(enabledCaps) > 0 {
+ fmt.Printf(" enabled capabilities:\n")
+ for _, x := range enabledCaps {
+ fmt.Printf(" - %s\n", nic.Capabilities[x].Name)
+ }
+ }
+ }
+}
+```
+
+Example output from my personal laptop:
+
+```
+net (3 NICs)
+ docker0
+ enabled capabilities:
+ - tx-checksumming
+ - tx-checksum-ip-generic
+ - scatter-gather
+ - tx-scatter-gather
+ - tx-scatter-gather-fraglist
+ - tcp-segmentation-offload
+ - tx-tcp-segmentation
+ - tx-tcp-ecn-segmentation
+ - tx-tcp-mangleid-segmentation
+ - tx-tcp6-segmentation
+ - udp-fragmentation-offload
+ - generic-segmentation-offload
+ - generic-receive-offload
+ - tx-vlan-offload
+ - highdma
+ - tx-lockless
+ - netns-local
+ - tx-gso-robust
+ - tx-fcoe-segmentation
+ - tx-gre-segmentation
+ - tx-gre-csum-segmentation
+ - tx-ipxip4-segmentation
+ - tx-ipxip6-segmentation
+ - tx-udp_tnl-segmentation
+ - tx-udp_tnl-csum-segmentation
+ - tx-gso-partial
+ - tx-sctp-segmentation
+ - tx-esp-segmentation
+ - tx-vlan-stag-hw-insert
+ enp58s0f1
+ enabled capabilities:
+ - rx-checksumming
+ - generic-receive-offload
+ - rx-vlan-offload
+ - tx-vlan-offload
+ - highdma
+ wlp59s0
+ enabled capabilities:
+ - scatter-gather
+ - tx-scatter-gather
+ - generic-segmentation-offload
+ - generic-receive-offload
+ - highdma
+ - netns-local
+```
+
+### PCI
+
+`ghw` contains a PCI database inspection and querying facility that allows
+developers to not only gather information about devices on a local PCI bus but
+also query for information about hardware device classes, vendor and product
+information.
+
+**NOTE**: Parsing of the PCI-IDS file database is provided by the separate
+[github.com/jaypipes/pcidb library](http://github.com/jaypipes/pcidb). You can
+read that library's README for more information about the various structs that
+are exposed on the `ghw.PCIInfo` struct.
+
+The `ghw.PCI()` function returns a `ghw.PCIInfo` struct. The `ghw.PCIInfo`
+struct contains a number of fields that may be queried for PCI information:
+
+* `ghw.PCIInfo.Devices` is a slice of pointers to `ghw.PCIDevice` structs that
+ describe the PCI devices on the host system
+* `ghw.PCIInfo.Classes` is a map, keyed by the PCI class ID (a hex-encoded
+ string) of pointers to `pcidb.Class` structs, one for each class of PCI
+ device known to `ghw`
+ (**DEPRECATED**, will be removed in `ghw` `v1.0`. Use the
+ `github.com/jaypipes/pcidb` library for exploring PCI database information)
+* `ghw.PCIInfo.Vendors` is a map, keyed by the PCI vendor ID (a hex-encoded
+ string) of pointers to `pcidb.Vendor` structs, one for each PCI vendor
+ known to `ghw`
+ (**DEPRECATED**, will be removed in `ghw` `v1.0`. Use the
+ `github.com/jaypipes/pcidb` library for exploring PCI database information)
+* `ghw.PCIInfo.Products` is a map, keyed by the PCI product ID (a hex-encoded
+ string) of pointers to `pcidb.Product` structs, one for each PCI product
+ known to `ghw`
+ (**DEPRECATED**, will be removed in `ghw` `v1.0`. Use the
+ `github.com/jaypipes/pcidb` library for exploring PCI database information)
+
+**NOTE**: PCI products are often referred to by their "device ID". We use
+the term "product ID" in `ghw` because it more accurately reflects what the
+identifier is for: a specific product line produced by the vendor.
+
+The `ghw.PCIDevice` struct has the following fields:
+
+* `ghw.PCIDevice.Vendor` is a pointer to a `pcidb.Vendor` struct that
+ describes the device's primary vendor. This will always be non-nil.
+* `ghw.PCIDevice.Product` is a pointer to a `pcidb.Product` struct that
+ describes the device's primary product. This will always be non-nil.
+* `ghw.PCIDevice.Subsystem` is a pointer to a `pcidb.Product` struct that
+ describes the device's secondary/sub-product. This will always be non-nil.
+* `ghw.PCIDevice.Class` is a pointer to a `pcidb.Class` struct that
+ describes the device's class. This will always be non-nil.
+* `ghw.PCIDevice.Subclass` is a pointer to a `pcidb.Subclass` struct
+ that describes the device's subclass. This will always be non-nil.
+* `ghw.PCIDevice.ProgrammingInterface` is a pointer to a
+ `pcidb.ProgrammingInterface` struct that describes the device subclass'
+ programming interface. This will always be non-nil.
+* `ghw.PCIDevice.Driver` is a string representing the device driver the
+  system is using to handle this device. It can be an empty string if this
+  information is not available. A missing value does not mean the device is
+  not functioning; it only means that `ghw` was not able to retrieve this
+  information.
+
+The `ghw.PCIAddress` struct (an alias for the `ghw.pci.address.Address`
+struct) contains the PCI address fields. It has a `ghw.PCIAddress.String()`
+method that returns the canonical Domain:Bus:Device.Function ([D]BDF)
+representation of the address.
+
+The `ghw.PCIAddress` struct has the following fields:
+
+* `ghw.PCIAddress.Domain` is a string representing the PCI domain component of
+ the address.
+* `ghw.PCIAddress.Bus` is a string representing the PCI bus component of
+ the address.
+* `ghw.PCIAddress.Device` is a string representing the PCI device component of
+ the address.
+* `ghw.PCIAddress.Function` is a string representing the PCI function component of
+ the address.
+
+**NOTE**: Older versions (pre-`v0.9.0`) erroneously referred to the `Device`
+field as the `Slot` field. As noted by [@pearsonk](https://github.com/pearsonk)
+in [#220](https://github.com/jaypipes/ghw/issues/220), this was a misnomer.
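+
+A minimal sketch of these fields in use, assuming (as the field list above
+suggests) that the struct can be populated directly with its string fields;
+the address values below are illustrative only:
+
+```go
+package main
+
+import (
+    "fmt"
+
+    "github.com/jaypipes/ghw"
+)
+
+func main() {
+    // Assumption: ghw.PCIAddress is an alias for a plain struct whose string
+    // fields (Domain, Bus, Device, Function) can be set directly, per the
+    // field list above. The values here are illustrative only.
+    addr := &ghw.PCIAddress{
+        Domain:   "0000",
+        Bus:      "03",
+        Device:   "00",
+        Function: "0",
+    }
+
+    // String() returns the canonical Domain:Bus:Device.Function ([D]BDF) form.
+    fmt.Printf("device %s.%s on bus %s -> %s\n", addr.Device, addr.Function, addr.Bus, addr.String())
+}
+```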
+
+#### Finding a PCI device by PCI address
+
+In addition to the above information, the `ghw.PCIInfo` struct has the
+following method:
+
+* `ghw.PCIInfo.GetDevice(address string)`
+
+The following code snippet shows how to iterate over the `ghw.PCIInfo.Devices`
+slice and output a simple list of PCI address and vendor/product information:
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+ fmt.Printf("host PCI devices:\n")
+ fmt.Println("====================================================")
+
+ for _, device := range pci.Devices {
+ vendor := device.Vendor
+ vendorName := vendor.Name
+ if len(vendor.Name) > 20 {
+ vendorName = string([]byte(vendorName)[0:17]) + "..."
+ }
+ product := device.Product
+ productName := product.Name
+ if len(product.Name) > 40 {
+ productName = string([]byte(productName)[0:37]) + "..."
+ }
+ fmt.Printf("%-12s\t%-20s\t%-40s\n", device.Address, vendorName, productName)
+ }
+}
+```
+
+On my local workstation, the output of the above looks like the following:
+
+```
+host PCI devices:
+====================================================
+0000:00:00.0 Intel Corporation 5520/5500/X58 I/O Hub to ESI Port
+0000:00:01.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:02.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:03.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:07.0 Intel Corporation 5520/5500/X58 I/O Hub PCI Express Roo...
+0000:00:10.0 Intel Corporation 7500/5520/5500/X58 Physical and Link ...
+0000:00:10.1 Intel Corporation 7500/5520/5500/X58 Routing and Protoc...
+0000:00:14.0 Intel Corporation 7500/5520/5500/X58 I/O Hub System Man...
+0000:00:14.1 Intel Corporation 7500/5520/5500/X58 I/O Hub GPIO and S...
+0000:00:14.2 Intel Corporation 7500/5520/5500/X58 I/O Hub Control St...
+0000:00:14.3 Intel Corporation 7500/5520/5500/X58 I/O Hub Throttle R...
+0000:00:19.0 Intel Corporation 82567LF-2 Gigabit Network Connection
+0000:00:1a.0 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1a.1 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1a.2 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1a.7 Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Cont...
+0000:00:1b.0 Intel Corporation 82801JI (ICH10 Family) HD Audio Contr...
+0000:00:1c.0 Intel Corporation 82801JI (ICH10 Family) PCI Express Ro...
+0000:00:1c.1 Intel Corporation 82801JI (ICH10 Family) PCI Express Po...
+0000:00:1c.4 Intel Corporation 82801JI (ICH10 Family) PCI Express Ro...
+0000:00:1d.0 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1d.1 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1d.2 Intel Corporation 82801JI (ICH10 Family) USB UHCI Contr...
+0000:00:1d.7 Intel Corporation 82801JI (ICH10 Family) USB2 EHCI Cont...
+0000:00:1e.0 Intel Corporation 82801 PCI Bridge
+0000:00:1f.0 Intel Corporation 82801JIR (ICH10R) LPC Interface Contr...
+0000:00:1f.2 Intel Corporation 82801JI (ICH10 Family) SATA AHCI Cont...
+0000:00:1f.3 Intel Corporation 82801JI (ICH10 Family) SMBus Controller
+0000:01:00.0 NEC Corporation uPD720200 USB 3.0 Host Controller
+0000:02:00.0 Marvell Technolog... 88SE9123 PCIe SATA 6.0 Gb/s controller
+0000:02:00.1 Marvell Technolog... 88SE912x IDE Controller
+0000:03:00.0 NVIDIA Corporation GP107 [GeForce GTX 1050 Ti]
+0000:03:00.1 NVIDIA Corporation UNKNOWN
+0000:04:00.0 LSI Logic / Symbi... SAS2004 PCI-Express Fusion-MPT SAS-2 ...
+0000:06:00.0 Qualcomm Atheros AR5418 Wireless Network Adapter [AR50...
+0000:08:03.0 LSI Corporation FW322/323 [TrueFire] 1394a Controller
+0000:3f:00.0 Intel Corporation UNKNOWN
+0000:3f:00.1 Intel Corporation Xeon 5600 Series QuickPath Architectu...
+0000:3f:02.0 Intel Corporation Xeon 5600 Series QPI Link 0
+0000:3f:02.1 Intel Corporation Xeon 5600 Series QPI Physical 0
+0000:3f:02.2 Intel Corporation Xeon 5600 Series Mirror Port Link 0
+0000:3f:02.3 Intel Corporation Xeon 5600 Series Mirror Port Link 1
+0000:3f:03.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:03.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:03.4 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:04.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:05.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.0 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.1 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.2 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+0000:3f:06.3 Intel Corporation Xeon 5600 Series Integrated Memory Co...
+```
+
+The following code snippet shows how to call the `ghw.PCIInfo.GetDevice()`
+method and use its returned `ghw.PCIDevice` struct pointer:
+
+```go
+package main
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+
+ addr := "0000:00:00.0"
+ if len(os.Args) == 2 {
+ addr = os.Args[1]
+ }
+ fmt.Printf("PCI device information for %s\n", addr)
+ fmt.Println("====================================================")
+ deviceInfo := pci.GetDevice(addr)
+ if deviceInfo == nil {
+ fmt.Printf("could not retrieve PCI device information for %s\n", addr)
+ return
+ }
+
+ vendor := deviceInfo.Vendor
+ fmt.Printf("Vendor: %s [%s]\n", vendor.Name, vendor.ID)
+ product := deviceInfo.Product
+ fmt.Printf("Product: %s [%s]\n", product.Name, product.ID)
+ subsystem := deviceInfo.Subsystem
+ subvendor := pci.Vendors[subsystem.VendorID]
+ subvendorName := "UNKNOWN"
+ if subvendor != nil {
+ subvendorName = subvendor.Name
+ }
+ fmt.Printf("Subsystem: %s [%s] (Subvendor: %s)\n", subsystem.Name, subsystem.ID, subvendorName)
+ class := deviceInfo.Class
+ fmt.Printf("Class: %s [%s]\n", class.Name, class.ID)
+ subclass := deviceInfo.Subclass
+ fmt.Printf("Subclass: %s [%s]\n", subclass.Name, subclass.ID)
+ progIface := deviceInfo.ProgrammingInterface
+ fmt.Printf("Programming Interface: %s [%s]\n", progIface.Name, progIface.ID)
+}
+```
+
+Here's a sample output from my local workstation:
+
+```
+PCI device information for 0000:03:00.0
+====================================================
+Vendor: NVIDIA Corporation [10de]
+Product: GP107 [GeForce GTX 1050 Ti] [1c82]
+Subsystem: UNKNOWN [8613] (Subvendor: ASUSTeK Computer Inc.)
+Class: Display controller [03]
+Subclass: VGA compatible controller [00]
+Programming Interface: VGA controller [00]
+```
+
+### GPU
+
+Information about the host computer's graphics hardware is returned from the
+`ghw.GPU()` function. This function returns a pointer to a `ghw.GPUInfo`
+struct.
+
+The `ghw.GPUInfo` struct contains one field:
+
+* `ghw.GPUInfo.GraphicCards` is an array of pointers to `ghw.GraphicsCard`
+  structs, one for each graphics card found for the system
+
+Each `ghw.GraphicsCard` struct contains the following fields:
+
+* `ghw.GraphicsCard.Index` is the system's numeric zero-based index for the
+ card on the bus
+* `ghw.GraphicsCard.Address` is the PCI address for the graphics card
+* `ghw.GraphicsCard.DeviceInfo` is a pointer to a `ghw.PCIDevice` struct
+ describing the graphics card. This may be `nil` if no PCI device information
+ could be determined for the card.
+* `ghw.GraphicsCard.Node` is a pointer to a `ghw.TopologyNode` struct that the
+ GPU/graphics card is affined to. On non-NUMA systems, this will always be
+ `nil`.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ gpu, err := ghw.GPU()
+ if err != nil {
+ fmt.Printf("Error getting GPU info: %v", err)
+ }
+
+ fmt.Printf("%v\n", gpu)
+
+ for _, card := range gpu.GraphicsCards {
+ fmt.Printf(" %v\n", card)
+ }
+}
+```
+
+Example output from my personal workstation:
+
+```
+gpu (1 graphics card)
+ card #0 @0000:03:00.0 -> class: 'Display controller' vendor: 'NVIDIA Corporation' product: 'GP107 [GeForce GTX 1050 Ti]'
+```
+
+**NOTE**: You can [read more](#pci) about the fields of the `ghw.PCIDevice`
+struct if you'd like to dig deeper into PCI subsystem and programming interface
+information
+
+**NOTE**: You can [read more](#topology) about the fields of the
+`ghw.TopologyNode` struct if you'd like to dig deeper into the NUMA/topology
+subsystem
+
+### Chassis
+
+The host's chassis information is accessible with the `ghw.Chassis()` function. This
+function returns a pointer to a `ghw.ChassisInfo` struct.
+
+The `ghw.ChassisInfo` struct contains multiple fields:
+
+* `ghw.ChassisInfo.AssetTag` is a string with the chassis asset tag
+* `ghw.ChassisInfo.SerialNumber` is a string with the chassis serial number
+* `ghw.ChassisInfo.Type` is a string with the chassis type *code*
+* `ghw.ChassisInfo.TypeDescription` is a string with a description of the chassis type
+* `ghw.ChassisInfo.Vendor` is a string with the chassis vendor
+* `ghw.ChassisInfo.Version` is a string with the chassis version
+
+**NOTE**: These fields are often missing for non-server hardware. Don't be
+surprised to see empty string or "None" values.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ chassis, err := ghw.Chassis()
+ if err != nil {
+ fmt.Printf("Error getting chassis info: %v", err)
+ }
+
+ fmt.Printf("%v\n", chassis)
+}
+```
+
+Example output from my personal workstation:
+
+```
+chassis type=Desktop vendor=System76 version=thelio-r1
+```
+
+**NOTE**: Some of the values, such as serial numbers, are shown as unknown because
+the Linux kernel by default disallows access to those fields if you're not running
+as root. They will be populated when `ghw` runs as root; otherwise you may see
+warnings like the following:
+
+```
+WARNING: Unable to read chassis_serial: open /sys/class/dmi/id/chassis_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+### BIOS
+
+The host's basic input/output system (BIOS) information is accessible with the
+`ghw.BIOS()` function. This function returns a pointer to a `ghw.BIOSInfo` struct.
+
+The `ghw.BIOSInfo` struct contains multiple fields:
+
+* `ghw.BIOSInfo.Vendor` is a string with the BIOS vendor
+* `ghw.BIOSInfo.Version` is a string with the BIOS version
+* `ghw.BIOSInfo.Date` is a string with the date the BIOS was flashed/created
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ bios, err := ghw.BIOS()
+ if err != nil {
+ fmt.Printf("Error getting BIOS info: %v", err)
+ }
+
+ fmt.Printf("%v\n", bios)
+}
+```
+
+Example output from my personal workstation:
+
+```
+bios vendor=System76 version=F2 Z5 date=11/14/2018
+```
+
+### Baseboard
+
+The host's baseboard information is accessible with the `ghw.Baseboard()` function. This
+function returns a pointer to a `ghw.BaseboardInfo` struct.
+
+The `ghw.BaseboardInfo` struct contains multiple fields:
+
+* `ghw.BaseboardInfo.AssetTag` is a string with the baseboard asset tag
+* `ghw.BaseboardInfo.SerialNumber` is a string with the baseboard serial number
+* `ghw.BaseboardInfo.Vendor` is a string with the baseboard vendor
+* `ghw.BaseboardInfo.Product` is a string with the baseboard name on Linux and
+ Product on Windows
+* `ghw.BaseboardInfo.Version` is a string with the baseboard version
+
+**NOTE**: These fields are often missing for non-server hardware. Don't be
+surprised to see empty string or "None" values.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ baseboard, err := ghw.Baseboard()
+ if err != nil {
+ fmt.Printf("Error getting baseboard info: %v", err)
+ }
+
+ fmt.Printf("%v\n", baseboard)
+}
+```
+
+Example output from my personal workstation:
+
+```
+baseboard vendor=System76 version=thelio-r1
+```
+
+**NOTE**: Some of the values, such as serial numbers, are shown as unknown because
+the Linux kernel by default disallows access to those fields if you're not running
+as root. They will be populated when `ghw` runs as root; otherwise you may see
+warnings like the following:
+
+```
+WARNING: Unable to read board_serial: open /sys/class/dmi/id/board_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+### Product
+
+The host's product information is accessible with the `ghw.Product()` function. This
+function returns a pointer to a `ghw.ProductInfo` struct.
+
+The `ghw.ProductInfo` struct contains multiple fields:
+
+* `ghw.ProductInfo.Family` is a string describing the product family
+* `ghw.ProductInfo.Name` is a string with the product name
+* `ghw.ProductInfo.SerialNumber` is a string with the product serial number
+* `ghw.ProductInfo.UUID` is a string with the product UUID
+* `ghw.ProductInfo.SKU` is a string with the product stock keeping unit (SKU) identifier
+* `ghw.ProductInfo.Vendor` is a string with the product vendor
+* `ghw.ProductInfo.Version` is a string with the product version
+
+**NOTE**: These fields are often missing for non-server hardware. Don't be
+surprised to see empty string, "Default string" or "None" values.
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ product, err := ghw.Product()
+ if err != nil {
+ fmt.Printf("Error getting product info: %v", err)
+ }
+
+ fmt.Printf("%v\n", product)
+}
+```
+
+Example output from my personal workstation:
+
+```
+product family=Default string name=Thelio vendor=System76 sku=Default string version=thelio-r1
+```
+
+**NOTE**: Some of the values, such as serial numbers, are shown as unknown because
+the Linux kernel by default disallows access to those fields if you're not running
+as root. They will be populated when `ghw` runs as root; otherwise you may see
+warnings like the following:
+
+```
+WARNING: Unable to read product_serial: open /sys/class/dmi/id/product_serial: permission denied
+```
+
+You can ignore them or use the [Disabling warning messages](#disabling-warning-messages)
+feature to quiet things down.
+
+## Serialization
+
+All of the `ghw` `XXXInfo` structs -- e.g. `ghw.CPUInfo` -- have two methods
+for producing a serialized JSON or YAML string representation of the contained
+information:
+
+* `JSONString()` returns a string containing the information serialized into
+ JSON. It accepts a single boolean parameter indicating whether to use
+ indentation when outputting the string
+* `YAMLString()` returns a string containing the information serialized into
+ YAML
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+)
+
+func main() {
+ mem, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ fmt.Printf("%s", mem.YAMLString())
+}
+```
+
+The above example code prints the following on my local workstation:
+
+```
+memory:
+ supported_page_sizes:
+ - 1073741824
+ - 2097152
+ total_physical_bytes: 25263415296
+ total_usable_bytes: 25263415296
+```
+
+## Calling external programs
+
+By default, ghw may call external programs, for example `ethtool`, to learn about hardware capabilities.
+In some rare circumstances it may be useful to opt out of this behaviour and rely only on the data
+provided by pseudo-filesystems, like sysfs.
+The most common use case is consuming a snapshot with ghw: the information reported by external tools
+is likely to be inconsistent with the data in the snapshot, because the tools run on a different host
+than the one the snapshot was taken from.
+To prevent ghw from calling external tools, set the environment variable `GHW_DISABLE_TOOLS` to any value,
+or, programmatically, use the `WithDisableTools` option (see the sketch after the warnings below).
+The default behaviour of ghw is to call external tools when available.
+
+**WARNING**:
+- On all platforms, disabling external tools makes ghw return less data.
+  Unless noted otherwise, there is _no fallback flow_ if external tools are disabled.
+- On darwin, disabling external tools disables block support entirely.
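+
+As a minimal sketch of the programmatic route, pass the `WithDisableTools` option
+to any of the `ghw` functions:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	// Rely only on pseudo-filesystems such as sysfs; do not shell out to
+	// external tools like ethtool.
+	block, err := ghw.Block(ghw.WithDisableTools())
+	if err != nil {
+		fmt.Printf("Error getting block storage info: %v", err)
+	}
+
+	fmt.Printf("%v\n", block)
+}
+```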
+
+## Developers
+
+[Contributions](CONTRIBUTING.md) to `ghw` are welcomed! Fork the repo on GitHub
+and submit a pull request with your proposed changes. Or, feel free to log an
+issue for a feature request or bug report.
+
+### Running tests
+
+You can run unit tests easily using the `make test` command, like so:
+
+```
+[jaypipes@uberbox ghw]$ make test
+go test github.com/jaypipes/ghw github.com/jaypipes/ghw/cmd/ghwc
+ok github.com/jaypipes/ghw 0.084s
+? github.com/jaypipes/ghw/cmd/ghwc [no test files]
+```
diff --git a/vendor/github.com/jaypipes/ghw/SNAPSHOT.md b/vendor/github.com/jaypipes/ghw/SNAPSHOT.md
new file mode 100644
index 00000000..696a3ea6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/SNAPSHOT.md
@@ -0,0 +1,45 @@
+# ghw snapshots
+
+For ghw, snapshots are partial clones of the `/proc`, `/sys` (et al.) subtrees copied from arbitrary
+machines, which ghw can consume later. "Partial" because the snapshot doesn't need to contain a
+complete copy of the filesystem subtrees (that is doable but impractical); it only needs to contain
+the paths ghw cares about. The snapshot concept was introduced [to make ghw easier to test](https://github.com/jaypipes/ghw/issues/66).
+
+## Create and consume snapshot
+
+The recommended way to create snapshots for ghw is to use the `ghw-snapshot` tool.
+This tool is maintained by the ghw authors, and snapshots created with this tool are guaranteed to work.
+
+To consume the ghw snapshots, please check the `README.md` document.
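+
+For illustration only, a minimal Go sketch of programmatic consumption - assuming the
+snapshot archive path is supplied through the `Path` field of `SnapshotOptions` (see the
+`README.md` for the authoritative documentation):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/jaypipes/ghw"
+)
+
+func main() {
+	// Point ghw at a previously created snapshot instead of the live host.
+	// NOTE: the Path field is assumed here for illustration purposes.
+	block, err := ghw.Block(ghw.WithSnapshot(ghw.SnapshotOptions{
+		Path: "/path/to/linux-amd64-snapshot.tgz",
+	}))
+	if err != nil {
+		fmt.Printf("Error getting block storage info: %v", err)
+	}
+
+	fmt.Printf("%v\n", block)
+}
+```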
+
+## Snapshot design and definitions
+
+The remainder of this document describes what a snapshot looks like and provides the rationale for the major design decisions.
+Even though this document aims to provide all the information necessary to understand how ghw creates snapshots and what you should
+expect, we also recommend checking the [project issues](https://github.com/jaypipes/ghw/issues) and the `git` history to get the full picture.
+
+### Scope
+
+ghw supports snapshots only on Linux platforms. This restriction may be lifted in future releases.
+Snapshots must be consumable in the following supported ways:
+
+1. (way 1) from docker (or podman), mounting them as volumes. See `hack/run-against-snapshot.sh`
+2. (way 2) using the environment variables `GHW_SNAPSHOT_*`. See `README.md` for the full documentation.
+
+Other combinations are possible, but are unsupported and may stop working any time.
+You should depend only on the supported ways to consume snapshots.
+
+### Snapshot content constraints
+
+Stemming from the use cases, the snapshot content must have the following properties:
+
+0. (constraint 0) MUST contain the same information as the live system (obviously). Whatever you can learn from a live system, you MUST be able to learn from a snapshot.
+1. (constraint 1) MUST NOT require any post-processing before it is consumable besides, obviously, unpacking the `.tar.gz` into the right directory - and pointing ghw to that directory.
+2. (constraint 2) MUST NOT require any special handling or special code path in ghw. From ghw's perspective, running against a live system or against a snapshot should be completely transparent.
+3. (constraint 3) MUST contain only data - no executable code is allowed, ever. This makes snapshots trivially safe to share and consume.
+4. (constraint 4) MUST NOT contain any personally identifiable data. Data gathered into a snapshot is for testing and troubleshooting purposes and should be safe to send to troubleshooters to analyze.
+
+It must be noted that trivially cloning subtrees from `/proc` and `/sys` and creating a tarball out of them doesn't work
+because both pseudo filesystems make use of symlinks, and [docker doesn't really play nice with symlinks](https://github.com/jaypipes/ghw/commit/f8ffd4d24e62eb9017511f072ccf51f13d4a3399).
+This conflicts with (way 1) above.
+
diff --git a/vendor/github.com/jaypipes/ghw/alias.go b/vendor/github.com/jaypipes/ghw/alias.go
new file mode 100644
index 00000000..2e679a96
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/alias.go
@@ -0,0 +1,152 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package ghw
+
+import (
+ "github.com/jaypipes/ghw/pkg/baseboard"
+ "github.com/jaypipes/ghw/pkg/bios"
+ "github.com/jaypipes/ghw/pkg/block"
+ "github.com/jaypipes/ghw/pkg/chassis"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/gpu"
+ "github.com/jaypipes/ghw/pkg/memory"
+ "github.com/jaypipes/ghw/pkg/net"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/pci"
+ pciaddress "github.com/jaypipes/ghw/pkg/pci/address"
+ "github.com/jaypipes/ghw/pkg/product"
+ "github.com/jaypipes/ghw/pkg/topology"
+)
+
+type WithOption = option.Option
+
+var (
+ WithChroot = option.WithChroot
+ WithSnapshot = option.WithSnapshot
+ WithAlerter = option.WithAlerter
+ WithNullAlerter = option.WithNullAlerter
+ // match the existing environ variable to minimize surprises
+ WithDisableWarnings = option.WithNullAlerter
+ WithDisableTools = option.WithDisableTools
+ WithPathOverrides = option.WithPathOverrides
+)
+
+type SnapshotOptions = option.SnapshotOptions
+
+type PathOverrides = option.PathOverrides
+
+type CPUInfo = cpu.Info
+
+var (
+ CPU = cpu.New
+)
+
+type MemoryArea = memory.Area
+type MemoryInfo = memory.Info
+type MemoryCacheType = memory.CacheType
+type MemoryModule = memory.Module
+
+const (
+ MEMORY_CACHE_TYPE_UNIFIED = memory.CACHE_TYPE_UNIFIED
+ MEMORY_CACHE_TYPE_INSTRUCTION = memory.CACHE_TYPE_INSTRUCTION
+ MEMORY_CACHE_TYPE_DATA = memory.CACHE_TYPE_DATA
+)
+
+var (
+ Memory = memory.New
+)
+
+type BlockInfo = block.Info
+type Disk = block.Disk
+type Partition = block.Partition
+
+var (
+ Block = block.New
+)
+
+type DriveType = block.DriveType
+
+const (
+ DRIVE_TYPE_UNKNOWN = block.DRIVE_TYPE_UNKNOWN
+ DRIVE_TYPE_HDD = block.DRIVE_TYPE_HDD
+ DRIVE_TYPE_FDD = block.DRIVE_TYPE_FDD
+ DRIVE_TYPE_ODD = block.DRIVE_TYPE_ODD
+ DRIVE_TYPE_SSD = block.DRIVE_TYPE_SSD
+)
+
+type StorageController = block.StorageController
+
+const (
+ STORAGE_CONTROLLER_UNKNOWN = block.STORAGE_CONTROLLER_UNKNOWN
+ STORAGE_CONTROLLER_IDE = block.STORAGE_CONTROLLER_IDE
+ STORAGE_CONTROLLER_SCSI = block.STORAGE_CONTROLLER_SCSI
+ STORAGE_CONTROLLER_NVME = block.STORAGE_CONTROLLER_NVME
+ STORAGE_CONTROLLER_VIRTIO = block.STORAGE_CONTROLLER_VIRTIO
+ STORAGE_CONTROLLER_MMC = block.STORAGE_CONTROLLER_MMC
+)
+
+type NetworkInfo = net.Info
+type NIC = net.NIC
+type NICCapability = net.NICCapability
+
+var (
+ Network = net.New
+)
+
+type BIOSInfo = bios.Info
+
+var (
+ BIOS = bios.New
+)
+
+type ChassisInfo = chassis.Info
+
+var (
+ Chassis = chassis.New
+)
+
+type BaseboardInfo = baseboard.Info
+
+var (
+ Baseboard = baseboard.New
+)
+
+type TopologyInfo = topology.Info
+type TopologyNode = topology.Node
+
+var (
+ Topology = topology.New
+)
+
+type Architecture = topology.Architecture
+
+const (
+ ARCHITECTURE_SMP = topology.ARCHITECTURE_SMP
+ ARCHITECTURE_NUMA = topology.ARCHITECTURE_NUMA
+)
+
+type PCIInfo = pci.Info
+type PCIAddress = pciaddress.Address
+type PCIDevice = pci.Device
+
+var (
+ PCI = pci.New
+ PCIAddressFromString = pciaddress.FromString
+)
+
+type ProductInfo = product.Info
+
+var (
+ Product = product.New
+)
+
+type GPUInfo = gpu.Info
+type GraphicsCard = gpu.GraphicsCard
+
+var (
+ GPU = gpu.New
+)
diff --git a/vendor/github.com/jaypipes/ghw/doc.go b/vendor/github.com/jaypipes/ghw/doc.go
new file mode 100644
index 00000000..9ae0c30a
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/doc.go
@@ -0,0 +1,314 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+/*
+ package ghw can determine various hardware-related
+ information about the host computer:
+
+ * Memory
+ * CPU
+ * Block storage
+ * Topology
+ * Network
+ * PCI
+ * GPU
+
+ Memory
+
+ Information about the host computer's memory can be retrieved using the
+ Memory function which returns a pointer to a MemoryInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ memory, err := ghw.Memory()
+ if err != nil {
+ fmt.Printf("Error getting memory info: %v", err)
+ }
+
+ fmt.Println(memory.String())
+ }
+
+ CPU
+
+ The CPU function returns a CPUInfo struct that contains information about
+ the CPUs on the host system.
+
+ package main
+
+ import (
+ "fmt"
+ "math"
+ "strings"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ cpu, err := ghw.CPU()
+ if err != nil {
+ fmt.Printf("Error getting CPU info: %v", err)
+ }
+
+ fmt.Printf("%v\n", cpu)
+
+ for _, proc := range cpu.Processors {
+ fmt.Printf(" %v\n", proc)
+ for _, core := range proc.Cores {
+ fmt.Printf(" %v\n", core)
+ }
+ if len(proc.Capabilities) > 0 {
+ // pretty-print the (large) block of capability strings into rows
+ // of 6 capability strings
+ rows := int(math.Ceil(float64(len(proc.Capabilities)) / float64(6)))
+ for row := 1; row < rows; row = row + 1 {
+ rowStart := (row * 6) - 1
+ rowEnd := int(math.Min(float64(rowStart+6), float64(len(proc.Capabilities))))
+ rowElems := proc.Capabilities[rowStart:rowEnd]
+ capStr := strings.Join(rowElems, " ")
+ if row == 1 {
+ fmt.Printf(" capabilities: [%s\n", capStr)
+ } else if rowEnd < len(proc.Capabilities) {
+ fmt.Printf(" %s\n", capStr)
+ } else {
+ fmt.Printf(" %s]\n", capStr)
+ }
+ }
+ }
+ }
+ }
+
+ Block storage
+
+ Information about the host computer's local block storage is returned from
+ the Block function. This function returns a pointer to a BlockInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ block, err := ghw.Block()
+ if err != nil {
+ fmt.Printf("Error getting block storage info: %v", err)
+ }
+
+ fmt.Printf("%v\n", block)
+
+ for _, disk := range block.Disks {
+ fmt.Printf(" %v\n", disk)
+ for _, part := range disk.Partitions {
+ fmt.Printf(" %v\n", part)
+ }
+ }
+ }
+
+ Topology
+
+ Information about the host computer's architecture (NUMA vs. SMP), the
+ host's node layout and processor caches can be retrieved from the Topology
+ function. This function returns a pointer to a TopologyInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ topology, err := ghw.Topology()
+ if err != nil {
+ fmt.Printf("Error getting topology info: %v", err)
+ }
+
+ fmt.Printf("%v\n", topology)
+
+ for _, node := range topology.Nodes {
+ fmt.Printf(" %v\n", node)
+ for _, cache := range node.Caches {
+ fmt.Printf(" %v\n", cache)
+ }
+ }
+ }
+
+ Network
+
+ Information about the host computer's networking hardware is returned from
+ the Network function. This function returns a pointer to a NetworkInfo
+ struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ net, err := ghw.Network()
+ if err != nil {
+ fmt.Printf("Error getting network info: %v", err)
+ }
+
+ fmt.Printf("%v\n", net)
+
+ for _, nic := range net.NICs {
+ fmt.Printf(" %v\n", nic)
+
+ enabledCaps := make([]int, 0)
+ for x, cap := range nic.Capabilities {
+ if cap.IsEnabled {
+ enabledCaps = append(enabledCaps, x)
+ }
+ }
+ if len(enabledCaps) > 0 {
+ fmt.Printf(" enabled capabilities:\n")
+ for _, x := range enabledCaps {
+ fmt.Printf(" - %s\n", nic.Capabilities[x].Name)
+ }
+ }
+ }
+ }
+
+ PCI
+
+ ghw contains a PCI database inspection and querying facility that allows
+ developers to not only gather information about devices on a local PCI bus
+ but also query for information about hardware device classes, vendor and
+ product information.
+
+ **NOTE**: Parsing of the PCI-IDS file database is provided by the separate
+ http://github.com/jaypipes/pcidb library. You can read that library's
+ README for more information about the various structs that are exposed on
+ the PCIInfo struct.
+
+ PCIInfo.ListDevices is used to iterate over a host's PCI devices:
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+ fmt.Printf("host PCI devices:\n")
+ fmt.Println("====================================================")
+ devices := pci.ListDevices()
+ if len(devices) == 0 {
+ fmt.Printf("error: could not retrieve PCI devices\n")
+ return
+ }
+
+ for _, device := range devices {
+ vendor := device.Vendor
+ vendorName := vendor.Name
+ if len(vendor.Name) > 20 {
+ vendorName = string([]byte(vendorName)[0:17]) + "..."
+ }
+ product := device.Product
+ productName := product.Name
+ if len(product.Name) > 40 {
+ productName = string([]byte(productName)[0:37]) + "..."
+ }
+ fmt.Printf("%-12s\t%-20s\t%-40s\n", device.Address, vendorName, productName)
+ }
+ }
+
+ The following code snippet shows how to call the PCIInfo.GetDevice method
+ and use its returned PCIDevice struct pointer:
+
+ package main
+
+ import (
+ "fmt"
+ "os"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ pci, err := ghw.PCI()
+ if err != nil {
+ fmt.Printf("Error getting PCI info: %v", err)
+ }
+
+ addr := "0000:00:00.0"
+ if len(os.Args) == 2 {
+ addr = os.Args[1]
+ }
+ fmt.Printf("PCI device information for %s\n", addr)
+ fmt.Println("====================================================")
+ deviceInfo := pci.GetDevice(addr)
+ if deviceInfo == nil {
+ fmt.Printf("could not retrieve PCI device information for %s\n", addr)
+ return
+ }
+
+ vendor := deviceInfo.Vendor
+ fmt.Printf("Vendor: %s [%s]\n", vendor.Name, vendor.ID)
+ product := deviceInfo.Product
+ fmt.Printf("Product: %s [%s]\n", product.Name, product.ID)
+ subsystem := deviceInfo.Subsystem
+ subvendor := pci.Vendors[subsystem.VendorID]
+ subvendorName := "UNKNOWN"
+ if subvendor != nil {
+ subvendorName = subvendor.Name
+ }
+ fmt.Printf("Subsystem: %s [%s] (Subvendor: %s)\n", subsystem.Name, subsystem.ID, subvendorName)
+ class := deviceInfo.Class
+ fmt.Printf("Class: %s [%s]\n", class.Name, class.ID)
+ subclass := deviceInfo.Subclass
+ fmt.Printf("Subclass: %s [%s]\n", subclass.Name, subclass.ID)
+ progIface := deviceInfo.ProgrammingInterface
+ fmt.Printf("Programming Interface: %s [%s]\n", progIface.Name, progIface.ID)
+ }
+
+ GPU
+
+ Information about the host computer's graphics hardware is returned from
+ the GPU function. This function returns a pointer to a GPUInfo struct.
+
+ package main
+
+ import (
+ "fmt"
+
+ "github.com/jaypipes/ghw"
+ )
+
+ func main() {
+ gpu, err := ghw.GPU()
+ if err != nil {
+ fmt.Printf("Error getting GPU info: %v", err)
+ }
+
+ fmt.Printf("%v\n", gpu)
+
+ for _, card := range gpu.GraphicsCards {
+ fmt.Printf(" %v\n", card)
+ }
+ }
+*/
+package ghw
diff --git a/vendor/github.com/jaypipes/ghw/host.go b/vendor/github.com/jaypipes/ghw/host.go
new file mode 100644
index 00000000..5d82a53a
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/host.go
@@ -0,0 +1,139 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package ghw
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+
+ "github.com/jaypipes/ghw/pkg/baseboard"
+ "github.com/jaypipes/ghw/pkg/bios"
+ "github.com/jaypipes/ghw/pkg/block"
+ "github.com/jaypipes/ghw/pkg/chassis"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/gpu"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/memory"
+ "github.com/jaypipes/ghw/pkg/net"
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/product"
+ "github.com/jaypipes/ghw/pkg/topology"
+)
+
+// HostInfo is a wrapper struct containing information about the host system's
+// memory, block storage, CPU, etc
+type HostInfo struct {
+ ctx *context.Context
+ Memory *memory.Info `json:"memory"`
+ Block *block.Info `json:"block"`
+ CPU *cpu.Info `json:"cpu"`
+ Topology *topology.Info `json:"topology"`
+ Network *net.Info `json:"network"`
+ GPU *gpu.Info `json:"gpu"`
+ Chassis *chassis.Info `json:"chassis"`
+ BIOS *bios.Info `json:"bios"`
+ Baseboard *baseboard.Info `json:"baseboard"`
+ Product *product.Info `json:"product"`
+ PCI *pci.Info `json:"pci"`
+}
+
+// Host returns a pointer to a HostInfo struct that contains fields with
+// information about the host system's CPU, memory, network devices, etc
+func Host(opts ...*WithOption) (*HostInfo, error) {
+ ctx := context.New(opts...)
+
+ memInfo, err := memory.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ blockInfo, err := block.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ cpuInfo, err := cpu.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ topologyInfo, err := topology.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ netInfo, err := net.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ gpuInfo, err := gpu.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ chassisInfo, err := chassis.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ biosInfo, err := bios.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ baseboardInfo, err := baseboard.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ productInfo, err := product.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ pciInfo, err := pci.New(opts...)
+ if err != nil {
+ return nil, err
+ }
+ return &HostInfo{
+ ctx: ctx,
+ CPU: cpuInfo,
+ Memory: memInfo,
+ Block: blockInfo,
+ Topology: topologyInfo,
+ Network: netInfo,
+ GPU: gpuInfo,
+ Chassis: chassisInfo,
+ BIOS: biosInfo,
+ Baseboard: baseboardInfo,
+ Product: productInfo,
+ PCI: pciInfo,
+ }, nil
+}
+
+// String returns a newline-separated output of the HostInfo's component
+// structs' String-ified output
+func (info *HostInfo) String() string {
+ return fmt.Sprintf(
+ "%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n%s\n",
+ info.Block.String(),
+ info.CPU.String(),
+ info.GPU.String(),
+ info.Memory.String(),
+ info.Network.String(),
+ info.Topology.String(),
+ info.Chassis.String(),
+ info.BIOS.String(),
+ info.Baseboard.String(),
+ info.Product.String(),
+ info.PCI.String(),
+ )
+}
+
+// YAMLString returns a string with the host information formatted as YAML
+// under a top-level "host:" key
+func (i *HostInfo) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, i)
+}
+
+// JSONString returns a string with the host information formatted as JSON
+// under a top-level "host:" key
+func (i *HostInfo) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, i, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go
new file mode 100644
index 00000000..ac4bf41a
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard.go
@@ -0,0 +1,80 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// Info defines baseboard release information
+type Info struct {
+ ctx *context.Context
+ AssetTag string `json:"asset_tag"`
+ SerialNumber string `json:"serial_number"`
+ Vendor string `json:"vendor"`
+ Version string `json:"version"`
+ Product string `json:"product"`
+}
+
+func (i *Info) String() string {
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ serialStr := ""
+ if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+ serialStr = " serial=" + i.SerialNumber
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+
+ productStr := ""
+ if i.Product != "" {
+ productStr = " product=" + i.Product
+ }
+
+ return "baseboard" + util.ConcatStrings(
+ vendorStr,
+ serialStr,
+ versionStr,
+ productStr,
+ )
+}
+
+// New returns a pointer to an Info struct containing information about the
+// host's baseboard
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate baseboard information in a top-level
+// "baseboard" YAML/JSON map/object key
+type baseboardPrinter struct {
+ Info *Info `json:"baseboard"`
+}
+
+// YAMLString returns a string with the baseboard information formatted as YAML
+// under a top-level "dmi:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, baseboardPrinter{info})
+}
+
+// JSONString returns a string with the baseboard information formatted as JSON
+// under a top-level "baseboard:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, baseboardPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go
new file mode 100644
index 00000000..c8c598d4
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_linux.go
@@ -0,0 +1,20 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "github.com/jaypipes/ghw/pkg/linuxdmi"
+)
+
+func (i *Info) load() error {
+ i.AssetTag = linuxdmi.Item(i.ctx, "board_asset_tag")
+ i.SerialNumber = linuxdmi.Item(i.ctx, "board_serial")
+ i.Vendor = linuxdmi.Item(i.ctx, "board_vendor")
+ i.Version = linuxdmi.Item(i.ctx, "board_version")
+ i.Product = linuxdmi.Item(i.ctx, "board_name")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go
new file mode 100644
index 00000000..f5b14691
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("baseboardFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go
new file mode 100644
index 00000000..0fb14fbf
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/baseboard/baseboard_windows.go
@@ -0,0 +1,37 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package baseboard
+
+import (
+ "github.com/StackExchange/wmi"
+)
+
+const wqlBaseboard = "SELECT Manufacturer, SerialNumber, Tag, Version, Product FROM Win32_BaseBoard"
+
+type win32Baseboard struct {
+ Manufacturer *string
+ SerialNumber *string
+ Tag *string
+ Version *string
+ Product *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32BaseboardDescriptions []win32Baseboard
+ if err := wmi.Query(wqlBaseboard, &win32BaseboardDescriptions); err != nil {
+ return err
+ }
+ if len(win32BaseboardDescriptions) > 0 {
+ i.AssetTag = *win32BaseboardDescriptions[0].Tag
+ i.SerialNumber = *win32BaseboardDescriptions[0].SerialNumber
+ i.Vendor = *win32BaseboardDescriptions[0].Manufacturer
+ i.Version = *win32BaseboardDescriptions[0].Version
+ i.Product = *win32BaseboardDescriptions[0].Product
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go
new file mode 100644
index 00000000..85a7c64b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios.go
@@ -0,0 +1,77 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// Info defines BIOS release information
+type Info struct {
+ ctx *context.Context
+ Vendor string `json:"vendor"`
+ Version string `json:"version"`
+ Date string `json:"date"`
+}
+
+func (i *Info) String() string {
+
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+ dateStr := ""
+ if i.Date != "" && i.Date != util.UNKNOWN {
+ dateStr = " date=" + i.Date
+ }
+
+ res := fmt.Sprintf(
+ "bios%s%s%s",
+ vendorStr,
+ versionStr,
+ dateStr,
+ )
+ return res
+}
+
+// New returns a pointer to a Info struct containing information
+// about the host's BIOS
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate BIOS information in a top-level
+// "bios" YAML/JSON map/object key
+type biosPrinter struct {
+ Info *Info `json:"bios"`
+}
+
+// YAMLString returns a string with the BIOS information formatted as YAML
+// under a top-level "dmi:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, biosPrinter{info})
+}
+
+// JSONString returns a string with the BIOS information formatted as JSON
+// under a top-level "bios:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, biosPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go
new file mode 100644
index 00000000..9788f4f7
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_linux.go
@@ -0,0 +1,16 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import "github.com/jaypipes/ghw/pkg/linuxdmi"
+
+func (i *Info) load() error {
+ i.Vendor = linuxdmi.Item(i.ctx, "bios_vendor")
+ i.Version = linuxdmi.Item(i.ctx, "bios_version")
+ i.Date = linuxdmi.Item(i.ctx, "bios_date")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go
new file mode 100644
index 00000000..5307b4a0
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("biosFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go
new file mode 100644
index 00000000..778628e9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/bios/bios_windows.go
@@ -0,0 +1,32 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package bios
+
+import (
+ "github.com/StackExchange/wmi"
+)
+
+const wqlBIOS = "SELECT InstallDate, Manufacturer, Version FROM CIM_BIOSElement"
+
+type win32BIOS struct {
+ InstallDate *string
+ Manufacturer *string
+ Version *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32BIOSDescriptions []win32BIOS
+ if err := wmi.Query(wqlBIOS, &win32BIOSDescriptions); err != nil {
+ return err
+ }
+ if len(win32BIOSDescriptions) > 0 {
+ i.Vendor = *win32BIOSDescriptions[0].Manufacturer
+ i.Version = *win32BIOSDescriptions[0].Version
+ i.Date = *win32BIOSDescriptions[0].InstallDate
+ }
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block.go b/vendor/github.com/jaypipes/ghw/pkg/block/block.go
new file mode 100644
index 00000000..a495f69c
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block.go
@@ -0,0 +1,316 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "encoding/json"
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// DriveType describes the general category of drive device
+type DriveType int
+
+const (
+ DRIVE_TYPE_UNKNOWN DriveType = iota
+ DRIVE_TYPE_HDD // Hard disk drive
+ DRIVE_TYPE_FDD // Floppy disk drive
+ DRIVE_TYPE_ODD // Optical disk drive
+ DRIVE_TYPE_SSD // Solid-state drive
+ DRIVE_TYPE_VIRTUAL // virtual drive i.e. loop devices
+)
+
+var (
+ driveTypeString = map[DriveType]string{
+ DRIVE_TYPE_UNKNOWN: "Unknown",
+ DRIVE_TYPE_HDD: "HDD",
+ DRIVE_TYPE_FDD: "FDD",
+ DRIVE_TYPE_ODD: "ODD",
+ DRIVE_TYPE_SSD: "SSD",
+ DRIVE_TYPE_VIRTUAL: "virtual",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `driveTypeString`.
+ // This is done because of the choice we made in
+ // DriveType::MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringDriveType = map[string]DriveType{
+ "unknown": DRIVE_TYPE_UNKNOWN,
+ "hdd": DRIVE_TYPE_HDD,
+ "fdd": DRIVE_TYPE_FDD,
+ "odd": DRIVE_TYPE_ODD,
+ "ssd": DRIVE_TYPE_SSD,
+ "virtual": DRIVE_TYPE_VIRTUAL,
+ }
+)
+
+func (dt DriveType) String() string {
+ return driveTypeString[dt]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (dt DriveType) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(dt.String()))), nil
+}
+
+func (dt *DriveType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringDriveType[key]
+ if !ok {
+ return fmt.Errorf("unknown drive type: %q", key)
+ }
+ *dt = val
+ return nil
+}
+
+// StorageController is a category of block storage controller/driver. It
+// represents more of the physical hardware interface than the storage
+// protocol, which represents more of the software interface.
+//
+// See discussion on https://github.com/jaypipes/ghw/issues/117
+type StorageController int
+
+const (
+ STORAGE_CONTROLLER_UNKNOWN StorageController = iota
+ STORAGE_CONTROLLER_IDE // Integrated Drive Electronics
+ STORAGE_CONTROLLER_SCSI // Small computer system interface
+ STORAGE_CONTROLLER_NVME // Non-volatile Memory Express
+ STORAGE_CONTROLLER_VIRTIO // Virtualized storage controller/driver
+ STORAGE_CONTROLLER_MMC // Multi-media controller (used for mobile phone storage devices)
+ STORAGE_CONTROLLER_LOOP // loop device
+)
+
+var (
+ storageControllerString = map[StorageController]string{
+ STORAGE_CONTROLLER_UNKNOWN: "Unknown",
+ STORAGE_CONTROLLER_IDE: "IDE",
+ STORAGE_CONTROLLER_SCSI: "SCSI",
+ STORAGE_CONTROLLER_NVME: "NVMe",
+ STORAGE_CONTROLLER_VIRTIO: "virtio",
+ STORAGE_CONTROLLER_MMC: "MMC",
+ STORAGE_CONTROLLER_LOOP: "loop",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `storageControllerString`.
+ // This is done/ because of the choice we made in
+ // StorageController::MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringStorageController = map[string]StorageController{
+ "unknown": STORAGE_CONTROLLER_UNKNOWN,
+ "ide": STORAGE_CONTROLLER_IDE,
+ "scsi": STORAGE_CONTROLLER_SCSI,
+ "nvme": STORAGE_CONTROLLER_NVME,
+ "virtio": STORAGE_CONTROLLER_VIRTIO,
+ "mmc": STORAGE_CONTROLLER_MMC,
+ "loop": STORAGE_CONTROLLER_LOOP,
+ }
+)
+
+func (sc StorageController) String() string {
+ return storageControllerString[sc]
+}
+
+func (sc *StorageController) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringStorageController[key]
+ if !ok {
+ return fmt.Errorf("unknown storage controller: %q", key)
+ }
+ *sc = val
+ return nil
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (sc StorageController) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(sc.String()))), nil
+}
+
+// Disk describes a single disk drive on the host system. Disk drives provide
+// raw block storage resources.
+type Disk struct {
+ Name string `json:"name"`
+ SizeBytes uint64 `json:"size_bytes"`
+ PhysicalBlockSizeBytes uint64 `json:"physical_block_size_bytes"`
+ DriveType DriveType `json:"drive_type"`
+ IsRemovable bool `json:"removable"`
+ StorageController StorageController `json:"storage_controller"`
+ BusPath string `json:"bus_path"`
+ // TODO(jaypipes): Convert this to a TopologyNode struct pointer and then
+ // add to serialized output as "numa_node,omitempty"
+ NUMANodeID int `json:"-"`
+ Vendor string `json:"vendor"`
+ Model string `json:"model"`
+ SerialNumber string `json:"serial_number"`
+ WWN string `json:"wwn"`
+ Partitions []*Partition `json:"partitions"`
+ // TODO(jaypipes): Add PCI field for accessing PCI device information
+ // PCI *PCIDevice `json:"pci"`
+}
+
+// Partition describes a logical division of a Disk.
+type Partition struct {
+ Disk *Disk `json:"-"`
+ Name string `json:"name"`
+ Label string `json:"label"`
+ MountPoint string `json:"mount_point"`
+ SizeBytes uint64 `json:"size_bytes"`
+ Type string `json:"type"`
+ IsReadOnly bool `json:"read_only"`
+ UUID string `json:"uuid"` // This would be volume UUID on macOS, PartUUID on linux, empty on Windows
+ FilesystemLabel string `json:"filesystem_label"`
+}
+
+// Info describes all disk drives and partitions in the host system.
+type Info struct {
+ ctx *context.Context
+ // TODO(jaypipes): Deprecate this field and replace with TotalSizeBytes
+ TotalPhysicalBytes uint64 `json:"total_size_bytes"`
+ Disks []*Disk `json:"disks"`
+ Partitions []*Partition `json:"-"`
+}
+
+// New returns a pointer to an Info struct that describes the block storage
+// resources of the host system.
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ tpbs := util.UNKNOWN
+ if i.TotalPhysicalBytes > 0 {
+ tpb := i.TotalPhysicalBytes
+ unit, unitStr := unitutil.AmountString(int64(tpb))
+ tpb = uint64(math.Ceil(float64(tpb) / float64(unit)))
+ tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
+ }
+ dplural := "disks"
+ if len(i.Disks) == 1 {
+ dplural = "disk"
+ }
+ return fmt.Sprintf("block storage (%d %s, %s physical storage)",
+ len(i.Disks), dplural, tpbs)
+}
+
+func (d *Disk) String() string {
+ sizeStr := util.UNKNOWN
+ if d.SizeBytes > 0 {
+ size := d.SizeBytes
+ unit, unitStr := unitutil.AmountString(int64(size))
+ size = uint64(math.Ceil(float64(size) / float64(unit)))
+ sizeStr = fmt.Sprintf("%d%s", size, unitStr)
+ }
+ atNode := ""
+ if d.NUMANodeID >= 0 {
+ atNode = fmt.Sprintf(" (node #%d)", d.NUMANodeID)
+ }
+ vendor := ""
+ if d.Vendor != "" {
+ vendor = " vendor=" + d.Vendor
+ }
+ model := ""
+ if d.Model != util.UNKNOWN {
+ model = " model=" + d.Model
+ }
+ serial := ""
+ if d.SerialNumber != util.UNKNOWN {
+ serial = " serial=" + d.SerialNumber
+ }
+ wwn := ""
+ if d.WWN != util.UNKNOWN {
+ wwn = " WWN=" + d.WWN
+ }
+ removable := ""
+ if d.IsRemovable {
+ removable = " removable=true"
+ }
+ return fmt.Sprintf(
+ "%s %s (%s) %s [@%s%s]%s",
+ d.Name,
+ d.DriveType.String(),
+ sizeStr,
+ d.StorageController.String(),
+ d.BusPath,
+ atNode,
+ util.ConcatStrings(
+ vendor,
+ model,
+ serial,
+ wwn,
+ removable,
+ ),
+ )
+}
+
+func (p *Partition) String() string {
+ typeStr := ""
+ if p.Type != "" {
+ typeStr = fmt.Sprintf("[%s]", p.Type)
+ }
+ mountStr := ""
+ if p.MountPoint != "" {
+ mountStr = fmt.Sprintf(" mounted@%s", p.MountPoint)
+ }
+ sizeStr := util.UNKNOWN
+ if p.SizeBytes > 0 {
+ size := p.SizeBytes
+ unit, unitStr := unitutil.AmountString(int64(size))
+ size = uint64(math.Ceil(float64(size) / float64(unit)))
+ sizeStr = fmt.Sprintf("%d%s", size, unitStr)
+ }
+ return fmt.Sprintf(
+ "%s (%s) %s%s",
+ p.Name,
+ sizeStr,
+ typeStr,
+ mountStr,
+ )
+}
+
+// simple private struct used to encapsulate block information in a top-level
+// "block" YAML/JSON map/object key
+type blockPrinter struct {
+ Info *Info `json:"block" yaml:"block"`
+}
+
+// YAMLString returns a string with the block information formatted as YAML
+// under a top-level "block:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, blockPrinter{i})
+}
+
+// JSONString returns a string with the block information formatted as JSON
+// under a top-level "block:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, blockPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go
new file mode 100644
index 00000000..5115d404
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_darwin.go
@@ -0,0 +1,287 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "path"
+ "strings"
+
+ "github.com/pkg/errors"
+ "howett.net/plist"
+)
+
+type diskOrPartitionPlistNode struct {
+ Content string
+ DeviceIdentifier string
+ DiskUUID string
+ VolumeName string
+ VolumeUUID string
+ Size int64
+ MountPoint string
+ Partitions []diskOrPartitionPlistNode
+ APFSVolumes []diskOrPartitionPlistNode
+}
+
+type diskUtilListPlist struct {
+ AllDisks []string
+ AllDisksAndPartitions []diskOrPartitionPlistNode
+ VolumesFromDisks []string
+ WholeDisks []string
+}
+
+type diskUtilInfoPlist struct {
+ AESHardware bool // true
+ Bootable bool // true
+ BooterDeviceIdentifier string // disk1s2
+ BusProtocol string // PCI-Express
+ CanBeMadeBootable bool // false
+ CanBeMadeBootableRequiresDestroy bool // false
+ Content string // some-uuid-foo-bar
+ DeviceBlockSize int64 // 4096
+ DeviceIdentifier string // disk1s1
+ DeviceNode string // /dev/disk1s1
+ DeviceTreePath string // IODeviceTree:/PCI0@0/RP17@1B/ANS2@0/AppleANS2Controller
+ DiskUUID string // some-uuid-foo-bar
+ Ejectable bool // false
+ EjectableMediaAutomaticUnderSoftwareControl bool // false
+ EjectableOnly bool // false
+ FilesystemName string // APFS
+ FilesystemType string // apfs
+ FilesystemUserVisibleName string // APFS
+ FreeSpace int64 // 343975677952
+ GlobalPermissionsEnabled bool // true
+ IOKitSize int64 // 499963174912
+ IORegistryEntryName string // Macintosh HD
+ Internal bool // true
+ MediaName string //
+ MediaType string // Generic
+ MountPoint string // /
+ ParentWholeDisk string // disk1
+ PartitionMapPartition bool // false
+ RAIDMaster bool // false
+ RAIDSlice bool // false
+ RecoveryDeviceIdentifier string // disk1s3
+ Removable bool // false
+ RemovableMedia bool // false
+ RemovableMediaOrExternalDevice bool // false
+ SMARTStatus string // Verified
+ Size int64 // 499963174912
+ SolidState bool // true
+ SupportsGlobalPermissionsDisable bool // true
+ SystemImage bool // false
+ TotalSize int64 // 499963174912
+ VolumeAllocationBlockSize int64 // 4096
+ VolumeName string // Macintosh HD
+ VolumeSize int64 // 499963174912
+ VolumeUUID string // some-uuid-foo-bar
+ WholeDisk bool // false
+ Writable bool // true
+ WritableMedia bool // true
+ WritableVolume bool // true
+ // also has a SMARTDeviceSpecificKeysMayVaryNotGuaranteed dict with various info
+ // NOTE: VolumeUUID sometimes == DiskUUID, but not always. So far Content is always a different UUID.
+}
+
+type ioregPlist struct {
+ // there's a lot more than just this...
+ ModelNumber string `plist:"Model Number"`
+ SerialNumber string `plist:"Serial Number"`
+ VendorName string `plist:"Vendor Name"`
+}
+
+func getDiskUtilListPlist() (*diskUtilListPlist, error) {
+ out, err := exec.Command("diskutil", "list", "-plist").Output()
+ if err != nil {
+ return nil, errors.Wrap(err, "diskutil list failed")
+ }
+
+ var data diskUtilListPlist
+ if _, err := plist.Unmarshal(out, &data); err != nil {
+ return nil, errors.Wrap(err, "diskutil list plist unmarshal failed")
+ }
+
+ return &data, nil
+}
+
+func getDiskUtilInfoPlist(device string) (*diskUtilInfoPlist, error) {
+ out, err := exec.Command("diskutil", "info", "-plist", device).Output()
+ if err != nil {
+ return nil, errors.Wrapf(err, "diskutil info for %q failed", device)
+ }
+
+ var data diskUtilInfoPlist
+ if _, err := plist.Unmarshal(out, &data); err != nil {
+ return nil, errors.Wrapf(err, "diskutil info plist unmarshal for %q failed", device)
+ }
+
+ return &data, nil
+}
+
+func getIoregPlist(ioDeviceTreePath string) (*ioregPlist, error) {
+ name := path.Base(ioDeviceTreePath)
+
+ args := []string{
+ "ioreg",
+ "-a", // use XML output
+ "-d", "1", // limit device tree output depth to root node
+ "-r", // root device tree at matched node
+ "-n", name, // match by name
+ }
+ out, err := exec.Command(args[0], args[1:]...).Output()
+ if err != nil {
+ return nil, errors.Wrapf(err, "ioreg query for %q failed", ioDeviceTreePath)
+ }
+ if out == nil || len(out) == 0 {
+ return nil, nil
+ }
+
+ var data []ioregPlist
+ if _, err := plist.Unmarshal(out, &data); err != nil {
+ return nil, errors.Wrapf(err, "ioreg unmarshal for %q failed", ioDeviceTreePath)
+ }
+ if len(data) != 1 {
+ err := errors.Errorf("ioreg unmarshal resulted in %d I/O device tree nodes (expected 1)", len(data))
+ return nil, err
+ }
+
+ return &data[0], nil
+}
+
+func makePartition(disk, s diskOrPartitionPlistNode, isAPFS bool) (*Partition, error) {
+ if s.Size < 0 {
+ return nil, errors.Errorf("invalid size %q of partition %q", s.Size, s.DeviceIdentifier)
+ }
+
+ var partType string
+ if isAPFS {
+ partType = "APFS Volume"
+ } else {
+ partType = s.Content
+ }
+
+ info, err := getDiskUtilInfoPlist(s.DeviceIdentifier)
+ if err != nil {
+ return nil, err
+ }
+
+ return &Partition{
+ Disk: nil, // filled in later
+ Name: s.DeviceIdentifier,
+ Label: s.VolumeName,
+ MountPoint: s.MountPoint,
+ SizeBytes: uint64(s.Size),
+ Type: partType,
+ IsReadOnly: !info.WritableVolume,
+ UUID: s.VolumeUUID,
+ }, nil
+}
+
+// driveTypeFromPlist looks at the supplied property list struct and attempts to
+// determine the disk type
+func driveTypeFromPlist(infoPlist *diskUtilInfoPlist) DriveType {
+ dt := DRIVE_TYPE_HDD
+ if infoPlist.SolidState {
+ dt = DRIVE_TYPE_SSD
+ }
+ // TODO(jaypipes): Figure out how to determine floppy and/or CD/optical
+ // drive type on Mac
+ return dt
+}
+
+// storageControllerFromPlist looks at the supplied property list struct and
+// attempts to determine the storage controller in use for the device
+func storageControllerFromPlist(infoPlist *diskUtilInfoPlist) StorageController {
+ sc := STORAGE_CONTROLLER_SCSI
+ if strings.HasSuffix(infoPlist.DeviceTreePath, "IONVMeController") {
+ sc = STORAGE_CONTROLLER_NVME
+ }
+ // TODO(jaypipes): I don't know if Mac even supports IDE controllers and
+ // the "virtio" controller is libvirt-specific
+ return sc
+}
+
+func (info *Info) load() error {
+ if !info.ctx.EnableTools {
+ return fmt.Errorf("EnableTools=false on darwin disables block support entirely.")
+ }
+
+ listPlist, err := getDiskUtilListPlist()
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err.Error())
+ return err
+ }
+
+ info.TotalPhysicalBytes = 0
+ info.Disks = make([]*Disk, 0, len(listPlist.AllDisksAndPartitions))
+ info.Partitions = []*Partition{}
+
+ for _, disk := range listPlist.AllDisksAndPartitions {
+ if disk.Size < 0 {
+ return errors.Errorf("invalid size %q of disk %q", disk.Size, disk.DeviceIdentifier)
+ }
+
+ infoPlist, err := getDiskUtilInfoPlist(disk.DeviceIdentifier)
+ if err != nil {
+ return err
+ }
+ if infoPlist.DeviceBlockSize < 0 {
+ return errors.Errorf("invalid block size %q of disk %q", infoPlist.DeviceBlockSize, disk.DeviceIdentifier)
+ }
+
+ busPath := strings.TrimPrefix(infoPlist.DeviceTreePath, "IODeviceTree:")
+
+ ioregPlist, err := getIoregPlist(infoPlist.DeviceTreePath)
+ if err != nil {
+ return err
+ }
+ if ioregPlist == nil {
+ continue
+ }
+
+ // The NUMA node & WWN don't seem to be reported by any tools available by default in macOS.
+ diskReport := &Disk{
+ Name: disk.DeviceIdentifier,
+ SizeBytes: uint64(disk.Size),
+ PhysicalBlockSizeBytes: uint64(infoPlist.DeviceBlockSize),
+ DriveType: driveTypeFromPlist(infoPlist),
+ IsRemovable: infoPlist.Removable,
+ StorageController: storageControllerFromPlist(infoPlist),
+ BusPath: busPath,
+ NUMANodeID: -1,
+ Vendor: ioregPlist.VendorName,
+ Model: ioregPlist.ModelNumber,
+ SerialNumber: ioregPlist.SerialNumber,
+ WWN: "",
+ Partitions: make([]*Partition, 0, len(disk.Partitions)+len(disk.APFSVolumes)),
+ }
+
+ for _, partition := range disk.Partitions {
+ part, err := makePartition(disk, partition, false)
+ if err != nil {
+ return err
+ }
+ part.Disk = diskReport
+ diskReport.Partitions = append(diskReport.Partitions, part)
+ }
+ for _, volume := range disk.APFSVolumes {
+ part, err := makePartition(disk, volume, true)
+ if err != nil {
+ return err
+ }
+ part.Disk = diskReport
+ diskReport.Partitions = append(diskReport.Partitions, part)
+ }
+
+ info.TotalPhysicalBytes += uint64(disk.Size)
+ info.Disks = append(info.Disks, diskReport)
+ info.Partitions = append(info.Partitions, diskReport.Partitions...)
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go
new file mode 100644
index 00000000..ce164132
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_linux.go
@@ -0,0 +1,497 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "bufio"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ sectorSize = 512
+)
+
+func (i *Info) load() error {
+ paths := linuxpath.New(i.ctx)
+ i.Disks = disks(i.ctx, paths)
+ var tpb uint64
+ for _, d := range i.Disks {
+ tpb += d.SizeBytes
+ }
+ i.TotalPhysicalBytes = tpb
+ return nil
+}
+
+func diskPhysicalBlockSizeBytes(paths *linuxpath.Paths, disk string) uint64 {
+ // We can find the sector size in Linux by looking at the
+ // /sys/block/$DEVICE/queue/physical_block_size file in sysfs
+ path := filepath.Join(paths.SysBlock, disk, "queue", "physical_block_size")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0
+ }
+ size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return size
+}
+
+func diskSizeBytes(paths *linuxpath.Paths, disk string) uint64 {
+ // We can find the number of 512-byte sectors by examining the contents of
+ // /sys/block/$DEVICE/size and calculate the physical bytes accordingly.
+ path := filepath.Join(paths.SysBlock, disk, "size")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0
+ }
+ size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return size * sectorSize
+}
+
+func diskNUMANodeID(paths *linuxpath.Paths, disk string) int {
+ link, err := os.Readlink(filepath.Join(paths.SysBlock, disk))
+ if err != nil {
+ return -1
+ }
+ for partial := link; strings.HasPrefix(partial, "../devices/"); partial = filepath.Base(partial) {
+ if nodeContents, err := ioutil.ReadFile(filepath.Join(paths.SysBlock, partial, "numa_node")); err != nil {
+ if nodeInt, err := strconv.Atoi(string(nodeContents)); err != nil {
+ return nodeInt
+ }
+ }
+ }
+ return -1
+}
+
+func diskVendor(paths *linuxpath.Paths, disk string) string {
+ // In Linux, the vendor for a disk device is found in the
+ // /sys/block/$DEVICE/device/vendor file in sysfs
+ path := filepath.Join(paths.SysBlock, disk, "device", "vendor")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return util.UNKNOWN
+ }
+ return strings.TrimSpace(string(contents))
+}
+
+// udevInfoDisk gets the udev info for a disk
+func udevInfoDisk(paths *linuxpath.Paths, disk string) (map[string]string, error) {
+ // Get device major:minor numbers
+ devNo, err := ioutil.ReadFile(filepath.Join(paths.SysBlock, disk, "dev"))
+ if err != nil {
+ return nil, err
+ }
+ return udevInfo(paths, string(devNo))
+}
+
+// udevInfoPartition gets the udev info for a partition
+func udevInfoPartition(paths *linuxpath.Paths, disk string, partition string) (map[string]string, error) {
+ // Get device major:minor numbers
+ devNo, err := ioutil.ReadFile(filepath.Join(paths.SysBlock, disk, partition, "dev"))
+ if err != nil {
+ return nil, err
+ }
+ return udevInfo(paths, string(devNo))
+}
+
+func udevInfo(paths *linuxpath.Paths, devNo string) (map[string]string, error) {
+ // Look up block device in udev runtime database
+ udevID := "b" + strings.TrimSpace(devNo)
+ udevBytes, err := ioutil.ReadFile(filepath.Join(paths.RunUdevData, udevID))
+ if err != nil {
+ return nil, err
+ }
+
+ udevInfo := make(map[string]string)
+ for _, udevLine := range strings.Split(string(udevBytes), "\n") {
+ if strings.HasPrefix(udevLine, "E:") {
+ if s := strings.SplitN(udevLine[2:], "=", 2); len(s) == 2 {
+ udevInfo[s[0]] = s[1]
+ }
+ }
+ }
+ return udevInfo, nil
+}
+
+func diskModel(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if model, ok := info["ID_MODEL"]; ok {
+ return model
+ }
+ return util.UNKNOWN
+}
+
+func diskSerialNumber(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ // There are two serial number keys, ID_SERIAL and ID_SERIAL_SHORT The
+ // non-_SHORT version often duplicates vendor information collected
+ // elsewhere, so use _SHORT and fall back to ID_SERIAL if missing...
+ if serial, ok := info["ID_SERIAL_SHORT"]; ok {
+ return serial
+ }
+ if serial, ok := info["ID_SERIAL"]; ok {
+ return serial
+ }
+ return util.UNKNOWN
+}
+
+func diskBusPath(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ // There are two path keys, ID_PATH and ID_PATH_TAG.
+ // The difference seems to be _TAG has funky characters converted to underscores.
+ if path, ok := info["ID_PATH"]; ok {
+ return path
+ }
+ return util.UNKNOWN
+}
+
+func diskWWN(paths *linuxpath.Paths, disk string) string {
+ info, err := udevInfoDisk(paths, disk)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ // Trying ID_WWN_WITH_EXTENSION and falling back to ID_WWN is the same logic lsblk uses
+ if wwn, ok := info["ID_WWN_WITH_EXTENSION"]; ok {
+ return wwn
+ }
+ if wwn, ok := info["ID_WWN"]; ok {
+ return wwn
+ }
+ return util.UNKNOWN
+}
+
+// diskPartitions takes the name of a disk (note: *not* the path of the disk,
+// but just the name. In other words, "sda", not "/dev/sda" and "nvme0n1" not
+// "/dev/nvme0n1") and returns a slice of pointers to Partition structs
+// representing the partitions in that disk
+func diskPartitions(ctx *context.Context, paths *linuxpath.Paths, disk string) []*Partition {
+ out := make([]*Partition, 0)
+ path := filepath.Join(paths.SysBlock, disk)
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ ctx.Warn("failed to read disk partitions: %s\n", err)
+ return out
+ }
+ for _, file := range files {
+ fname := file.Name()
+ if !strings.HasPrefix(fname, disk) {
+ continue
+ }
+ size := partitionSizeBytes(paths, disk, fname)
+ mp, pt, ro := partitionInfo(paths, fname)
+ du := diskPartUUID(paths, disk, fname)
+ label := diskPartLabel(paths, disk, fname)
+ if pt == "" {
+ pt = diskPartTypeUdev(paths, disk, fname)
+ }
+ fsLabel := diskFSLabel(paths, disk, fname)
+ p := &Partition{
+ Name: fname,
+ SizeBytes: size,
+ MountPoint: mp,
+ Type: pt,
+ IsReadOnly: ro,
+ UUID: du,
+ Label: label,
+ FilesystemLabel: fsLabel,
+ }
+ out = append(out, p)
+ }
+ return out
+}
+
+func diskFSLabel(paths *linuxpath.Paths, disk string, partition string) string {
+ info, err := udevInfoPartition(paths, disk, partition)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if label, ok := info["ID_FS_LABEL"]; ok {
+ return label
+ }
+ return util.UNKNOWN
+}
+
+func diskPartLabel(paths *linuxpath.Paths, disk string, partition string) string {
+ info, err := udevInfoPartition(paths, disk, partition)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if label, ok := info["ID_PART_ENTRY_NAME"]; ok {
+ return label
+ }
+ return util.UNKNOWN
+}
+
+// diskPartTypeUdev gets the partition type from the udev database directly and its only used as fallback when
+// the partition is not mounted, so we cannot get the type from paths.ProcMounts from the partitionInfo function
+func diskPartTypeUdev(paths *linuxpath.Paths, disk string, partition string) string {
+ info, err := udevInfoPartition(paths, disk, partition)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if pType, ok := info["ID_FS_TYPE"]; ok {
+ return pType
+ }
+ return util.UNKNOWN
+}
+
+func diskPartUUID(paths *linuxpath.Paths, disk string, partition string) string {
+ info, err := udevInfoPartition(paths, disk, partition)
+ if err != nil {
+ return util.UNKNOWN
+ }
+
+ if pType, ok := info["ID_PART_ENTRY_UUID"]; ok {
+ return pType
+ }
+ return util.UNKNOWN
+}
+
+func diskIsRemovable(paths *linuxpath.Paths, disk string) bool {
+ path := filepath.Join(paths.SysBlock, disk, "removable")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return false
+ }
+ removable := strings.TrimSpace(string(contents))
+ return removable == "1"
+}
+
+func disks(ctx *context.Context, paths *linuxpath.Paths) []*Disk {
+	// In Linux, we could use the fdisk, lshw or blockdev commands to list disk
+	// information; however, all of these utilities require root privileges to
+	// run. We can get the same information by examining the /sys/block
+	// and /sys/class/block files
+ disks := make([]*Disk, 0)
+ files, err := ioutil.ReadDir(paths.SysBlock)
+ if err != nil {
+ return nil
+ }
+ for _, file := range files {
+ dname := file.Name()
+
+ driveType, storageController := diskTypes(dname)
+ // TODO(jaypipes): Move this into diskTypes() once abstracting
+ // diskIsRotational for ease of unit testing
+ if !diskIsRotational(ctx, paths, dname) {
+ driveType = DRIVE_TYPE_SSD
+ }
+ size := diskSizeBytes(paths, dname)
+ pbs := diskPhysicalBlockSizeBytes(paths, dname)
+ busPath := diskBusPath(paths, dname)
+ node := diskNUMANodeID(paths, dname)
+ vendor := diskVendor(paths, dname)
+ model := diskModel(paths, dname)
+ serialNo := diskSerialNumber(paths, dname)
+ wwn := diskWWN(paths, dname)
+ removable := diskIsRemovable(paths, dname)
+
+ if storageController == STORAGE_CONTROLLER_LOOP && size == 0 {
+ // We don't care about unused loop devices...
+ continue
+ }
+ d := &Disk{
+ Name: dname,
+ SizeBytes: size,
+ PhysicalBlockSizeBytes: pbs,
+ DriveType: driveType,
+ IsRemovable: removable,
+ StorageController: storageController,
+ BusPath: busPath,
+ NUMANodeID: node,
+ Vendor: vendor,
+ Model: model,
+ SerialNumber: serialNo,
+ WWN: wwn,
+ }
+
+ parts := diskPartitions(ctx, paths, dname)
+ // Map this Disk object into the Partition...
+ for _, part := range parts {
+ part.Disk = d
+ }
+ d.Partitions = parts
+
+ disks = append(disks, d)
+ }
+
+ return disks
+}
+
+// diskTypes returns the drive type and storage controller of a disk based on its device name prefix
+func diskTypes(dname string) (
+ DriveType,
+ StorageController,
+) {
+ // The conditionals below which set the controller and drive type are
+ // based on information listed here:
+ // https://en.wikipedia.org/wiki/Device_file
+ driveType := DRIVE_TYPE_UNKNOWN
+ storageController := STORAGE_CONTROLLER_UNKNOWN
+ if strings.HasPrefix(dname, "fd") {
+ driveType = DRIVE_TYPE_FDD
+ } else if strings.HasPrefix(dname, "sd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_SCSI
+ } else if strings.HasPrefix(dname, "hd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_IDE
+ } else if strings.HasPrefix(dname, "vd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_VIRTIO
+ } else if strings.HasPrefix(dname, "nvme") {
+ driveType = DRIVE_TYPE_SSD
+ storageController = STORAGE_CONTROLLER_NVME
+ } else if strings.HasPrefix(dname, "sr") {
+ driveType = DRIVE_TYPE_ODD
+ storageController = STORAGE_CONTROLLER_SCSI
+ } else if strings.HasPrefix(dname, "xvd") {
+ driveType = DRIVE_TYPE_HDD
+ storageController = STORAGE_CONTROLLER_SCSI
+ } else if strings.HasPrefix(dname, "mmc") {
+ driveType = DRIVE_TYPE_SSD
+ storageController = STORAGE_CONTROLLER_MMC
+ } else if strings.HasPrefix(dname, "loop") {
+ driveType = DRIVE_TYPE_VIRTUAL
+ storageController = STORAGE_CONTROLLER_LOOP
+ }
+
+ return driveType, storageController
+}
+
+func diskIsRotational(ctx *context.Context, paths *linuxpath.Paths, devName string) bool {
+ path := filepath.Join(paths.SysBlock, devName, "queue", "rotational")
+ contents := util.SafeIntFromFile(ctx, path)
+ return contents == 1
+}
+
+// partitionSizeBytes returns the size in bytes of the partition given a disk
+// name and a partition name. Note: disk name and partition name do *not*
+// contain any leading "/dev" parts. In other words, they are *names*, not
+// paths.
+func partitionSizeBytes(paths *linuxpath.Paths, disk string, part string) uint64 {
+ path := filepath.Join(paths.SysBlock, disk, part, "size")
+ contents, err := ioutil.ReadFile(path)
+ if err != nil {
+ return 0
+ }
+ size, err := strconv.ParseUint(strings.TrimSpace(string(contents)), 10, 64)
+ if err != nil {
+ return 0
+ }
+ return size * sectorSize
+}
+
+// Given a full or short partition name, returns the mount point, the type of
+// the partition and whether it's readonly
+func partitionInfo(paths *linuxpath.Paths, part string) (string, string, bool) {
+ // Allow calling PartitionInfo with either the full partition name
+ // "/dev/sda1" or just "sda1"
+ if !strings.HasPrefix(part, "/dev") {
+ part = "/dev/" + part
+ }
+
+ // mount entries for mounted partitions look like this:
+ // /dev/sda6 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
+ var r io.ReadCloser
+ r, err := os.Open(paths.ProcMounts)
+ if err != nil {
+ return "", "", true
+ }
+ defer util.SafeClose(r)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ entry := parseMountEntry(line)
+ if entry == nil || entry.Partition != part {
+ continue
+ }
+ ro := true
+ for _, opt := range entry.Options {
+ if opt == "rw" {
+ ro = false
+ break
+ }
+ }
+
+ return entry.Mountpoint, entry.FilesystemType, ro
+ }
+ return "", "", true
+}
+
+type mountEntry struct {
+ Partition string
+ Mountpoint string
+ FilesystemType string
+ Options []string
+}
+
+func parseMountEntry(line string) *mountEntry {
+ // mount entries for mounted partitions look like this:
+ // /dev/sda6 / ext4 rw,relatime,errors=remount-ro,data=ordered 0 0
+ if line[0] != '/' {
+ return nil
+ }
+ fields := strings.Fields(line)
+
+ if len(fields) < 4 {
+ return nil
+ }
+
+ // We do some special parsing of the mountpoint, which may contain space,
+ // tab and newline characters, encoded into the mount entry line using their
+ // octal-to-string representations. From the GNU mtab man pages:
+ //
+ // "Therefore these characters are encoded in the files and the getmntent
+ // function takes care of the decoding while reading the entries back in.
+ // '\040' is used to encode a space character, '\011' to encode a tab
+ // character, '\012' to encode a newline character, and '\\' to encode a
+ // backslash."
+ mp := fields[1]
+ r := strings.NewReplacer(
+ "\\011", "\t", "\\012", "\n", "\\040", " ", "\\\\", "\\",
+ )
+ mp = r.Replace(mp)
+
+ res := &mountEntry{
+ Partition: fields[0],
+ Mountpoint: mp,
+ FilesystemType: fields[2],
+ }
+ opts := strings.Split(fields[3], ",")
+ res.Options = opts
+ return res
+}
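The mtab-style octal escapes described in the comment above can be exercised on their own; the following standalone sketch (not part of the vendored package) mirrors the `strings.NewReplacer` used by `parseMountEntry` to decode a mount point that contains a space:

```go
package main

import (
	"fmt"
	"strings"
)

// decodeMountPoint mirrors the strings.Replacer used by parseMountEntry:
// getmntent-style escapes become spaces, tabs, newlines and backslashes again.
func decodeMountPoint(mp string) string {
	r := strings.NewReplacer(
		"\\011", "\t", "\\012", "\n", "\\040", " ", "\\\\", "\\",
	)
	return r.Replace(mp)
}

func main() {
	// A mount entry for a path containing a space, as it would appear in
	// /proc/self/mounts.
	line := "/dev/sdb1 /mnt/backup\\040disk ext4 rw,relatime 0 0"
	fields := strings.Fields(line)
	fmt.Println(decodeMountPoint(fields[1])) // /mnt/backup disk
}
```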
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go
new file mode 100644
index 00000000..f5b51645
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !darwin && !windows
+// +build !linux,!darwin,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("blockFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go
new file mode 100644
index 00000000..804046e1
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/block/block_windows.go
@@ -0,0 +1,223 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package block
+
+import (
+ "strings"
+
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlDiskDrive = "SELECT Caption, CreationClassName, DefaultBlockSize, Description, DeviceID, Index, InterfaceType, Manufacturer, MediaType, Model, Name, Partitions, SerialNumber, Size, TotalCylinders, TotalHeads, TotalSectors, TotalTracks, TracksPerCylinder FROM Win32_DiskDrive"
+
+type win32DiskDrive struct {
+ Caption *string
+ CreationClassName *string
+ DefaultBlockSize *uint64
+ Description *string
+ DeviceID *string
+ Index *uint32 // Used to link with partition
+ InterfaceType *string
+ Manufacturer *string
+ MediaType *string
+ Model *string
+ Name *string
+ Partitions *int32
+ SerialNumber *string
+ Size *uint64
+ TotalCylinders *int64
+ TotalHeads *int32
+ TotalSectors *int64
+ TotalTracks *int64
+ TracksPerCylinder *int32
+}
+
+const wqlDiskPartition = "SELECT Access, BlockSize, Caption, CreationClassName, Description, DeviceID, DiskIndex, Index, Name, Size, SystemName, Type FROM Win32_DiskPartition"
+
+type win32DiskPartition struct {
+ Access *uint16
+ BlockSize *uint64
+ Caption *string
+ CreationClassName *string
+ Description *string
+ DeviceID *string
+ DiskIndex *uint32 // Used to link with Disk Drive
+ Index *uint32
+ Name *string
+ Size *int64
+ SystemName *string
+ Type *string
+}
+
+const wqlLogicalDiskToPartition = "SELECT Antecedent, Dependent FROM Win32_LogicalDiskToPartition"
+
+type win32LogicalDiskToPartition struct {
+ Antecedent *string
+ Dependent *string
+}
+
+const wqlLogicalDisk = "SELECT Caption, CreationClassName, Description, DeviceID, FileSystem, FreeSpace, Name, Size, SystemName FROM Win32_LogicalDisk"
+
+type win32LogicalDisk struct {
+ Caption *string
+ CreationClassName *string
+ Description *string
+ DeviceID *string
+ FileSystem *string
+ FreeSpace *uint64
+ Name *string
+ Size *uint64
+ SystemName *string
+}
+
+func (i *Info) load() error {
+ win32DiskDriveDescriptions, err := getDiskDrives()
+ if err != nil {
+ return err
+ }
+
+ win32DiskPartitionDescriptions, err := getDiskPartitions()
+ if err != nil {
+ return err
+ }
+
+ win32LogicalDiskToPartitionDescriptions, err := getLogicalDisksToPartitions()
+ if err != nil {
+ return err
+ }
+
+ win32LogicalDiskDescriptions, err := getLogicalDisks()
+ if err != nil {
+ return err
+ }
+
+ // Converting into standard structures
+ disks := make([]*Disk, 0)
+ for _, diskdrive := range win32DiskDriveDescriptions {
+ disk := &Disk{
+ Name: strings.TrimSpace(*diskdrive.DeviceID),
+ SizeBytes: *diskdrive.Size,
+ PhysicalBlockSizeBytes: *diskdrive.DefaultBlockSize,
+ DriveType: toDriveType(*diskdrive.MediaType, *diskdrive.Caption),
+ StorageController: toStorageController(*diskdrive.InterfaceType),
+ BusPath: util.UNKNOWN, // TODO: add information
+ NUMANodeID: -1,
+ Vendor: strings.TrimSpace(*diskdrive.Manufacturer),
+ Model: strings.TrimSpace(*diskdrive.Caption),
+ SerialNumber: strings.TrimSpace(*diskdrive.SerialNumber),
+ WWN: util.UNKNOWN, // TODO: add information
+ Partitions: make([]*Partition, 0),
+ }
+ for _, diskpartition := range win32DiskPartitionDescriptions {
+ // Finding disk partition linked to current disk drive
+ if diskdrive.Index == nil || diskpartition.DiskIndex == nil {
+ continue
+ }
+ if *diskdrive.Index == *diskpartition.DiskIndex {
+ disk.PhysicalBlockSizeBytes = *diskpartition.BlockSize
+ // Finding logical partition linked to current disk partition
+ for _, logicaldisk := range win32LogicalDiskDescriptions {
+ for _, logicaldisktodiskpartition := range win32LogicalDiskToPartitionDescriptions {
+ var desiredAntecedent = "\\\\" + *diskpartition.SystemName + "\\root\\cimv2:" + *diskpartition.CreationClassName + ".DeviceID=\"" + *diskpartition.DeviceID + "\""
+ var desiredDependent = "\\\\" + *logicaldisk.SystemName + "\\root\\cimv2:" + *logicaldisk.CreationClassName + ".DeviceID=\"" + *logicaldisk.DeviceID + "\""
+ if *logicaldisktodiskpartition.Antecedent == desiredAntecedent && *logicaldisktodiskpartition.Dependent == desiredDependent {
+ // Appending Partition
+ p := &Partition{
+ Name: strings.TrimSpace(*logicaldisk.Caption),
+ Label: strings.TrimSpace(*logicaldisk.Caption),
+ SizeBytes: *logicaldisk.Size,
+ MountPoint: *logicaldisk.DeviceID,
+ Type: *diskpartition.Type,
+ IsReadOnly: toReadOnly(*diskpartition.Access),
+ UUID: "",
+ }
+ disk.Partitions = append(disk.Partitions, p)
+ break
+ }
+ }
+ }
+ }
+ }
+ disks = append(disks, disk)
+ }
+
+ i.Disks = disks
+ var tpb uint64
+ for _, d := range i.Disks {
+ tpb += d.SizeBytes
+ }
+ i.TotalPhysicalBytes = tpb
+ return nil
+}
+
+func getDiskDrives() ([]win32DiskDrive, error) {
+ // Getting disks drives data from WMI
+ var win3232DiskDriveDescriptions []win32DiskDrive
+ if err := wmi.Query(wqlDiskDrive, &win3232DiskDriveDescriptions); err != nil {
+ return nil, err
+ }
+ return win3232DiskDriveDescriptions, nil
+}
+
+func getDiskPartitions() ([]win32DiskPartition, error) {
+ // Getting disk partitions from WMI
+ var win32DiskPartitionDescriptions []win32DiskPartition
+ if err := wmi.Query(wqlDiskPartition, &win32DiskPartitionDescriptions); err != nil {
+ return nil, err
+ }
+ return win32DiskPartitionDescriptions, nil
+}
+
+func getLogicalDisksToPartitions() ([]win32LogicalDiskToPartition, error) {
+ // Getting links between logical disks and partitions from WMI
+ var win32LogicalDiskToPartitionDescriptions []win32LogicalDiskToPartition
+ if err := wmi.Query(wqlLogicalDiskToPartition, &win32LogicalDiskToPartitionDescriptions); err != nil {
+ return nil, err
+ }
+ return win32LogicalDiskToPartitionDescriptions, nil
+}
+
+func getLogicalDisks() ([]win32LogicalDisk, error) {
+ // Getting logical disks from WMI
+ var win32LogicalDiskDescriptions []win32LogicalDisk
+ if err := wmi.Query(wqlLogicalDisk, &win32LogicalDiskDescriptions); err != nil {
+ return nil, err
+ }
+ return win32LogicalDiskDescriptions, nil
+}
+
+func toDriveType(mediaType string, caption string) DriveType {
+ mediaType = strings.ToLower(mediaType)
+ caption = strings.ToLower(caption)
+ if strings.Contains(mediaType, "fixed") || strings.Contains(mediaType, "ssd") || strings.Contains(caption, "ssd") {
+ return DRIVE_TYPE_SSD
+	} else if strings.Contains(mediaType, "hdd") {
+ return DRIVE_TYPE_HDD
+ }
+ return DRIVE_TYPE_UNKNOWN
+}
+
+// TODO: improve
+func toStorageController(interfaceType string) StorageController {
+ var storageController StorageController
+ switch interfaceType {
+ case "SCSI":
+ storageController = STORAGE_CONTROLLER_SCSI
+ case "IDE":
+ storageController = STORAGE_CONTROLLER_IDE
+ default:
+ storageController = STORAGE_CONTROLLER_UNKNOWN
+ }
+ return storageController
+}
+
+// TODO: improve
+func toReadOnly(access uint16) bool {
+ // See Access property from: https://docs.microsoft.com/en-us/windows/win32/cimwin32prov/win32-diskpartition
+ return access == 0x1
+}
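For reference, both the Linux and Windows `load()` implementations above feed the same exported `Disk`/`Partition` structures. A hedged consumption sketch follows; it assumes the `block` package exposes a `New()` constructor in the same style as the `cpu`, `gpu` and `chassis` packages vendored below, and simply prints whatever was discovered:

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/block"
)

func main() {
	// block.New is assumed to follow the same constructor pattern as the
	// cpu, gpu and chassis packages in this vendor tree.
	info, err := block.New()
	if err != nil {
		fmt.Printf("error getting block storage info: %v\n", err)
		return
	}
	for _, disk := range info.Disks {
		fmt.Printf("%s: %d bytes, controller=%v, serial=%s\n",
			disk.Name, disk.SizeBytes, disk.StorageController, disk.SerialNumber)
		for _, part := range disk.Partitions {
			fmt.Printf("  %s mounted at %q (%s)\n",
				part.Name, part.MountPoint, part.Type)
		}
	}
}
```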
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go
new file mode 100644
index 00000000..a7667bbc
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis.go
@@ -0,0 +1,117 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+var (
+ chassisTypeDescriptions = map[string]string{
+ "1": "Other",
+ "2": "Unknown",
+ "3": "Desktop",
+ "4": "Low profile desktop",
+ "5": "Pizza box",
+ "6": "Mini tower",
+ "7": "Tower",
+ "8": "Portable",
+ "9": "Laptop",
+ "10": "Notebook",
+ "11": "Hand held",
+ "12": "Docking station",
+ "13": "All in one",
+ "14": "Sub notebook",
+ "15": "Space-saving",
+ "16": "Lunch box",
+ "17": "Main server chassis",
+ "18": "Expansion chassis",
+ "19": "SubChassis",
+ "20": "Bus Expansion chassis",
+ "21": "Peripheral chassis",
+ "22": "RAID chassis",
+ "23": "Rack mount chassis",
+ "24": "Sealed-case PC",
+ "25": "Multi-system chassis",
+ "26": "Compact PCI",
+ "27": "Advanced TCA",
+ "28": "Blade",
+ "29": "Blade enclosure",
+ "30": "Tablet",
+ "31": "Convertible",
+ "32": "Detachable",
+ "33": "IoT gateway",
+ "34": "Embedded PC",
+ "35": "Mini PC",
+ "36": "Stick PC",
+ }
+)
+
+// Info defines chassis release information
+type Info struct {
+ ctx *context.Context
+ AssetTag string `json:"asset_tag"`
+ SerialNumber string `json:"serial_number"`
+ Type string `json:"type"`
+ TypeDescription string `json:"type_description"`
+ Vendor string `json:"vendor"`
+ Version string `json:"version"`
+}
+
+func (i *Info) String() string {
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ serialStr := ""
+ if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+ serialStr = " serial=" + i.SerialNumber
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+
+ return "chassis type=" + util.ConcatStrings(
+ i.TypeDescription,
+ vendorStr,
+ serialStr,
+ versionStr,
+ )
+}
+
+// New returns a pointer to a Info struct containing information
+// about the host's chassis
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate chassis information in a top-level
+// "chassis" YAML/JSON map/object key
+type chassisPrinter struct {
+ Info *Info `json:"chassis"`
+}
+
+// YAMLString returns a string with the chassis information formatted as YAML
+// under a top-level "chassis:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, chassisPrinter{info})
+}
+
+// JSONString returns a string with the chassis information formatted as JSON
+// under a top-level "chassis:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, chassisPrinter{info}, indent)
+}
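A minimal usage sketch for the constructor and printer methods defined above (nothing beyond `chassis.New`, `String` and `JSONString` is assumed):

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/chassis"
)

func main() {
	info, err := chassis.New()
	if err != nil {
		fmt.Printf("error getting chassis info: %v\n", err)
		return
	}
	fmt.Println(info.String())
	// JSONString(true) pretty-prints under a top-level "chassis" key.
	fmt.Println(info.JSONString(true))
}
```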
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go
new file mode 100644
index 00000000..00f64de6
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_linux.go
@@ -0,0 +1,26 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "github.com/jaypipes/ghw/pkg/linuxdmi"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+func (i *Info) load() error {
+ i.AssetTag = linuxdmi.Item(i.ctx, "chassis_asset_tag")
+ i.SerialNumber = linuxdmi.Item(i.ctx, "chassis_serial")
+ i.Type = linuxdmi.Item(i.ctx, "chassis_type")
+ typeDesc, found := chassisTypeDescriptions[i.Type]
+ if !found {
+ typeDesc = util.UNKNOWN
+ }
+ i.TypeDescription = typeDesc
+ i.Vendor = linuxdmi.Item(i.ctx, "chassis_vendor")
+ i.Version = linuxdmi.Item(i.ctx, "chassis_version")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go
new file mode 100644
index 00000000..0e3fd94b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("chassisFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go
new file mode 100644
index 00000000..088cbed3
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/chassis/chassis_windows.go
@@ -0,0 +1,43 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package chassis
+
+import (
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlChassis = "SELECT Caption, Description, Name, Manufacturer, Model, SerialNumber, Tag, TypeDescriptions, Version FROM CIM_Chassis"
+
+type win32Chassis struct {
+ Caption *string
+ Description *string
+ Name *string
+ Manufacturer *string
+ Model *string
+ SerialNumber *string
+ Tag *string
+ TypeDescriptions []string
+ Version *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32ChassisDescriptions []win32Chassis
+ if err := wmi.Query(wqlChassis, &win32ChassisDescriptions); err != nil {
+ return err
+ }
+ if len(win32ChassisDescriptions) > 0 {
+ i.AssetTag = *win32ChassisDescriptions[0].Tag
+ i.SerialNumber = *win32ChassisDescriptions[0].SerialNumber
+ i.Type = util.UNKNOWN // TODO:
+ i.TypeDescription = *win32ChassisDescriptions[0].Model
+ i.Vendor = *win32ChassisDescriptions[0].Manufacturer
+ i.Version = *win32ChassisDescriptions[0].Version
+ }
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/context/context.go b/vendor/github.com/jaypipes/ghw/pkg/context/context.go
new file mode 100644
index 00000000..fb8de528
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/context/context.go
@@ -0,0 +1,178 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package context
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/snapshot"
+)
+
+// Context contains the merged set of configuration switches that act as an
+// execution context when calling internal discovery methods
+type Context struct {
+ Chroot string
+ EnableTools bool
+ SnapshotPath string
+ SnapshotRoot string
+ SnapshotExclusive bool
+ PathOverrides option.PathOverrides
+ snapshotUnpackedPath string
+ alert option.Alerter
+ err error
+}
+
+// WithContext returns an option.Option that contains a pre-existing Context
+// struct. This is useful for some internal code that sets up snapshots.
+func WithContext(ctx *Context) *option.Option {
+ return &option.Option{
+ Context: ctx,
+ }
+}
+
+// Exists returns true if the supplied (merged) Option already contains
+// a context.
+//
+// TODO(jaypipes): We can get rid of this when we combine the option and
+// context packages, which will make it easier to detect the presence of a
+// pre-setup Context.
+func Exists(opt *option.Option) bool {
+ return opt != nil && opt.Context != nil
+}
+
+// New returns a Context struct pointer that has had various options set on it
+func New(opts ...*option.Option) *Context {
+ merged := option.Merge(opts...)
+ var ctx *Context
+ if merged.Context != nil {
+ var castOK bool
+ ctx, castOK = merged.Context.(*Context)
+ if !castOK {
+ panic("passed in a non-Context for the WithContext() function!")
+ }
+ return ctx
+ }
+ ctx = &Context{
+ alert: option.EnvOrDefaultAlerter(),
+ Chroot: *merged.Chroot,
+ }
+
+ if merged.Snapshot != nil {
+ ctx.SnapshotPath = merged.Snapshot.Path
+		// root is optional, so an extra check is warranted
+ if merged.Snapshot.Root != nil {
+ ctx.SnapshotRoot = *merged.Snapshot.Root
+ }
+ ctx.SnapshotExclusive = merged.Snapshot.Exclusive
+ }
+
+ if merged.Alerter != nil {
+ ctx.alert = merged.Alerter
+ }
+
+ if merged.EnableTools != nil {
+ ctx.EnableTools = *merged.EnableTools
+ }
+
+ if merged.PathOverrides != nil {
+ ctx.PathOverrides = merged.PathOverrides
+ }
+
+	// New is not allowed to return an error - that would break the established
+	// API - so the only way out is to do the checks here, record any error,
+	// and return it later, at the earliest possible occasion, in Setup()
+ if ctx.SnapshotPath != "" && ctx.Chroot != option.DefaultChroot {
+		// The env/client code supplied a value, but we will overwrite it when unpacking snapshots!
+ ctx.err = fmt.Errorf("Conflicting options: chroot %q and snapshot path %q", ctx.Chroot, ctx.SnapshotPath)
+ }
+ return ctx
+}
+
+// FromEnv returns a Context that has been populated from the environs or
+// default options values
+func FromEnv() *Context {
+ chrootVal := option.EnvOrDefaultChroot()
+ enableTools := option.EnvOrDefaultTools()
+ snapPathVal := option.EnvOrDefaultSnapshotPath()
+ snapRootVal := option.EnvOrDefaultSnapshotRoot()
+ snapExclusiveVal := option.EnvOrDefaultSnapshotExclusive()
+ return &Context{
+ Chroot: chrootVal,
+ EnableTools: enableTools,
+ SnapshotPath: snapPathVal,
+ SnapshotRoot: snapRootVal,
+ SnapshotExclusive: snapExclusiveVal,
+ }
+}
+
+// Do wraps a Setup/Teardown pair around the given function
+func (ctx *Context) Do(fn func() error) error {
+ err := ctx.Setup()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ err := ctx.Teardown()
+ if err != nil {
+ ctx.Warn("teardown error: %v", err)
+ }
+ }()
+ return fn()
+}
+
+// Setup prepares the extra optional data a Context may use.
+// `Context`s are ready to use once returned by `New`. Optional features,
+// like snapshot unpacking, may require extra steps. Run `Setup` to perform them.
+// You should call `Setup` just once. It is safe to call `Setup` if you don't make
+// use of optional extra features - `Setup` will do nothing.
+func (ctx *Context) Setup() error {
+ if ctx.err != nil {
+ return ctx.err
+ }
+ if ctx.SnapshotPath == "" {
+ // nothing to do!
+ return nil
+ }
+
+ var err error
+ root := ctx.SnapshotRoot
+ if root == "" {
+ root, err = snapshot.Unpack(ctx.SnapshotPath)
+ if err == nil {
+ ctx.snapshotUnpackedPath = root
+ }
+ } else {
+ var flags uint
+ if ctx.SnapshotExclusive {
+ flags |= snapshot.OwnTargetDirectory
+ }
+ _, err = snapshot.UnpackInto(ctx.SnapshotPath, root, flags)
+ }
+ if err != nil {
+ return err
+ }
+
+ ctx.Chroot = root
+ return nil
+}
+
+// Teardown releases any resource acquired by Setup.
+// You should always call `Teardown` if you called `Setup` to free any resources
+// acquired by `Setup`. Check `Do` for more automated management.
+func (ctx *Context) Teardown() error {
+ if ctx.snapshotUnpackedPath == "" {
+ // if the client code provided the unpack directory,
+ // then it is also in charge of the cleanup.
+ return nil
+ }
+ return snapshot.Cleanup(ctx.snapshotUnpackedPath)
+}
+
+func (ctx *Context) Warn(msg string, args ...interface{}) {
+ ctx.alert.Printf("WARNING: "+msg, args...)
+}
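The `Do` helper above brackets a discovery function with `Setup` and `Teardown`. The sketch below shows how client code might drive it directly; `option.WithChroot` is an assumption here (suggested by the `Chroot` handling in `New`), not something defined in this file:

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/context"
	"github.com/jaypipes/ghw/pkg/option"
)

func main() {
	// option.WithChroot is assumed; New() merges the option into ctx.Chroot.
	ctx := context.New(option.WithChroot("/host"))

	// Do runs Setup, the supplied function, then Teardown. Setup is a no-op
	// unless a snapshot path was configured.
	err := ctx.Do(func() error {
		fmt.Println("discovery would run against chroot:", ctx.Chroot)
		return nil
	})
	if err != nil {
		fmt.Println("context error:", err)
	}
}
```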
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go
new file mode 100644
index 00000000..2fa0cd2d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu.go
@@ -0,0 +1,169 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+// ProcessorCore describes a physical host processor core. A processor core is
+// a separate processing unit within some types of central processing units
+// (CPU).
+type ProcessorCore struct {
+	// ID is the identifier that the host gave this core. Note that
+ // this does *not* necessarily equate to a zero-based index of the core
+ // within a physical package. For example, the core IDs for an Intel Core
+ // i7 are 0, 1, 2, 8, 9, and 10
+ ID int `json:"id"`
+ // Index is the zero-based index of the core on the physical processor
+ // package
+ Index int `json:"index"`
+ // NumThreads is the number of hardware threads associated with the core
+ NumThreads uint32 `json:"total_threads"`
+ // LogicalProcessors is a slice of ints representing the logical processor
+ // IDs assigned to any processing unit for the core
+ LogicalProcessors []int `json:"logical_processors"`
+}
+
+// String returns a short string indicating important information about the
+// processor core
+func (c *ProcessorCore) String() string {
+ return fmt.Sprintf(
+ "processor core #%d (%d threads), logical processors %v",
+ c.Index,
+ c.NumThreads,
+ c.LogicalProcessors,
+ )
+}
+
+// Processor describes a physical host central processing unit (CPU).
+type Processor struct {
+	// ID is the physical processor ID according to the system
+ ID int `json:"id"`
+ // NumCores is the number of physical cores in the processor package
+ NumCores uint32 `json:"total_cores"`
+ // NumThreads is the number of hardware threads in the processor package
+ NumThreads uint32 `json:"total_threads"`
+ // Vendor is a string containing the vendor name
+ Vendor string `json:"vendor"`
+	// Model is a string containing the vendor's model name
+ Model string `json:"model"`
+ // Capabilities is a slice of strings indicating the features the processor
+ // has enabled
+ Capabilities []string `json:"capabilities"`
+	// Cores is a slice of ProcessorCore struct pointers that are packed onto
+ // this physical processor
+ Cores []*ProcessorCore `json:"cores"`
+}
+
+// HasCapability returns true if the Processor has the supplied cpuid
+// capability, false otherwise. Examples of cpuid capabilities are 'vmx' or
+// 'sse4_2'. To see a list of potential cpuid capabilities, see the section on
+// CPUID feature bits in the following article:
+//
+// https://en.wikipedia.org/wiki/CPUID
+func (p *Processor) HasCapability(find string) bool {
+ for _, c := range p.Capabilities {
+ if c == find {
+ return true
+ }
+ }
+ return false
+}
+
+// String returns a short string describing the Processor
+func (p *Processor) String() string {
+ ncs := "cores"
+ if p.NumCores == 1 {
+ ncs = "core"
+ }
+ nts := "threads"
+ if p.NumThreads == 1 {
+ nts = "thread"
+ }
+ return fmt.Sprintf(
+ "physical package #%d (%d %s, %d hardware %s)",
+ p.ID,
+ p.NumCores,
+ ncs,
+ p.NumThreads,
+ nts,
+ )
+}
+
+// Info describes all central processing unit (CPU) functionality on a host.
+// Returned by the `ghw.CPU()` function.
+type Info struct {
+ ctx *context.Context
+ // TotalCores is the total number of physical cores the host system
+ // contains
+ TotalCores uint32 `json:"total_cores"`
+ // TotalThreads is the total number of hardware threads the host system
+ // contains
+ TotalThreads uint32 `json:"total_threads"`
+ // Processors is a slice of Processor struct pointers, one for each
+ // physical processor package contained in the host
+ Processors []*Processor `json:"processors"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// CPUs on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// String returns a short string indicating a summary of CPU information
+func (i *Info) String() string {
+ nps := "packages"
+ if len(i.Processors) == 1 {
+ nps = "package"
+ }
+ ncs := "cores"
+ if i.TotalCores == 1 {
+ ncs = "core"
+ }
+ nts := "threads"
+ if i.TotalThreads == 1 {
+ nts = "thread"
+ }
+ return fmt.Sprintf(
+ "cpu (%d physical %s, %d %s, %d hardware %s)",
+ len(i.Processors),
+ nps,
+ i.TotalCores,
+ ncs,
+ i.TotalThreads,
+ nts,
+ )
+}
+
+// simple private struct used to encapsulate cpu information in a top-level
+// "cpu" YAML/JSON map/object key
+type cpuPrinter struct {
+ Info *Info `json:"cpu"`
+}
+
+// YAMLString returns a string with the cpu information formatted as YAML
+// under a top-level "cpu:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, cpuPrinter{i})
+}
+
+// JSONString returns a string with the cpu information formatted as JSON
+// under a top-level "cpu:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, cpuPrinter{i}, indent)
+}
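A short usage sketch for `cpu.New`, `Info.String` and `Processor.HasCapability` as defined above, e.g. gating a feature on a cpuid flag:

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/cpu"
)

func main() {
	info, err := cpu.New()
	if err != nil {
		fmt.Printf("error getting CPU info: %v\n", err)
		return
	}
	fmt.Println(info.String())
	for _, proc := range info.Processors {
		// HasCapability checks the cpuid flags collected for this processor.
		if proc.HasCapability("sse4_2") {
			fmt.Printf("processor %d supports SSE4.2\n", proc.ID)
		}
	}
}
```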
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go
new file mode 100644
index 00000000..44e4ced7
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_linux.go
@@ -0,0 +1,220 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+func (i *Info) load() error {
+ i.Processors = processorsGet(i.ctx)
+ var totCores uint32
+ var totThreads uint32
+ for _, p := range i.Processors {
+ totCores += p.NumCores
+ totThreads += p.NumThreads
+ }
+ i.TotalCores = totCores
+ i.TotalThreads = totThreads
+ return nil
+}
+
+func processorsGet(ctx *context.Context) []*Processor {
+ procs := make([]*Processor, 0)
+ paths := linuxpath.New(ctx)
+
+ r, err := os.Open(paths.ProcCpuinfo)
+ if err != nil {
+ return nil
+ }
+ defer util.SafeClose(r)
+
+ // An array of maps of attributes describing the logical processor
+ procAttrs := make([]map[string]string, 0)
+ curProcAttrs := make(map[string]string)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := strings.TrimSpace(scanner.Text())
+ if line == "" {
+			// Output of /proc/cpuinfo uses a blank line to separate logical
+			// processors, so here we save the attributes gathered for the
+			// current logical processor block
+ procAttrs = append(procAttrs, curProcAttrs)
+ // Reset the current set of processor attributes...
+ curProcAttrs = make(map[string]string)
+ continue
+ }
+ parts := strings.Split(line, ":")
+ key := strings.TrimSpace(parts[0])
+ value := strings.TrimSpace(parts[1])
+ curProcAttrs[key] = value
+ }
+
+ // Build a set of physical processor IDs which represent the physical
+ // package of the CPU
+ setPhysicalIDs := make(map[int]bool)
+ for _, attrs := range procAttrs {
+ pid, err := strconv.Atoi(attrs["physical id"])
+ if err != nil {
+ continue
+ }
+ setPhysicalIDs[pid] = true
+ }
+
+ for pid := range setPhysicalIDs {
+ p := &Processor{
+ ID: pid,
+ }
+ // The indexes into the array of attribute maps for each logical
+ // processor within the physical processor
+ lps := make([]int, 0)
+ for x := range procAttrs {
+ lppid, err := strconv.Atoi(procAttrs[x]["physical id"])
+ if err != nil {
+ continue
+ }
+ if pid == lppid {
+ lps = append(lps, x)
+ }
+ }
+ first := procAttrs[lps[0]]
+ p.Model = first["model name"]
+ p.Vendor = first["vendor_id"]
+ numCores, err := strconv.Atoi(first["cpu cores"])
+ if err != nil {
+ continue
+ }
+ p.NumCores = uint32(numCores)
+ numThreads, err := strconv.Atoi(first["siblings"])
+ if err != nil {
+ continue
+ }
+ p.NumThreads = uint32(numThreads)
+
+ // The flags field is a space-separated list of CPU capabilities
+ p.Capabilities = strings.Split(first["flags"], " ")
+
+ cores := make([]*ProcessorCore, 0)
+ for _, lpidx := range lps {
+ lpid, err := strconv.Atoi(procAttrs[lpidx]["processor"])
+ if err != nil {
+ continue
+ }
+ coreID, err := strconv.Atoi(procAttrs[lpidx]["core id"])
+ if err != nil {
+ continue
+ }
+ var core *ProcessorCore
+ for _, c := range cores {
+ if c.ID == coreID {
+ c.LogicalProcessors = append(
+ c.LogicalProcessors,
+ lpid,
+ )
+ c.NumThreads = uint32(len(c.LogicalProcessors))
+ core = c
+ }
+ }
+ if core == nil {
+ coreLps := make([]int, 1)
+ coreLps[0] = lpid
+ core = &ProcessorCore{
+ ID: coreID,
+ Index: len(cores),
+ NumThreads: 1,
+ LogicalProcessors: coreLps,
+ }
+ cores = append(cores, core)
+ }
+ }
+ p.Cores = cores
+ procs = append(procs, p)
+ }
+ return procs
+}
+
+func CoresForNode(ctx *context.Context, nodeID int) ([]*ProcessorCore, error) {
+ // The /sys/devices/system/node/nodeX directory contains a subdirectory
+ // called 'cpuX' for each logical processor assigned to the node. Each of
+ // those subdirectories contains a topology subdirectory which has a
+ // core_id file that indicates the 0-based identifier of the physical core
+ // the logical processor (hardware thread) is on.
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ )
+ cores := make([]*ProcessorCore, 0)
+
+ findCoreByID := func(coreID int) *ProcessorCore {
+ for _, c := range cores {
+ if c.ID == coreID {
+ return c
+ }
+ }
+
+ c := &ProcessorCore{
+ ID: coreID,
+ Index: len(cores),
+ LogicalProcessors: make([]int, 0),
+ }
+ cores = append(cores, c)
+ return c
+ }
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ for _, file := range files {
+ filename := file.Name()
+ if !strings.HasPrefix(filename, "cpu") {
+ continue
+ }
+ if filename == "cpumap" || filename == "cpulist" {
+ // There are two files in the node directory that start with 'cpu'
+ // but are not subdirectories ('cpulist' and 'cpumap'). Ignore
+ // these files.
+ continue
+ }
+ // Grab the logical processor ID by cutting the integer from the
+ // /sys/devices/system/node/nodeX/cpuX filename
+ cpuPath := filepath.Join(path, filename)
+ procID, err := strconv.Atoi(filename[3:])
+ if err != nil {
+ _, _ = fmt.Fprintf(
+ os.Stderr,
+ "failed to determine procID from %s. Expected integer after 3rd char.",
+ filename,
+ )
+ continue
+ }
+ coreIDPath := filepath.Join(cpuPath, "topology", "core_id")
+ coreID := util.SafeIntFromFile(ctx, coreIDPath)
+ core := findCoreByID(coreID)
+ core.LogicalProcessors = append(
+ core.LogicalProcessors,
+ procID,
+ )
+ }
+
+ for _, c := range cores {
+ c.NumThreads = uint32(len(c.LogicalProcessors))
+ }
+
+ return cores, nil
+}
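`processorsGet` above splits `/proc/cpuinfo` into per-logical-processor attribute maps on blank lines. The standalone sketch below (independent of the vendored code) shows that parsing idea on canned input:

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	cpuinfo := "processor\t: 0\nphysical id\t: 0\ncore id\t: 0\n\n" +
		"processor\t: 1\nphysical id\t: 0\ncore id\t: 1\n\n"

	blocks := make([]map[string]string, 0)
	cur := make(map[string]string)
	scanner := bufio.NewScanner(strings.NewReader(cpuinfo))
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			// Blank line: close out the current logical processor block.
			blocks = append(blocks, cur)
			cur = make(map[string]string)
			continue
		}
		parts := strings.SplitN(line, ":", 2)
		cur[strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
	}
	fmt.Println("logical processors:", len(blocks)) // 2
	fmt.Println("core id of cpu1:", blocks[1]["core id"])
}
```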
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go
new file mode 100644
index 00000000..5d07ee43
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("cpu.Info.load not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go
new file mode 100644
index 00000000..3de16498
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/cpu/cpu_windows.go
@@ -0,0 +1,57 @@
+//go:build !linux
+// +build !linux
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package cpu
+
+import (
+ "github.com/StackExchange/wmi"
+)
+
+const wmqlProcessor = "SELECT Manufacturer, Name, NumberOfLogicalProcessors, NumberOfCores FROM Win32_Processor"
+
+type win32Processor struct {
+ Manufacturer *string
+ Name *string
+ NumberOfLogicalProcessors uint32
+ NumberOfCores uint32
+}
+
+func (i *Info) load() error {
+ // Getting info from WMI
+ var win32descriptions []win32Processor
+ if err := wmi.Query(wmqlProcessor, &win32descriptions); err != nil {
+ return err
+ }
+ // Converting into standard structures
+ i.Processors = processorsGet(win32descriptions)
+ var totCores uint32
+ var totThreads uint32
+ for _, p := range i.Processors {
+ totCores += p.NumCores
+ totThreads += p.NumThreads
+ }
+ i.TotalCores = totCores
+ i.TotalThreads = totThreads
+ return nil
+}
+
+func processorsGet(win32descriptions []win32Processor) []*Processor {
+ var procs []*Processor
+ // Converting into standard structures
+ for index, description := range win32descriptions {
+ p := &Processor{
+ ID: index,
+ Model: *description.Name,
+ Vendor: *description.Manufacturer,
+ NumCores: description.NumberOfCores,
+ NumThreads: description.NumberOfLogicalProcessors,
+ }
+ procs = append(procs, p)
+ }
+ return procs
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go
new file mode 100644
index 00000000..65864c7e
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu.go
@@ -0,0 +1,95 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/topology"
+)
+
+type GraphicsCard struct {
+ // the PCI address where the graphics card can be found
+ Address string `json:"address"`
+ // The "index" of the card on the bus (generally not useful information,
+ // but might as well include it)
+ Index int `json:"index"`
+ // pointer to a PCIDevice struct that describes the vendor and product
+ // model, etc
+ // TODO(jaypipes): Rename this field to PCI, instead of DeviceInfo
+ DeviceInfo *pci.Device `json:"pci"`
+ // Topology node that the graphics card is affined to. Will be nil if the
+ // architecture is not NUMA.
+ Node *topology.Node `json:"node,omitempty"`
+}
+
+func (card *GraphicsCard) String() string {
+ deviceStr := card.Address
+ if card.DeviceInfo != nil {
+ deviceStr = card.DeviceInfo.String()
+ }
+ nodeStr := ""
+ if card.Node != nil {
+ nodeStr = fmt.Sprintf(" [affined to NUMA node %d]", card.Node.ID)
+ }
+ return fmt.Sprintf(
+ "card #%d %s@%s",
+ card.Index,
+ nodeStr,
+ deviceStr,
+ )
+}
+
+type Info struct {
+ ctx *context.Context
+ GraphicsCards []*GraphicsCard `json:"cards"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// graphics cards on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ numCardsStr := "cards"
+ if len(i.GraphicsCards) == 1 {
+ numCardsStr = "card"
+ }
+ return fmt.Sprintf(
+ "gpu (%d graphics %s)",
+ len(i.GraphicsCards),
+ numCardsStr,
+ )
+}
+
+// simple private struct used to encapsulate gpu information in a top-level
+// "gpu" YAML/JSON map/object key
+type gpuPrinter struct {
+ Info *Info `json:"gpu"`
+}
+
+// YAMLString returns a string with the gpu information formatted as YAML
+// under a top-level "gpu:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, gpuPrinter{i})
+}
+
+// JSONString returns a string with the gpu information formatted as JSON
+// under a top-level "gpu:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, gpuPrinter{i}, indent)
+}
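A minimal usage sketch for the `gpu.New` constructor and `GraphicsCard.String` defined above:

```go
package main

import (
	"fmt"

	"github.com/jaypipes/ghw/pkg/gpu"
)

func main() {
	info, err := gpu.New()
	if err != nil {
		fmt.Printf("error getting GPU info: %v\n", err)
		return
	}
	for _, card := range info.GraphicsCards {
		// String() includes the PCI address and, on NUMA hosts, the node.
		fmt.Println(card.String())
	}
}
```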
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go
new file mode 100644
index 00000000..a2791e86
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_linux.go
@@ -0,0 +1,152 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/topology"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ _WARN_NO_SYS_CLASS_DRM = `
+/sys/class/drm does not exist on this system (likely the host system is a
+virtual machine or container with no graphics). Therefore,
+GPUInfo.GraphicsCards will be an empty array.
+`
+)
+
+func (i *Info) load() error {
+ // In Linux, each graphics card is listed under the /sys/class/drm
+ // directory as a symbolic link named "cardN", where N is a zero-based
+ // index of the card in the system. "DRM" stands for Direct Rendering
+ // Manager and is the Linux subsystem that is responsible for graphics I/O
+ //
+ // Each card may have multiple symbolic
+ // links in this directory representing the interfaces from the graphics
+ // card over a particular wire protocol (HDMI, DisplayPort, etc). These
+	// symbolic links are named cardN-<CONNECTOR>-<INDEX> (e.g. card0-DP-1). For
+ // instance, on one of my local workstations with an NVIDIA GTX 1050ti
+ // graphics card with one HDMI, one DisplayPort, and one DVI interface to
+ // the card, I see the following in /sys/class/drm:
+ //
+ // $ ll /sys/class/drm/
+ // total 0
+ // drwxr-xr-x 2 root root 0 Jul 16 11:50 ./
+ // drwxr-xr-x 75 root root 0 Jul 16 11:50 ../
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0-DP-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-DP-1/
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0-DVI-D-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-DVI-D-1/
+ // lrwxrwxrwx 1 root root 0 Jul 16 11:50 card0-HDMI-A-1 -> ../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0/card0-HDMI-A-1/
+ //
+ // In this routine, we are only interested in the first link (card0), which
+ // we follow to gather information about the actual device from the PCI
+ // subsystem (we query the modalias file of the PCI device's sysfs
+	// directory using the `ghw.PCIInfo.GetDevice()` function).
+ paths := linuxpath.New(i.ctx)
+ links, err := ioutil.ReadDir(paths.SysClassDRM)
+ if err != nil {
+ i.ctx.Warn(_WARN_NO_SYS_CLASS_DRM)
+ return nil
+ }
+ cards := make([]*GraphicsCard, 0)
+ for _, link := range links {
+ lname := link.Name()
+ if !strings.HasPrefix(lname, "card") {
+ continue
+ }
+ if strings.ContainsRune(lname, '-') {
+ continue
+ }
+ // Grab the card's zero-based integer index
+ lnameBytes := []byte(lname)
+ cardIdx, err := strconv.Atoi(string(lnameBytes[4:]))
+ if err != nil {
+ cardIdx = -1
+ }
+
+ // Calculate the card's PCI address by looking at the symbolic link's
+ // target
+ lpath := filepath.Join(paths.SysClassDRM, lname)
+ dest, err := os.Readlink(lpath)
+ if err != nil {
+ continue
+ }
+ pathParts := strings.Split(dest, "/")
+ numParts := len(pathParts)
+ pciAddress := pathParts[numParts-3]
+ card := &GraphicsCard{
+ Address: pciAddress,
+ Index: cardIdx,
+ }
+ cards = append(cards, card)
+ }
+ gpuFillNUMANodes(i.ctx, cards)
+ gpuFillPCIDevice(i.ctx, cards)
+ i.GraphicsCards = cards
+ return nil
+}
+
+// Loops through each GraphicsCard struct and attempts to fill the DeviceInfo
+// attribute with PCI device information
+func gpuFillPCIDevice(ctx *context.Context, cards []*GraphicsCard) {
+ pci, err := pci.New(context.WithContext(ctx))
+ if err != nil {
+ return
+ }
+ for _, card := range cards {
+ if card.DeviceInfo == nil {
+ card.DeviceInfo = pci.GetDevice(card.Address)
+ }
+ }
+}
+
+// Loops through each GraphicsCard struct and finds which NUMA node the card is
+// affined to, setting the GraphicsCard.Node field accordingly. If the host
+// system is not a NUMA system, the Node field will be set to nil.
+func gpuFillNUMANodes(ctx *context.Context, cards []*GraphicsCard) {
+ paths := linuxpath.New(ctx)
+ topo, err := topology.New(context.WithContext(ctx))
+	if err != nil {
+		// Problem getting topology information, so just set each graphics
+		// card's node to nil
+		for _, card := range cards {
+			card.Node = nil
+		}
+		return
+ }
+ for _, card := range cards {
+ // Each graphics card on a NUMA system will have a pseudo-file
+ // called /sys/class/drm/card$CARD_INDEX/device/numa_node which
+ // contains the NUMA node that the card is affined to
+ cardIndexStr := strconv.Itoa(card.Index)
+ fpath := filepath.Join(
+ paths.SysClassDRM,
+ "card"+cardIndexStr,
+ "device",
+ "numa_node",
+ )
+ nodeIdx := util.SafeIntFromFile(ctx, fpath)
+ if nodeIdx == -1 {
+ continue
+ }
+ for _, node := range topo.Nodes {
+ if nodeIdx == int(node.ID) {
+ card.Node = node
+ }
+ }
+ }
+}
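The PCI address extraction in `load()` above takes the third-from-last component of the `/sys/class/drm/cardN` symlink target. A standalone sketch of that rule on a sample target string:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// A typical /sys/class/drm/card0 symlink target, as shown in the
	// comment in load() above.
	dest := "../../devices/pci0000:00/0000:00:03.0/0000:03:00.0/drm/card0"
	parts := strings.Split(dest, "/")
	// The PCI address is the third-from-last path component.
	fmt.Println(parts[len(parts)-3]) // 0000:03:00.0
}
```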
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go
new file mode 100644
index 00000000..48991ec8
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("gpuFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go
new file mode 100644
index 00000000..5fb54281
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/gpu/gpu_windows.go
@@ -0,0 +1,131 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package gpu
+
+import (
+ "strings"
+
+ "github.com/StackExchange/wmi"
+ "github.com/jaypipes/pcidb"
+
+ "github.com/jaypipes/ghw/pkg/pci"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlVideoController = "SELECT Caption, CreationClassName, Description, DeviceID, Name, PNPDeviceID, SystemCreationClassName, SystemName, VideoArchitecture, VideoMemoryType, VideoModeDescription, VideoProcessor FROM Win32_VideoController"
+
+type win32VideoController struct {
+ Caption string
+ CreationClassName string
+ Description string
+ DeviceID string
+ Name string
+ PNPDeviceID string
+ SystemCreationClassName string
+ SystemName string
+ VideoArchitecture uint16
+ VideoMemoryType uint16
+ VideoModeDescription string
+ VideoProcessor string
+}
+
+const wqlPnPEntity = "SELECT Caption, CreationClassName, Description, DeviceID, Manufacturer, Name, PNPClass, PNPDeviceID FROM Win32_PnPEntity"
+
+type win32PnPEntity struct {
+ Caption string
+ CreationClassName string
+ Description string
+ DeviceID string
+ Manufacturer string
+ Name string
+ PNPClass string
+ PNPDeviceID string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32VideoControllerDescriptions []win32VideoController
+ if err := wmi.Query(wqlVideoController, &win32VideoControllerDescriptions); err != nil {
+ return err
+ }
+
+ // Building dynamic WHERE clause with addresses to create a single query collecting all desired data
+ queryAddresses := []string{}
+ for _, description := range win32VideoControllerDescriptions {
+ var queryAddres = strings.Replace(description.PNPDeviceID, "\\", `\\`, -1)
+ queryAddresses = append(queryAddresses, "PNPDeviceID='"+queryAddres+"'")
+ }
+ whereClause := strings.Join(queryAddresses[:], " OR ")
+
+ // Getting data from WMI
+ var win32PnPDescriptions []win32PnPEntity
+ var wqlPnPDevice = wqlPnPEntity + " WHERE " + whereClause
+ if err := wmi.Query(wqlPnPDevice, &win32PnPDescriptions); err != nil {
+ return err
+ }
+
+ // Converting into standard structures
+ cards := make([]*GraphicsCard, 0)
+ for _, description := range win32VideoControllerDescriptions {
+ card := &GraphicsCard{
+ Address: description.DeviceID, // https://stackoverflow.com/questions/32073667/how-do-i-discover-the-pcie-bus-topology-and-slot-numbers-on-the-board
+ Index: 0,
+ DeviceInfo: GetDevice(description.PNPDeviceID, win32PnPDescriptions),
+ }
+ cards = append(cards, card)
+ }
+ i.GraphicsCards = cards
+ return nil
+}
+
+func GetDevice(id string, entities []win32PnPEntity) *pci.Device {
+	// Backslashing the PnP address ID as required by JSON and the WMI query syntax: https://docs.microsoft.com/en-us/windows/win32/wmisdk/where-clause
+ var queryAddress = strings.Replace(id, "\\", `\\`, -1)
+ // Preparing default structure
+ var device = &pci.Device{
+ Address: queryAddress,
+ Vendor: &pcidb.Vendor{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Products: []*pcidb.Product{},
+ },
+ Subsystem: &pcidb.Product{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Subsystems: []*pcidb.Product{},
+ },
+ Product: &pcidb.Product{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Subsystems: []*pcidb.Product{},
+ },
+ Class: &pcidb.Class{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ Subclasses: []*pcidb.Subclass{},
+ },
+ Subclass: &pcidb.Subclass{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ ProgrammingInterfaces: []*pcidb.ProgrammingInterface{},
+ },
+ ProgrammingInterface: &pcidb.ProgrammingInterface{
+ ID: util.UNKNOWN,
+ Name: util.UNKNOWN,
+ },
+ }
+ // If an entity is found we get its data inside the standard structure
+ for _, description := range entities {
+ if id == description.PNPDeviceID {
+ device.Vendor.ID = description.Manufacturer
+ device.Vendor.Name = description.Manufacturer
+ device.Product.ID = description.Name
+ device.Product.Name = description.Description
+ break
+ }
+ }
+ return device
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go b/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go
new file mode 100644
index 00000000..09398d36
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/linuxdmi/dmi_linux.go
@@ -0,0 +1,29 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package linuxdmi
+
+import (
+ "io/ioutil"
+ "path/filepath"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+func Item(ctx *context.Context, value string) string {
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(paths.SysClassDMI, "id", value)
+
+ b, err := ioutil.ReadFile(path)
+ if err != nil {
+ ctx.Warn("Unable to read %s: %s\n", value, err)
+ return util.UNKNOWN
+ }
+
+ return strings.TrimSpace(string(b))
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go b/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go
new file mode 100644
index 00000000..c5967d61
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/linuxpath/path_linux.go
@@ -0,0 +1,115 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package linuxpath
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/jaypipes/ghw/pkg/context"
+)
+
+// PathRoots holds the roots of all the filesystem subtrees
+// ghw wants to access.
+type PathRoots struct {
+ Etc string
+ Proc string
+ Run string
+ Sys string
+ Var string
+}
+
+// DefaultPathRoots returns the canonical default value for PathRoots
+func DefaultPathRoots() PathRoots {
+ return PathRoots{
+ Etc: "/etc",
+ Proc: "/proc",
+ Run: "/run",
+ Sys: "/sys",
+ Var: "/var",
+ }
+}
+
+// PathRootsFromContext initializes PathRoots from the given Context,
+// allowing overrides of the canonical default paths.
+func PathRootsFromContext(ctx *context.Context) PathRoots {
+ roots := DefaultPathRoots()
+ if pathEtc, ok := ctx.PathOverrides["/etc"]; ok {
+ roots.Etc = pathEtc
+ }
+ if pathProc, ok := ctx.PathOverrides["/proc"]; ok {
+ roots.Proc = pathProc
+ }
+ if pathRun, ok := ctx.PathOverrides["/run"]; ok {
+ roots.Run = pathRun
+ }
+ if pathSys, ok := ctx.PathOverrides["/sys"]; ok {
+ roots.Sys = pathSys
+ }
+ if pathVar, ok := ctx.PathOverrides["/var"]; ok {
+ roots.Var = pathVar
+ }
+ return roots
+}
+
+type Paths struct {
+ VarLog string
+ ProcMeminfo string
+ ProcCpuinfo string
+ ProcMounts string
+ SysKernelMMHugepages string
+ SysBlock string
+ SysDevicesSystemNode string
+ SysDevicesSystemMemory string
+ SysBusPciDevices string
+ SysClassDRM string
+ SysClassDMI string
+ SysClassNet string
+ RunUdevData string
+}
+
+// New returns a new Paths struct containing filepath fields relative to the
+// supplied Context
+func New(ctx *context.Context) *Paths {
+ roots := PathRootsFromContext(ctx)
+ return &Paths{
+ VarLog: filepath.Join(ctx.Chroot, roots.Var, "log"),
+ ProcMeminfo: filepath.Join(ctx.Chroot, roots.Proc, "meminfo"),
+ ProcCpuinfo: filepath.Join(ctx.Chroot, roots.Proc, "cpuinfo"),
+ ProcMounts: filepath.Join(ctx.Chroot, roots.Proc, "self", "mounts"),
+ SysKernelMMHugepages: filepath.Join(ctx.Chroot, roots.Sys, "kernel", "mm", "hugepages"),
+ SysBlock: filepath.Join(ctx.Chroot, roots.Sys, "block"),
+ SysDevicesSystemNode: filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "node"),
+ SysDevicesSystemMemory: filepath.Join(ctx.Chroot, roots.Sys, "devices", "system", "memory"),
+ SysBusPciDevices: filepath.Join(ctx.Chroot, roots.Sys, "bus", "pci", "devices"),
+ SysClassDRM: filepath.Join(ctx.Chroot, roots.Sys, "class", "drm"),
+ SysClassDMI: filepath.Join(ctx.Chroot, roots.Sys, "class", "dmi"),
+ SysClassNet: filepath.Join(ctx.Chroot, roots.Sys, "class", "net"),
+ RunUdevData: filepath.Join(ctx.Chroot, roots.Run, "udev", "data"),
+ }
+}
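+
+// For illustration (a sketch, values assume the default roots): with
+// Chroot == "/" the constructor above produces paths such as
+//
+//	paths := linuxpath.New(ctx)
+//	// paths.ProcMeminfo -> "/proc/meminfo"
+//	// paths.SysClassNet -> "/sys/class/net"
+//	// paths.RunUdevData -> "/run/udev/data"
+//
+// while a Chroot of "/host" (or a PathOverrides entry) re-roots the
+// corresponding subtree.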
+
+func (p *Paths) NodeCPU(nodeID int, lpID int) string {
+ return filepath.Join(
+ p.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ fmt.Sprintf("cpu%d", lpID),
+ )
+}
+
+func (p *Paths) NodeCPUCache(nodeID int, lpID int) string {
+ return filepath.Join(
+ p.NodeCPU(nodeID, lpID),
+ "cache",
+ )
+}
+
+func (p *Paths) NodeCPUCacheIndex(nodeID int, lpID int, cacheIndex int) string {
+ return filepath.Join(
+ p.NodeCPUCache(nodeID, lpID),
+ fmt.Sprintf("index%d", cacheIndex),
+ )
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go b/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go
new file mode 100644
index 00000000..e8f1bbea
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/marshal/marshal.go
@@ -0,0 +1,47 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package marshal
+
+import (
+ "encoding/json"
+
+ "github.com/ghodss/yaml"
+ "github.com/jaypipes/ghw/pkg/context"
+)
+
+// SafeYAML returns a string after marshalling the supplied parameter into YAML
+func SafeYAML(ctx *context.Context, p interface{}) string {
+ b, err := json.Marshal(p)
+ if err != nil {
+ ctx.Warn("error marshalling JSON: %s", err)
+ return ""
+ }
+ yb, err := yaml.JSONToYAML(b)
+ if err != nil {
+ ctx.Warn("error converting JSON to YAML: %s", err)
+ return ""
+ }
+ return string(yb)
+}
+
+// SafeJSON returns a string after marshalling the supplied parameter into
+// JSON. Accepts an optional argument to trigger pretty/indented formatting of
+// the JSON string
+func SafeJSON(ctx *context.Context, p interface{}, indent bool) string {
+ var b []byte
+ var err error
+ if !indent {
+ b, err = json.Marshal(p)
+ } else {
+ b, err = json.MarshalIndent(&p, "", " ")
+ }
+ if err != nil {
+ ctx.Warn("error marshalling JSON: %s", err)
+ return ""
+ }
+ return string(b)
+}
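+
+// A minimal usage sketch (the report type below is hypothetical):
+//
+//	type report struct {
+//		Count int `json:"count"`
+//	}
+//	y := marshal.SafeYAML(ctx, report{Count: 2})       // "count: 2\n"
+//	j := marshal.SafeJSON(ctx, report{Count: 2}, true) // indented JSON
+//
+// Both helpers log a warning through the context and return "" on failure
+// instead of propagating the error.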
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go
new file mode 100644
index 00000000..bdf1ab1a
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory.go
@@ -0,0 +1,88 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "fmt"
+ "math"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+type Module struct {
+ Label string `json:"label"`
+ Location string `json:"location"`
+ SerialNumber string `json:"serial_number"`
+ SizeBytes int64 `json:"size_bytes"`
+ Vendor string `json:"vendor"`
+}
+
+type Area struct {
+ TotalPhysicalBytes int64 `json:"total_physical_bytes"`
+ TotalUsableBytes int64 `json:"total_usable_bytes"`
+ // An array of sizes, in bytes, of memory pages supported in this area
+ SupportedPageSizes []uint64 `json:"supported_page_sizes"`
+ Modules []*Module `json:"modules"`
+}
+
+func (a *Area) String() string {
+ tpbs := util.UNKNOWN
+ if a.TotalPhysicalBytes > 0 {
+ tpb := a.TotalPhysicalBytes
+ unit, unitStr := unitutil.AmountString(tpb)
+ tpb = int64(math.Ceil(float64(a.TotalPhysicalBytes) / float64(unit)))
+ tpbs = fmt.Sprintf("%d%s", tpb, unitStr)
+ }
+ tubs := util.UNKNOWN
+ if a.TotalUsableBytes > 0 {
+ tub := a.TotalUsableBytes
+ unit, unitStr := unitutil.AmountString(tub)
+ tub = int64(math.Ceil(float64(a.TotalUsableBytes) / float64(unit)))
+ tubs = fmt.Sprintf("%d%s", tub, unitStr)
+ }
+ return fmt.Sprintf("memory (%s physical, %s usable)", tpbs, tubs)
+}
+
+type Info struct {
+ ctx *context.Context
+ Area
+}
+
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ return i.Area.String()
+}
+
+// simple private struct used to encapsulate memory information in a top-level
+// "memory" YAML/JSON map/object key
+type memoryPrinter struct {
+ Info *Info `json:"memory"`
+}
+
+// YAMLString returns a string with the memory information formatted as YAML
+// under a top-level "memory:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, memoryPrinter{i})
+}
+
+// JSONString returns a string with the memory information formatted as JSON
+// under a top-level "memory:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, memoryPrinter{i}, indent)
+}
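+
+// Typical consumption, as a sketch (output shown is illustrative):
+//
+//	mem, err := memory.New()
+//	if err == nil {
+//		fmt.Println(mem.String()) // e.g. "memory (24GB physical, 24GB usable)"
+//		fmt.Println(mem.TotalUsableBytes)
+//	}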
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go
new file mode 100644
index 00000000..8bc4074d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache.go
@@ -0,0 +1,127 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "encoding/json"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+type CacheType int
+
+const (
+ CACHE_TYPE_UNIFIED CacheType = iota
+ CACHE_TYPE_INSTRUCTION
+ CACHE_TYPE_DATA
+)
+
+var (
+ memoryCacheTypeString = map[CacheType]string{
+ CACHE_TYPE_UNIFIED: "Unified",
+ CACHE_TYPE_INSTRUCTION: "Instruction",
+ CACHE_TYPE_DATA: "Data",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `memoryCacheTypeString`.
+ // This is done because of the choice we made in
+ // CacheType:MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringMemoryCacheType = map[string]CacheType{
+ "unified": CACHE_TYPE_UNIFIED,
+ "instruction": CACHE_TYPE_INSTRUCTION,
+ "data": CACHE_TYPE_DATA,
+ }
+)
+
+func (a CacheType) String() string {
+ return memoryCacheTypeString[a]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
+func (a CacheType) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(a.String()))), nil
+}
+
+func (a *CacheType) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringMemoryCacheType[key]
+ if !ok {
+ return fmt.Errorf("unknown memory cache type: %q", key)
+ }
+ *a = val
+ return nil
+}
+
+type SortByCacheLevelTypeFirstProcessor []*Cache
+
+func (a SortByCacheLevelTypeFirstProcessor) Len() int { return len(a) }
+func (a SortByCacheLevelTypeFirstProcessor) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SortByCacheLevelTypeFirstProcessor) Less(i, j int) bool {
+ if a[i].Level < a[j].Level {
+ return true
+ } else if a[i].Level == a[j].Level {
+ if a[i].Type < a[j].Type {
+ return true
+ } else if a[i].Type == a[j].Type {
+ // NOTE(jaypipes): len(LogicalProcessors) is always >0 and is always
+ // sorted lowest LP ID to highest LP ID
+ return a[i].LogicalProcessors[0] < a[j].LogicalProcessors[0]
+ }
+ }
+ return false
+}
+
+type SortByLogicalProcessorId []uint32
+
+func (a SortByLogicalProcessorId) Len() int { return len(a) }
+func (a SortByLogicalProcessorId) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a SortByLogicalProcessorId) Less(i, j int) bool { return a[i] < a[j] }
+
+type Cache struct {
+ Level uint8 `json:"level"`
+ Type CacheType `json:"type"`
+ SizeBytes uint64 `json:"size_bytes"`
+ // The set of logical processors (hardware threads) that have access to the
+ // cache
+ LogicalProcessors []uint32 `json:"logical_processors"`
+}
+
+func (c *Cache) String() string {
+ sizeKb := c.SizeBytes / uint64(unitutil.KB)
+ typeStr := ""
+ if c.Type == CACHE_TYPE_INSTRUCTION {
+ typeStr = "i"
+ } else if c.Type == CACHE_TYPE_DATA {
+ typeStr = "d"
+ }
+ cacheIDStr := fmt.Sprintf("L%d%s", c.Level, typeStr)
+ processorMapStr := ""
+ if c.LogicalProcessors != nil {
+ lpStrings := make([]string, len(c.LogicalProcessors))
+ for x, lpid := range c.LogicalProcessors {
+ lpStrings[x] = strconv.Itoa(int(lpid))
+ }
+ processorMapStr = " shared with logical processors: " + strings.Join(lpStrings, ",")
+ }
+ return fmt.Sprintf(
+ "%s cache (%d KB)%s",
+ cacheIDStr,
+ sizeKb,
+ processorMapStr,
+ )
+}
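+
+// As an example of the format produced above (assuming unitutil.KB is 1024):
+//
+//	c := &Cache{Level: 1, Type: CACHE_TYPE_DATA, SizeBytes: 32768, LogicalProcessors: []uint32{0, 1}}
+//	c.String() // "L1d cache (32 KB) shared with logical processors: 0,1"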
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go
new file mode 100644
index 00000000..dfb5c1f1
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_cache_linux.go
@@ -0,0 +1,188 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+func CachesForNode(ctx *context.Context, nodeID int) ([]*Cache, error) {
+	// The /sys/devices/system/node/nodeX directory contains a subdirectory
+	// called 'cpuX' for each logical processor assigned to the node. Each of
+	// those subdirectories contains a 'cache' subdirectory which contains a number
+ // of subdirectories beginning with 'index' and ending in the cache's
+ // internal 0-based identifier. Those subdirectories contain a number of
+ // files, including 'shared_cpu_list', 'size', and 'type' which we use to
+ // determine cache characteristics.
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ )
+ caches := make(map[string]*Cache)
+
+ files, err := ioutil.ReadDir(path)
+ if err != nil {
+ return nil, err
+ }
+ for _, file := range files {
+ filename := file.Name()
+ if !strings.HasPrefix(filename, "cpu") {
+ continue
+ }
+ if filename == "cpumap" || filename == "cpulist" {
+ // There are two files in the node directory that start with 'cpu'
+ // but are not subdirectories ('cpulist' and 'cpumap'). Ignore
+ // these files.
+ continue
+ }
+ // Grab the logical processor ID by cutting the integer from the
+ // /sys/devices/system/node/nodeX/cpuX filename
+ cpuPath := filepath.Join(path, filename)
+ lpID, _ := strconv.Atoi(filename[3:])
+
+ // Inspect the caches for each logical processor. There will be a
+ // /sys/devices/system/node/nodeX/cpuX/cache directory containing a
+ // number of directories beginning with the prefix "index" followed by
+ // a number. The number indicates the level of the cache, which
+ // indicates the "distance" from the processor. Each of these
+ // directories contains information about the size of that level of
+ // cache and the processors mapped to it.
+ cachePath := filepath.Join(cpuPath, "cache")
+ if _, err = os.Stat(cachePath); errors.Is(err, os.ErrNotExist) {
+ continue
+ }
+ cacheDirFiles, err := ioutil.ReadDir(cachePath)
+ if err != nil {
+ return nil, err
+ }
+ for _, cacheDirFile := range cacheDirFiles {
+ cacheDirFileName := cacheDirFile.Name()
+ if !strings.HasPrefix(cacheDirFileName, "index") {
+ continue
+ }
+ cacheIndex, _ := strconv.Atoi(cacheDirFileName[5:])
+
+ // The cache information is repeated for each node, so here, we
+			// just ensure that we only have one Cache object for each
+ // unique combination of level, type and processor map
+ level := memoryCacheLevel(ctx, paths, nodeID, lpID, cacheIndex)
+ cacheType := memoryCacheType(ctx, paths, nodeID, lpID, cacheIndex)
+ sharedCpuMap := memoryCacheSharedCPUMap(ctx, paths, nodeID, lpID, cacheIndex)
+ cacheKey := fmt.Sprintf("%d-%d-%s", level, cacheType, sharedCpuMap)
+
+ cache, exists := caches[cacheKey]
+ if !exists {
+ size := memoryCacheSize(ctx, paths, nodeID, lpID, level)
+ cache = &Cache{
+ Level: uint8(level),
+ Type: cacheType,
+ SizeBytes: uint64(size) * uint64(unitutil.KB),
+ LogicalProcessors: make([]uint32, 0),
+ }
+ caches[cacheKey] = cache
+ }
+ cache.LogicalProcessors = append(
+ cache.LogicalProcessors,
+ uint32(lpID),
+ )
+ }
+ }
+
+ cacheVals := make([]*Cache, len(caches))
+ x := 0
+ for _, c := range caches {
+		// ensure the cache's processor set is sorted by logical processor ID
+ sort.Sort(SortByLogicalProcessorId(c.LogicalProcessors))
+ cacheVals[x] = c
+ x++
+ }
+
+ return cacheVals, nil
+}
+
+func memoryCacheLevel(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
+ levelPath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "level",
+ )
+ levelContents, err := ioutil.ReadFile(levelPath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return -1
+ }
+ // levelContents is now a []byte with the last byte being a newline
+ // character. Trim that off and convert the contents to an integer.
+ level, err := strconv.Atoi(string(levelContents[:len(levelContents)-1]))
+ if err != nil {
+ ctx.Warn("Unable to parse int from %s", levelContents)
+ return -1
+ }
+ return level
+}
+
+func memoryCacheSize(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) int {
+ sizePath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "size",
+ )
+ sizeContents, err := ioutil.ReadFile(sizePath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return -1
+ }
+ // size comes as XK\n, so we trim off the K and the newline.
+ size, err := strconv.Atoi(string(sizeContents[:len(sizeContents)-2]))
+ if err != nil {
+ ctx.Warn("Unable to parse int from %s", sizeContents)
+ return -1
+ }
+ return size
+}
+
+func memoryCacheType(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) CacheType {
+ typePath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "type",
+ )
+ cacheTypeContents, err := ioutil.ReadFile(typePath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return CACHE_TYPE_UNIFIED
+ }
+ switch string(cacheTypeContents[:len(cacheTypeContents)-1]) {
+ case "Data":
+ return CACHE_TYPE_DATA
+ case "Instruction":
+ return CACHE_TYPE_INSTRUCTION
+ default:
+ return CACHE_TYPE_UNIFIED
+ }
+}
+
+func memoryCacheSharedCPUMap(ctx *context.Context, paths *linuxpath.Paths, nodeID int, lpID int, cacheIndex int) string {
+ scpuPath := filepath.Join(
+ paths.NodeCPUCacheIndex(nodeID, lpID, cacheIndex),
+ "shared_cpu_map",
+ )
+ sharedCpuMap, err := ioutil.ReadFile(scpuPath)
+ if err != nil {
+ ctx.Warn("%s", err)
+ return ""
+ }
+ return string(sharedCpuMap[:len(sharedCpuMap)-1])
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go
new file mode 100644
index 00000000..4b7631a1
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_linux.go
@@ -0,0 +1,299 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "bufio"
+ "compress/gzip"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/unitutil"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ _WARN_CANNOT_DETERMINE_PHYSICAL_MEMORY = `
+Could not determine total physical bytes of memory. This may
+be due to the host being a virtual machine or container with no
+/var/log/syslog file or /sys/devices/system/memory directory, or
+the current user may not have necessary privileges to read the syslog.
+We are falling back to setting the total physical amount of memory to
+the total usable amount of memory
+`
+)
+
+var (
+ // System log lines will look similar to the following:
+ // ... kernel: [0.000000] Memory: 24633272K/25155024K ...
+ _REGEX_SYSLOG_MEMLINE = regexp.MustCompile(`Memory:\s+\d+K\/(\d+)K`)
+)
+
+func (i *Info) load() error {
+ paths := linuxpath.New(i.ctx)
+ tub := memTotalUsableBytes(paths)
+ if tub < 1 {
+ return fmt.Errorf("Could not determine total usable bytes of memory")
+ }
+ i.TotalUsableBytes = tub
+ tpb := memTotalPhysicalBytes(paths)
+ i.TotalPhysicalBytes = tpb
+ if tpb < 1 {
+ i.ctx.Warn(_WARN_CANNOT_DETERMINE_PHYSICAL_MEMORY)
+ i.TotalPhysicalBytes = tub
+ }
+ i.SupportedPageSizes, _ = memorySupportedPageSizes(paths.SysKernelMMHugepages)
+ return nil
+}
+
+func AreaForNode(ctx *context.Context, nodeID int) (*Area, error) {
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ )
+
+ blockSizeBytes, err := memoryBlockSizeBytes(paths.SysDevicesSystemMemory)
+ if err != nil {
+ return nil, err
+ }
+
+ totPhys, err := memoryTotalPhysicalBytesFromPath(path, blockSizeBytes)
+ if err != nil {
+ return nil, err
+ }
+
+ totUsable, err := memoryTotalUsableBytesFromPath(filepath.Join(path, "meminfo"))
+ if err != nil {
+ return nil, err
+ }
+
+ supportedHP, err := memorySupportedPageSizes(filepath.Join(path, "hugepages"))
+ if err != nil {
+ return nil, err
+ }
+
+ return &Area{
+ TotalPhysicalBytes: totPhys,
+ TotalUsableBytes: totUsable,
+ SupportedPageSizes: supportedHP,
+ }, nil
+}
+
+func memoryBlockSizeBytes(dir string) (uint64, error) {
+ // get the memory block size in byte in hexadecimal notation
+ blockSize := filepath.Join(dir, "block_size_bytes")
+
+ d, err := ioutil.ReadFile(blockSize)
+ if err != nil {
+ return 0, err
+ }
+ return strconv.ParseUint(strings.TrimSpace(string(d)), 16, 64)
+}
+
+func memTotalPhysicalBytes(paths *linuxpath.Paths) (total int64) {
+ defer func() {
+ // fallback to the syslog file approach in case of error
+ if total < 0 {
+ total = memTotalPhysicalBytesFromSyslog(paths)
+ }
+ }()
+
+ // detect physical memory from /sys/devices/system/memory
+ dir := paths.SysDevicesSystemMemory
+ blockSizeBytes, err := memoryBlockSizeBytes(dir)
+ if err != nil {
+ total = -1
+ return total
+ }
+
+ total, err = memoryTotalPhysicalBytesFromPath(dir, blockSizeBytes)
+ if err != nil {
+ total = -1
+ }
+ return total
+}
+
+func memoryTotalPhysicalBytesFromPath(dir string, blockSizeBytes uint64) (int64, error) {
+	// iterate over the memory blocks in /sys/.../memory*;
+	// if a memory block's state is 'online' we increment the total
+	// by the memory block size to determine the amount of physical
+	// memory available on this system.
+	// This works both for the system-wide report:
+	// /sys/devices/system/memory/memory*
+	// and for the per-NUMA-node report:
+	// /sys/devices/system/node/node*/memory*
+
+ sysMemory, err := filepath.Glob(filepath.Join(dir, "memory*"))
+ if err != nil {
+ return -1, err
+ } else if sysMemory == nil {
+ return -1, fmt.Errorf("cannot find memory entries in %q", dir)
+ }
+
+ var total int64
+ for _, path := range sysMemory {
+ s, err := ioutil.ReadFile(filepath.Join(path, "state"))
+ if err != nil {
+ return -1, err
+ }
+ if strings.TrimSpace(string(s)) != "online" {
+ continue
+ }
+ total += int64(blockSizeBytes)
+ }
+ return total, nil
+}
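+
+// A worked example (numbers are hypothetical): with a block_size_bytes of
+// 0x8000000 (128 MiB) and 192 memory blocks reporting state "online", the
+// function above returns 192 * 134217728 = 25769803776 bytes (24 GiB).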
+
+func memTotalPhysicalBytesFromSyslog(paths *linuxpath.Paths) int64 {
+ // In Linux, the total physical memory can be determined by looking at the
+	// output of dmidecode; however, dmidecode requires root privileges to run,
+ // so instead we examine the system logs for startup information containing
+ // total physical memory and cache the results of this.
+ findPhysicalKb := func(line string) int64 {
+ matches := _REGEX_SYSLOG_MEMLINE.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ i, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return -1
+ }
+ return int64(i * 1024)
+ }
+ return -1
+ }
+
+ // /var/log will contain a file called syslog and 0 or more files called
+ // syslog.$NUMBER or syslog.$NUMBER.gz containing system log records. We
+ // search each, stopping when we match a system log record line that
+ // contains physical memory information.
+ logDir := paths.VarLog
+ logFiles, err := ioutil.ReadDir(logDir)
+ if err != nil {
+ return -1
+ }
+ for _, file := range logFiles {
+ if strings.HasPrefix(file.Name(), "syslog") {
+ fullPath := filepath.Join(logDir, file.Name())
+ unzip := strings.HasSuffix(file.Name(), ".gz")
+ var r io.ReadCloser
+ r, err = os.Open(fullPath)
+ if err != nil {
+ return -1
+ }
+ defer util.SafeClose(r)
+ if unzip {
+ r, err = gzip.NewReader(r)
+ if err != nil {
+ return -1
+ }
+ }
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ size := findPhysicalKb(line)
+ if size > 0 {
+ return size
+ }
+ }
+ }
+ }
+ return -1
+}
+
+func memTotalUsableBytes(paths *linuxpath.Paths) int64 {
+ amount, err := memoryTotalUsableBytesFromPath(paths.ProcMeminfo)
+ if err != nil {
+ return -1
+ }
+ return amount
+}
+
+func memoryTotalUsableBytesFromPath(meminfoPath string) (int64, error) {
+ // In Linux, /proc/meminfo or its close relative
+ // /sys/devices/system/node/node*/meminfo
+ // contains a set of memory-related amounts, with
+ // lines looking like the following:
+ //
+ // $ cat /proc/meminfo
+ // MemTotal: 24677596 kB
+ // MemFree: 21244356 kB
+ // MemAvailable: 22085432 kB
+ // ...
+ // HugePages_Total: 0
+ // HugePages_Free: 0
+ // HugePages_Rsvd: 0
+ // HugePages_Surp: 0
+ // ...
+ //
+ // It's worth noting that /proc/meminfo returns exact information, not
+ // "theoretical" information. For instance, on the above system, I have
+ // 24GB of RAM but MemTotal is indicating only around 23GB. This is because
+ // MemTotal contains the exact amount of *usable* memory after accounting
+ // for the kernel's resident memory size and a few reserved bits.
+ // Please note GHW cares about the subset of lines shared between system-wide
+ // and per-NUMA-node meminfos. For more information, see:
+ //
+ // https://www.kernel.org/doc/Documentation/filesystems/proc.txt
+ r, err := os.Open(meminfoPath)
+ if err != nil {
+ return -1, err
+ }
+ defer util.SafeClose(r)
+
+ scanner := bufio.NewScanner(r)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := strings.Split(line, ":")
+ key := parts[0]
+ if !strings.Contains(key, "MemTotal") {
+ continue
+ }
+ rawValue := parts[1]
+ inKb := strings.HasSuffix(rawValue, "kB")
+ value, err := strconv.Atoi(strings.TrimSpace(strings.TrimSuffix(rawValue, "kB")))
+ if err != nil {
+ return -1, err
+ }
+ if inKb {
+ value = value * int(unitutil.KB)
+ }
+ return int64(value), nil
+ }
+ return -1, fmt.Errorf("failed to find MemTotal entry in path %q", meminfoPath)
+}
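+
+// For example (hypothetical meminfo contents), a line such as
+//
+//	MemTotal:       24677596 kB
+//
+// is parsed above into 24677596 * unitutil.KB bytes; only the MemTotal entry
+// is ever consumed, every other key is skipped by the loop.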
+
+func memorySupportedPageSizes(hpDir string) ([]uint64, error) {
+ // In Linux, /sys/kernel/mm/hugepages contains a directory per page size
+ // supported by the kernel. The directory name corresponds to the pattern
+ // 'hugepages-{pagesize}kb'
+ out := make([]uint64, 0)
+
+ files, err := ioutil.ReadDir(hpDir)
+ if err != nil {
+ return out, err
+ }
+ for _, file := range files {
+ parts := strings.Split(file.Name(), "-")
+ sizeStr := parts[1]
+ // Cut off the 'kb'
+ sizeStr = sizeStr[0 : len(sizeStr)-2]
+ size, err := strconv.Atoi(sizeStr)
+ if err != nil {
+ return out, err
+ }
+ out = append(out, uint64(size*int(unitutil.KB)))
+ }
+ return out, nil
+}
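+
+// As an illustration: a directory named "hugepages-2048kB" yields a supported
+// page size of 2048 * unitutil.KB bytes, and "hugepages-1048576kB" yields a
+// 1 GiB hugepage size (assuming unitutil.KB is 1024).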
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go
new file mode 100644
index 00000000..6ce99e00
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("mem.Info.load not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go
new file mode 100644
index 00000000..c3a3945c
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/memory/memory_windows.go
@@ -0,0 +1,72 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package memory
+
+import (
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/unitutil"
+)
+
+const wqlOperatingSystem = "SELECT TotalVisibleMemorySize FROM Win32_OperatingSystem"
+
+type win32OperatingSystem struct {
+ TotalVisibleMemorySize *uint64
+}
+
+const wqlPhysicalMemory = "SELECT BankLabel, Capacity, DataWidth, Description, DeviceLocator, Manufacturer, Model, Name, PartNumber, PositionInRow, SerialNumber, Speed, Tag, TotalWidth FROM Win32_PhysicalMemory"
+
+type win32PhysicalMemory struct {
+ BankLabel *string
+ Capacity *uint64
+ DataWidth *uint16
+ Description *string
+ DeviceLocator *string
+ Manufacturer *string
+ Model *string
+ Name *string
+ PartNumber *string
+ PositionInRow *uint32
+ SerialNumber *string
+ Speed *uint32
+ Tag *string
+ TotalWidth *uint16
+}
+
+func (i *Info) load() error {
+ // Getting info from WMI
+ var win32OSDescriptions []win32OperatingSystem
+ if err := wmi.Query(wqlOperatingSystem, &win32OSDescriptions); err != nil {
+ return err
+ }
+ var win32MemDescriptions []win32PhysicalMemory
+ if err := wmi.Query(wqlPhysicalMemory, &win32MemDescriptions); err != nil {
+ return err
+ }
+ // We calculate total physical memory size by summing the DIMM sizes
+ var totalPhysicalBytes uint64
+ i.Modules = make([]*Module, 0, len(win32MemDescriptions))
+ for _, description := range win32MemDescriptions {
+ totalPhysicalBytes += *description.Capacity
+ i.Modules = append(i.Modules, &Module{
+ Label: *description.BankLabel,
+ Location: *description.DeviceLocator,
+ SerialNumber: *description.SerialNumber,
+ SizeBytes: int64(*description.Capacity),
+ Vendor: *description.Manufacturer,
+ })
+ }
+ var totalUsableBytes uint64
+ for _, description := range win32OSDescriptions {
+		// TotalVisibleMemorySize is the amount of memory made available to us
+		// by the operating system, **in kilobytes**
+ totalUsableBytes += *description.TotalVisibleMemorySize * uint64(unitutil.KB)
+ }
+ i.TotalUsableBytes = int64(totalUsableBytes)
+ i.TotalPhysicalBytes = int64(totalPhysicalBytes)
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net.go b/vendor/github.com/jaypipes/ghw/pkg/net/net.go
new file mode 100644
index 00000000..8994d112
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net.go
@@ -0,0 +1,83 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+type NICCapability struct {
+ Name string `json:"name"`
+ IsEnabled bool `json:"is_enabled"`
+ CanEnable bool `json:"can_enable"`
+}
+
+type NIC struct {
+ Name string `json:"name"`
+ MacAddress string `json:"mac_address"`
+ IsVirtual bool `json:"is_virtual"`
+ Capabilities []*NICCapability `json:"capabilities"`
+ PCIAddress *string `json:"pci_address,omitempty"`
+ // TODO(fromani): add other hw addresses (USB) when we support them
+}
+
+func (n *NIC) String() string {
+ isVirtualStr := ""
+ if n.IsVirtual {
+ isVirtualStr = " (virtual)"
+ }
+ return fmt.Sprintf(
+ "%s%s",
+ n.Name,
+ isVirtualStr,
+ )
+}
+
+type Info struct {
+ ctx *context.Context
+ NICs []*NIC `json:"nics"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// network interface controllers (NICs) on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ return fmt.Sprintf(
+ "net (%d NICs)",
+ len(i.NICs),
+ )
+}
+
+// simple private struct used to encapsulate net information in a
+// top-level "net" YAML/JSON map/object key
+type netPrinter struct {
+ Info *Info `json:"network"`
+}
+
+// YAMLString returns a string with the net information formatted as YAML
+// under a top-level "net:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, netPrinter{i})
+}
+
+// JSONString returns a string with the net information formatted as JSON
+// under a top-level "net:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, netPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go
new file mode 100644
index 00000000..1b338dfa
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_linux.go
@@ -0,0 +1,222 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+)
+
+const (
+ _WARN_ETHTOOL_NOT_INSTALLED = `ethtool not installed. Cannot grab NIC capabilities`
+)
+
+func (i *Info) load() error {
+ i.NICs = nics(i.ctx)
+ return nil
+}
+
+func nics(ctx *context.Context) []*NIC {
+ nics := make([]*NIC, 0)
+
+ paths := linuxpath.New(ctx)
+ files, err := ioutil.ReadDir(paths.SysClassNet)
+ if err != nil {
+ return nics
+ }
+
+ etAvailable := ctx.EnableTools
+ if etAvailable {
+ if etInstalled := ethtoolInstalled(); !etInstalled {
+ ctx.Warn(_WARN_ETHTOOL_NOT_INSTALLED)
+ etAvailable = false
+ }
+ }
+
+ for _, file := range files {
+ filename := file.Name()
+ // Ignore loopback...
+ if filename == "lo" {
+ continue
+ }
+
+ netPath := filepath.Join(paths.SysClassNet, filename)
+ dest, _ := os.Readlink(netPath)
+ isVirtual := false
+ if strings.Contains(dest, "devices/virtual/net") {
+ isVirtual = true
+ }
+
+ nic := &NIC{
+ Name: filename,
+ IsVirtual: isVirtual,
+ }
+
+ mac := netDeviceMacAddress(paths, filename)
+ nic.MacAddress = mac
+ if etAvailable {
+ nic.Capabilities = netDeviceCapabilities(ctx, filename)
+ } else {
+ nic.Capabilities = []*NICCapability{}
+ }
+
+ nic.PCIAddress = netDevicePCIAddress(paths.SysClassNet, filename)
+
+ nics = append(nics, nic)
+ }
+ return nics
+}
+
+func netDeviceMacAddress(paths *linuxpath.Paths, dev string) string {
+	// Instead of using udevadm, we can get the device's MAC address by examining
+	// the /sys/class/net/$DEVICE/address file in sysfs. However, for devices
+	// that have addr_assign_type != 0, return an empty string since the MAC
+	// address is random.
+ aatPath := filepath.Join(paths.SysClassNet, dev, "addr_assign_type")
+ contents, err := ioutil.ReadFile(aatPath)
+ if err != nil {
+ return ""
+ }
+ if strings.TrimSpace(string(contents)) != "0" {
+ return ""
+ }
+ addrPath := filepath.Join(paths.SysClassNet, dev, "address")
+ contents, err = ioutil.ReadFile(addrPath)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(contents))
+}
+
+func ethtoolInstalled() bool {
+ _, err := exec.LookPath("ethtool")
+ return err == nil
+}
+
+func netDeviceCapabilities(ctx *context.Context, dev string) []*NICCapability {
+ caps := make([]*NICCapability, 0)
+ path, _ := exec.LookPath("ethtool")
+ cmd := exec.Command(path, "-k", dev)
+ var out bytes.Buffer
+ cmd.Stdout = &out
+ err := cmd.Run()
+ if err != nil {
+ msg := fmt.Sprintf("could not grab NIC capabilities for %s: %s", dev, err)
+ ctx.Warn(msg)
+ return caps
+ }
+
+ // The out variable will now contain something that looks like the
+ // following.
+ //
+ // Features for enp58s0f1:
+ // rx-checksumming: on
+ // tx-checksumming: off
+ // tx-checksum-ipv4: off
+ // tx-checksum-ip-generic: off [fixed]
+ // tx-checksum-ipv6: off
+ // tx-checksum-fcoe-crc: off [fixed]
+ // tx-checksum-sctp: off [fixed]
+ // scatter-gather: off
+ // tx-scatter-gather: off
+ // tx-scatter-gather-fraglist: off [fixed]
+ // tcp-segmentation-offload: off
+ // tx-tcp-segmentation: off
+ // tx-tcp-ecn-segmentation: off [fixed]
+ // tx-tcp-mangleid-segmentation: off
+ // tx-tcp6-segmentation: off
+ // < snipped >
+ scanner := bufio.NewScanner(&out)
+ // Skip the first line...
+ scanner.Scan()
+ for scanner.Scan() {
+ line := strings.TrimPrefix(scanner.Text(), "\t")
+ caps = append(caps, netParseEthtoolFeature(line))
+ }
+ return caps
+}
+
+// netParseEthtoolFeature parses a line from the ethtool -k output and returns
+// a NICCapability.
+//
+// The supplied line will look like the following:
+//
+// tx-checksum-ip-generic: off [fixed]
+//
+// [fixed] indicates that the feature may not be turned on/off. Note: it makes
+// no difference whether a privileged user runs `ethtool -k` when determining
+// whether [fixed] appears for a feature.
+func netParseEthtoolFeature(line string) *NICCapability {
+ parts := strings.Fields(line)
+ cap := strings.TrimSuffix(parts[0], ":")
+ enabled := parts[1] == "on"
+ fixed := len(parts) == 3 && parts[2] == "[fixed]"
+ return &NICCapability{
+ Name: cap,
+ IsEnabled: enabled,
+ CanEnable: !fixed,
+ }
+}
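+
+// For example, the line "tx-checksum-ip-generic: off [fixed]" parses to
+//
+//	&NICCapability{Name: "tx-checksum-ip-generic", IsEnabled: false, CanEnable: false}
+//
+// while "rx-checksumming: on" yields IsEnabled: true and CanEnable: true.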
+
+func netDevicePCIAddress(netDevDir, netDevName string) *string {
+	// What we do here is not that hard in the end: we need to navigate the sysfs
+	// up to the directory belonging to the device backing the network interface.
+	// We can make a few relatively safe assumptions, but the safest way is to
+	// follow the right links. And so we go.
+ // First of all, knowing the network device name we need to resolve the backing
+ // device path to its full sysfs path.
+ // say we start with netDevDir="/sys/class/net" and netDevName="enp0s31f6"
+ netPath := filepath.Join(netDevDir, netDevName)
+ dest, err := os.Readlink(netPath)
+ if err != nil {
+ // bail out with empty value
+ return nil
+ }
+ // now we have something like dest="../../devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+ // remember the path is relative to netDevDir="/sys/class/net"
+
+ netDev := filepath.Clean(filepath.Join(netDevDir, dest))
+ // so we clean "/sys/class/net/../../devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+ // leading to "/sys/devices/pci0000:00/0000:00:1f.6/net/enp0s31f6"
+ // still not there. We need to access the data of the pci device. So we jump into the path
+ // linked to the "device" pseudofile
+ dest, err = os.Readlink(filepath.Join(netDev, "device"))
+ if err != nil {
+ // bail out with empty value
+ return nil
+ }
+ // we expect something like="../../../0000:00:1f.6"
+
+ devPath := filepath.Clean(filepath.Join(netDev, dest))
+ // so we clean "/sys/devices/pci0000:00/0000:00:1f.6/net/enp0s31f6/../../../0000:00:1f.6"
+ // leading to "/sys/devices/pci0000:00/0000:00:1f.6/"
+ // finally here!
+
+	// which bus is this device connected to?
+ dest, err = os.Readlink(filepath.Join(devPath, "subsystem"))
+ if err != nil {
+ // bail out with empty value
+ return nil
+ }
+ // ok, this is hacky, but since we need the last *two* path components and we know we
+ // are running on linux...
+ if !strings.HasSuffix(dest, "/bus/pci") {
+ // unsupported and unexpected bus!
+ return nil
+ }
+
+ pciAddr := filepath.Base(devPath)
+ return &pciAddr
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go
new file mode 100644
index 00000000..c8dfa090
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("netFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go b/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go
new file mode 100644
index 00000000..0b46aa56
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/net/net_windows.go
@@ -0,0 +1,74 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package net
+
+import (
+ "strings"
+
+ "github.com/StackExchange/wmi"
+)
+
+const wqlNetworkAdapter = "SELECT Description, DeviceID, Index, InterfaceIndex, MACAddress, Manufacturer, Name, NetConnectionID, ProductName, ServiceName, PhysicalAdapter FROM Win32_NetworkAdapter"
+
+type win32NetworkAdapter struct {
+ Description *string
+ DeviceID *string
+ Index *uint32
+ InterfaceIndex *uint32
+ MACAddress *string
+ Manufacturer *string
+ Name *string
+ NetConnectionID *string
+ ProductName *string
+ ServiceName *string
+ PhysicalAdapter *bool
+}
+
+func (i *Info) load() error {
+ // Getting info from WMI
+ var win32NetDescriptions []win32NetworkAdapter
+ if err := wmi.Query(wqlNetworkAdapter, &win32NetDescriptions); err != nil {
+ return err
+ }
+
+ i.NICs = nics(win32NetDescriptions)
+ return nil
+}
+
+func nics(win32NetDescriptions []win32NetworkAdapter) []*NIC {
+ // Converting into standard structures
+ nics := make([]*NIC, 0)
+ for _, nicDescription := range win32NetDescriptions {
+ nic := &NIC{
+ Name: netDeviceName(nicDescription),
+ MacAddress: *nicDescription.MACAddress,
+ IsVirtual: netIsVirtual(nicDescription),
+ Capabilities: []*NICCapability{},
+ }
+		// Appending NIC to NICs
+ nics = append(nics, nic)
+ }
+
+ return nics
+}
+
+func netDeviceName(description win32NetworkAdapter) string {
+ var name string
+ if strings.TrimSpace(*description.NetConnectionID) != "" {
+ name = *description.NetConnectionID + " - " + *description.Description
+ } else {
+ name = *description.Description
+ }
+ return name
+}
+
+func netIsVirtual(description win32NetworkAdapter) bool {
+ if description.PhysicalAdapter == nil {
+ return false
+ }
+
+ return !(*description.PhysicalAdapter)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/option/option.go b/vendor/github.com/jaypipes/ghw/pkg/option/option.go
new file mode 100644
index 00000000..6cd231de
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/option/option.go
@@ -0,0 +1,259 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package option
+
+import (
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+)
+
+const (
+ DefaultChroot = "/"
+)
+
+const (
+ envKeyChroot = "GHW_CHROOT"
+ envKeyDisableWarnings = "GHW_DISABLE_WARNINGS"
+ envKeyDisableTools = "GHW_DISABLE_TOOLS"
+ envKeySnapshotPath = "GHW_SNAPSHOT_PATH"
+ envKeySnapshotRoot = "GHW_SNAPSHOT_ROOT"
+ envKeySnapshotExclusive = "GHW_SNAPSHOT_EXCLUSIVE"
+ envKeySnapshotPreserve = "GHW_SNAPSHOT_PRESERVE"
+)
+
+// Alerter emits warnings about undesirable but recoverable errors.
+// We use a subset of a logger interface only to emit warnings, and
+// `Warninger` sounded ugly.
+type Alerter interface {
+ Printf(format string, v ...interface{})
+}
+
+var (
+ NullAlerter = log.New(ioutil.Discard, "", 0)
+)
+
+// EnvOrDefaultAlerter returns the default instance ghw will use to emit
+// its warnings. ghw will emit warnings to stderr by default unless the
+// environment variable GHW_DISABLE_WARNINGS is specified; in the latter case
+// all warnings will be suppressed.
+func EnvOrDefaultAlerter() Alerter {
+ var dest io.Writer
+ if _, exists := os.LookupEnv(envKeyDisableWarnings); exists {
+ dest = ioutil.Discard
+ } else {
+ // default
+ dest = os.Stderr
+ }
+ return log.New(dest, "", 0)
+}
+
+// EnvOrDefaultChroot returns the value of the GHW_CHROOT environment variable or
+// the default value of "/" if not set
+func EnvOrDefaultChroot() string {
+ // Grab options from the environs by default
+ if val, exists := os.LookupEnv(envKeyChroot); exists {
+ return val
+ }
+ return DefaultChroot
+}
+
+// EnvOrDefaultSnapshotPath returns the value of the GHW_SNAPSHOT_PATH environment variable
+// or the default value of "" (disable snapshot consumption) if not set
+func EnvOrDefaultSnapshotPath() string {
+ if val, exists := os.LookupEnv(envKeySnapshotPath); exists {
+ return val
+ }
+ return "" // default is no snapshot
+}
+
+// EnvOrDefaultSnapshotRoot returns the value of the GHW_SNAPSHOT_ROOT environment variable
+// or the default value of "" (self-manage the snapshot unpack directory, if relevant) if not set
+func EnvOrDefaultSnapshotRoot() string {
+ if val, exists := os.LookupEnv(envKeySnapshotRoot); exists {
+ return val
+ }
+ return "" // default is to self-manage the snapshot directory
+}
+
+// EnvOrDefaultSnapshotExclusive returns the value of the GHW_SNAPSHOT_EXCLUSIVE environment variable
+// or the default value of false if not set
+func EnvOrDefaultSnapshotExclusive() bool {
+ if _, exists := os.LookupEnv(envKeySnapshotExclusive); exists {
+ return true
+ }
+ return false
+}
+
+// EnvOrDefaultSnapshotPreserve returns the value of the GHW_SNAPSHOT_PRESERVE environment variable
+// or the default value of false if not set
+func EnvOrDefaultSnapshotPreserve() bool {
+ if _, exists := os.LookupEnv(envKeySnapshotPreserve); exists {
+ return true
+ }
+ return false
+}
+
+// EnvOrDefaultTools returns true if ghw should use external tools to augment the data collected
+// from sysfs. Most users want to do this most of the time, so this is enabled by default.
+// Users consuming snapshots may want to opt out; they can set the GHW_DISABLE_TOOLS
+// environment variable to any value to make ghw skip calling external tools even if they are available.
+func EnvOrDefaultTools() bool {
+ if _, exists := os.LookupEnv(envKeyDisableTools); exists {
+ return false
+ }
+ return true
+}
+
+// Option is used to represent optionally-configured settings. Each field is a
+// pointer to some concrete value so that we can tell when something has been
+// set or left unset.
+type Option struct {
+ // To facilitate querying of sysfs filesystems that are bind-mounted to a
+	// non-default root mountpoint, we allow users to set the GHW_CHROOT environment
+	// variable to an alternate mountpoint. For instance, assume that the user of
+ // ghw is a Golang binary being executed from an application container that has
+ // certain host filesystems bind-mounted into the container at /host. The user
+	// would ensure the GHW_CHROOT environment variable is set to "/host" and ghw will
+ // build its paths from that location instead of /
+ Chroot *string
+
+ // Snapshot contains options for handling ghw snapshots
+ Snapshot *SnapshotOptions
+
+ // Alerter contains the target for ghw warnings
+ Alerter Alerter
+
+	// EnableTools optionally requests ghw not to call any external program to learn
+ // about the hardware. The default is to use such tools if available.
+ EnableTools *bool
+
+	// PathOverrides optionally allows overriding the default paths ghw uses internally
+ // to learn about the system resources.
+ PathOverrides PathOverrides
+
+ // Context may contain a pointer to a `Context` struct that is constructed
+ // during a call to the `context.WithContext` function. Only used internally.
+ // This is an interface to get around recursive package import issues.
+ Context interface{}
+}
+
+// SnapshotOptions contains options for handling of ghw snapshots
+type SnapshotOptions struct {
+ // Path allows users to specify a snapshot (captured using ghw-snapshot) to be
+ // automatically consumed. Users need to supply the path of the snapshot, and
+ // ghw will take care of unpacking it on a temporary directory.
+ // Set the environment variable "GHW_SNAPSHOT_PRESERVE" to make ghw skip the cleanup
+ // stage and keep the unpacked snapshot in the temporary directory.
+ Path string
+ // Root is the directory on which the snapshot must be unpacked. This allows
+ // the users to manage their snapshot directory instead of ghw doing that on
+ // their behalf. Relevant only if SnapshotPath is given.
+ Root *string
+	// Exclusive tells ghw whether the given directory should be considered for
+	// exclusive use by ghw or not, if the user provides a Root. If the flag is set, ghw will
+	// unpack the snapshot in the given SnapshotRoot iff the directory is empty; otherwise
+	// any existing content will be left untouched and the unpack stage will exit silently.
+	// As an additional side effect, supplying both this option and SnapshotRoot makes each
+	// context try to unpack the snapshot only once.
+ Exclusive bool
+}
+
+// WithChroot allows overriding the root directory ghw uses.
+func WithChroot(dir string) *Option {
+ return &Option{Chroot: &dir}
+}
+
+// WithSnapshot sets snapshot-processing options for a ghw run
+func WithSnapshot(opts SnapshotOptions) *Option {
+ return &Option{
+ Snapshot: &opts,
+ }
+}
+
+// WithAlerter sets alerting options for ghw
+func WithAlerter(alerter Alerter) *Option {
+ return &Option{
+ Alerter: alerter,
+ }
+}
+
+// WithNullAlerter sets No-op alerting options for ghw
+func WithNullAlerter() *Option {
+ return &Option{
+ Alerter: NullAlerter,
+ }
+}
+
+// WithDisableTools prevents ghw from calling external tools to discover hardware capabilities.
+func WithDisableTools() *Option {
+ false_ := false
+ return &Option{EnableTools: &false_}
+}
+
+// PathOverrides is a map, keyed by the string name of a mount path, of override paths
+type PathOverrides map[string]string
+
+// WithPathOverrides supplies path-specific overrides for the context
+func WithPathOverrides(overrides PathOverrides) *Option {
+ return &Option{
+ PathOverrides: overrides,
+ }
+}
+
+// There is intentionally no Option related to GHW_SNAPSHOT_PRESERVE because we see that as
+// a debug/troubleshooting aid rather than something users want to do regularly.
+// Hence we allow that only via the environment variable for the time being.
+
+// Merge accepts one or more Options and merges them together, returning the
+// merged Option
+func Merge(opts ...*Option) *Option {
+ merged := &Option{}
+ for _, opt := range opts {
+ if opt.Chroot != nil {
+ merged.Chroot = opt.Chroot
+ }
+ if opt.Snapshot != nil {
+ merged.Snapshot = opt.Snapshot
+ }
+ if opt.Alerter != nil {
+ merged.Alerter = opt.Alerter
+ }
+ if opt.EnableTools != nil {
+ merged.EnableTools = opt.EnableTools
+ }
+ // intentionally only programmatically
+ if opt.PathOverrides != nil {
+ merged.PathOverrides = opt.PathOverrides
+ }
+ if opt.Context != nil {
+ merged.Context = opt.Context
+ }
+ }
+ // Set the default value if missing from mergeOpts
+ if merged.Chroot == nil {
+ chroot := EnvOrDefaultChroot()
+ merged.Chroot = &chroot
+ }
+ if merged.Alerter == nil {
+ merged.Alerter = EnvOrDefaultAlerter()
+ }
+ if merged.Snapshot == nil {
+ snapRoot := EnvOrDefaultSnapshotRoot()
+ merged.Snapshot = &SnapshotOptions{
+ Path: EnvOrDefaultSnapshotPath(),
+ Root: &snapRoot,
+ Exclusive: EnvOrDefaultSnapshotExclusive(),
+ }
+ }
+ if merged.EnableTools == nil {
+ enabled := EnvOrDefaultTools()
+ merged.EnableTools = &enabled
+ }
+ return merged
+}
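+
+// A short sketch of how these options compose:
+//
+//	// an explicit chroot wins over GHW_CHROOT and the "/" default
+//	merged := option.Merge(option.WithChroot("/host"), option.WithNullAlerter())
+//	// *merged.Chroot -> "/host"
+//	// *merged.EnableTools -> true, unless GHW_DISABLE_TOOLS is set in the environment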
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go b/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go
new file mode 100644
index 00000000..6a8a4e45
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/address/address.go
@@ -0,0 +1,55 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package address
+
+import (
+ "regexp"
+ "strings"
+)
+
+var (
+ regexAddress *regexp.Regexp = regexp.MustCompile(
+ `^(([0-9a-f]{0,4}):)?([0-9a-f]{2}):([0-9a-f]{2})\.([0-9a-f]{1})$`,
+ )
+)
+
+// Address contains the components of a PCI Address
+type Address struct {
+ Domain string
+ Bus string
+ Device string
+ Function string
+}
+
+// String() returns the canonical [D]BDF representation of this Address
+func (addr *Address) String() string {
+ return addr.Domain + ":" + addr.Bus + ":" + addr.Device + "." + addr.Function
+}
+
+// FromString returns an Address struct from an address string in either
+// $BUS:$DEVICE.$FUNCTION (BDF) format or it can be a full PCI address that
+// includes the 4-digit $DOMAIN information as well:
+// $DOMAIN:$BUS:$DEVICE.$FUNCTION.
+//
+// Returns nil if the address string wasn't a valid PCI address.
+func FromString(address string) *Address {
+ addrLowered := strings.ToLower(address)
+ matches := regexAddress.FindStringSubmatch(addrLowered)
+ if len(matches) == 6 {
+ dom := "0000"
+ if matches[1] != "" {
+ dom = matches[2]
+ }
+ return &Address{
+ Domain: dom,
+ Bus: matches[3],
+ Device: matches[4],
+ Function: matches[5],
+ }
+ }
+ return nil
+}
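+
+// A few illustrative parses of the regex above:
+//
+//	address.FromString("0000:00:1f.6") // &Address{Domain: "0000", Bus: "00", Device: "1f", Function: "6"}
+//	address.FromString("00:1f.6")      // same result; the domain defaults to "0000"
+//	address.FromString("not-a-bdf")    // nil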
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go
new file mode 100644
index 00000000..86cc7b25
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci.go
@@ -0,0 +1,211 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pci
+
+import (
+ "encoding/json"
+ "fmt"
+
+ "github.com/jaypipes/pcidb"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ pciaddr "github.com/jaypipes/ghw/pkg/pci/address"
+ "github.com/jaypipes/ghw/pkg/topology"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// backward compatibility, to be removed in 1.0.0
+type Address pciaddr.Address
+
+// backward compatibility, to be removed in 1.0.0
+var AddressFromString = pciaddr.FromString
+
+type Device struct {
+ // The PCI address of the device
+ Address string `json:"address"`
+ Vendor *pcidb.Vendor `json:"vendor"`
+ Product *pcidb.Product `json:"product"`
+ Revision string `json:"revision"`
+ Subsystem *pcidb.Product `json:"subsystem"`
+ // optional subvendor/sub-device information
+ Class *pcidb.Class `json:"class"`
+ // optional sub-class for the device
+ Subclass *pcidb.Subclass `json:"subclass"`
+ // optional programming interface
+ ProgrammingInterface *pcidb.ProgrammingInterface `json:"programming_interface"`
+ // Topology node that the PCI device is affined to. Will be nil if the
+ // architecture is not NUMA.
+ Node *topology.Node `json:"node,omitempty"`
+ Driver string `json:"driver"`
+}
+
+type devIdent struct {
+ ID string `json:"id"`
+ Name string `json:"name"`
+}
+
+type devMarshallable struct {
+ Driver string `json:"driver"`
+ Address string `json:"address"`
+ Vendor devIdent `json:"vendor"`
+ Product devIdent `json:"product"`
+ Revision string `json:"revision"`
+ Subsystem devIdent `json:"subsystem"`
+ Class devIdent `json:"class"`
+ Subclass devIdent `json:"subclass"`
+ Interface devIdent `json:"programming_interface"`
+}
+
+// NOTE(jaypipes) Device has a custom JSON marshaller because we don't want
+// to serialize the entire PCIDB information for the Vendor (which includes all
+// of the vendor's products, etc). Instead, we simply serialize the ID and
+// human-readable name of the vendor, product, class, etc.
+func (d *Device) MarshalJSON() ([]byte, error) {
+ dm := devMarshallable{
+ Driver: d.Driver,
+ Address: d.Address,
+ Vendor: devIdent{
+ ID: d.Vendor.ID,
+ Name: d.Vendor.Name,
+ },
+ Product: devIdent{
+ ID: d.Product.ID,
+ Name: d.Product.Name,
+ },
+ Revision: d.Revision,
+ Subsystem: devIdent{
+ ID: d.Subsystem.ID,
+ Name: d.Subsystem.Name,
+ },
+ Class: devIdent{
+ ID: d.Class.ID,
+ Name: d.Class.Name,
+ },
+ Subclass: devIdent{
+ ID: d.Subclass.ID,
+ Name: d.Subclass.Name,
+ },
+ Interface: devIdent{
+ ID: d.ProgrammingInterface.ID,
+ Name: d.ProgrammingInterface.Name,
+ },
+ }
+ return json.Marshal(dm)
+}
+
+func (d *Device) String() string {
+ vendorName := util.UNKNOWN
+ if d.Vendor != nil {
+ vendorName = d.Vendor.Name
+ }
+ productName := util.UNKNOWN
+ if d.Product != nil {
+ productName = d.Product.Name
+ }
+ className := util.UNKNOWN
+ if d.Class != nil {
+ className = d.Class.Name
+ }
+ return fmt.Sprintf(
+ "%s -> driver: '%s' class: '%s' vendor: '%s' product: '%s'",
+ d.Address,
+ d.Driver,
+ className,
+ vendorName,
+ productName,
+ )
+}
+
+type Info struct {
+ arch topology.Architecture
+ ctx *context.Context
+ // All PCI devices on the host system
+ Devices []*Device
+ // hash of class ID -> class information
+ // DEPRECATED. Will be removed in v1.0. Please use
+ // github.com/jaypipes/pcidb to explore PCIDB information
+ Classes map[string]*pcidb.Class `json:"-"`
+ // hash of vendor ID -> vendor information
+ // DEPRECATED. Will be removed in v1.0. Please use
+ // github.com/jaypipes/pcidb to explore PCIDB information
+ Vendors map[string]*pcidb.Vendor `json:"-"`
+ // hash of vendor ID + product/device ID -> product information
+ // DEPRECATED. Will be removed in v1.0. Please use
+ // github.com/jaypipes/pcidb to explore PCIDB information
+ Products map[string]*pcidb.Product `json:"-"`
+}
+
+func (i *Info) String() string {
+ return fmt.Sprintf("PCI (%d devices)", len(i.Devices))
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// PCI devices on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ merged := option.Merge(opts...)
+ ctx := context.New(merged)
+ // by default we don't report NUMA information;
+	// we will do so only if we are sure we are running on a NUMA architecture
+ info := &Info{
+ arch: topology.ARCHITECTURE_SMP,
+ ctx: ctx,
+ }
+
+ // we do this trick because we need to make sure ctx.Setup() gets
+ // a chance to run before any subordinate package is created reusing
+ // our context.
+ loadDetectingTopology := func() error {
+ topo, err := topology.New(context.WithContext(ctx))
+ if err == nil {
+ info.arch = topo.Architecture
+ } else {
+ ctx.Warn("error detecting system topology: %v", err)
+ }
+ return info.load()
+ }
+
+ var err error
+ if context.Exists(merged) {
+ err = loadDetectingTopology()
+ } else {
+ err = ctx.Do(loadDetectingTopology)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// lookupDevice gets a device from cached data
+func (info *Info) lookupDevice(address string) *Device {
+ for _, dev := range info.Devices {
+ if dev.Address == address {
+ return dev
+ }
+ }
+ return nil
+}
+
+// simple private struct used to encapsulate PCI information in a top-level
+// "pci" YAML/JSON map/object key
+type pciPrinter struct {
+ Info *Info `json:"pci"`
+}
+
+// YAMLString returns a string with the PCI information formatted as YAML
+// under a top-level "pci:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, pciPrinter{i})
+}
+
+// JSONString returns a string with the PCI information formatted as JSON
+// under a top-level "pci:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, pciPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go
new file mode 100644
index 00000000..087da33d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_linux.go
@@ -0,0 +1,414 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pci
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/jaypipes/pcidb"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/option"
+ pciaddr "github.com/jaypipes/ghw/pkg/pci/address"
+ "github.com/jaypipes/ghw/pkg/topology"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const (
+ // found running `wc` against real linux systems
+ modAliasExpectedLength = 54
+)
+
+func (i *Info) load() error {
+	// when consuming snapshots - most notably, but not only, in tests -
+	// the context pkg forces the chroot value to the unpacked snapshot root.
+	// This is intentional, intentionally transparent, and ghw is prepared to handle this case.
+	// However, `pcidb` is not. It doesn't know about ghw snapshots, nor should it,
+	// so we need to complicate things a bit. If the user explicitly supplied
+	// a chroot option, then we should honor it all across the stack, and passing down
+	// the chroot to pcidb is the right thing to do. If, however, the chroot was
+	// implicitly set by snapshot support, then this must be consumed by ghw only.
+	// In this case we should NOT pass it down to pcidb.
+ chroot := i.ctx.Chroot
+ if i.ctx.SnapshotPath != "" {
+ chroot = option.DefaultChroot
+ }
+ db, err := pcidb.New(pcidb.WithChroot(chroot))
+ if err != nil {
+ return err
+ }
+ i.Classes = db.Classes
+ i.Vendors = db.Vendors
+ i.Products = db.Products
+ i.Devices = i.ListDevices()
+ return nil
+}
+
+func getDeviceModaliasPath(ctx *context.Context, pciAddr *pciaddr.Address) string {
+ paths := linuxpath.New(ctx)
+ return filepath.Join(
+ paths.SysBusPciDevices,
+ pciAddr.String(),
+ "modalias",
+ )
+}
+
+func getDeviceRevision(ctx *context.Context, pciAddr *pciaddr.Address) string {
+ paths := linuxpath.New(ctx)
+ revisionPath := filepath.Join(
+ paths.SysBusPciDevices,
+ pciAddr.String(),
+ "revision",
+ )
+
+ if _, err := os.Stat(revisionPath); err != nil {
+ return ""
+ }
+ revision, err := ioutil.ReadFile(revisionPath)
+ if err != nil {
+ return ""
+ }
+ return strings.TrimSpace(string(revision))
+}
+
+func getDeviceNUMANode(ctx *context.Context, pciAddr *pciaddr.Address) *topology.Node {
+ paths := linuxpath.New(ctx)
+ numaNodePath := filepath.Join(paths.SysBusPciDevices, pciAddr.String(), "numa_node")
+
+ if _, err := os.Stat(numaNodePath); err != nil {
+ return nil
+ }
+
+ nodeIdx := util.SafeIntFromFile(ctx, numaNodePath)
+ if nodeIdx == -1 {
+ return nil
+ }
+
+ return &topology.Node{
+ ID: nodeIdx,
+ }
+}
+
+func getDeviceDriver(ctx *context.Context, pciAddr *pciaddr.Address) string {
+ paths := linuxpath.New(ctx)
+ driverPath := filepath.Join(paths.SysBusPciDevices, pciAddr.String(), "driver")
+
+ if _, err := os.Stat(driverPath); err != nil {
+ return ""
+ }
+
+ dest, err := os.Readlink(driverPath)
+ if err != nil {
+ return ""
+ }
+ return filepath.Base(dest)
+}
+
+type deviceModaliasInfo struct {
+ vendorID string
+ productID string
+ subproductID string
+ subvendorID string
+ classID string
+ subclassID string
+ progIfaceID string
+}
+
+func parseModaliasFile(fp string) *deviceModaliasInfo {
+ if _, err := os.Stat(fp); err != nil {
+ return nil
+ }
+ data, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return nil
+ }
+
+ return parseModaliasData(string(data))
+}
+
+func parseModaliasData(data string) *deviceModaliasInfo {
+	// Extra sanity check to avoid out-of-range panics. We actually expect
+	// the data to be exactly `modAliasExpectedLength` bytes long, but
+	// we will happily ignore any extra data we don't know how to
+	// handle.
+ if len(data) < modAliasExpectedLength {
+ return nil
+ }
+ // The modalias file is an encoded file that looks like this:
+ //
+ // $ cat /sys/devices/pci0000\:00/0000\:00\:03.0/0000\:03\:00.0/modalias
+ // pci:v000010DEd00001C82sv00001043sd00008613bc03sc00i00
+ //
+ // It is interpreted like so:
+ //
+ // pci: -- ignore
+ // v000010DE -- PCI vendor ID
+ // d00001C82 -- PCI device ID (the product/model ID)
+ // sv00001043 -- PCI subsystem vendor ID
+ // sd00008613 -- PCI subsystem device ID (subdevice product/model ID)
+ // bc03 -- PCI base class
+ // sc00 -- PCI subclass
+ // i00 -- programming interface
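+	//
+	// The fixed slice offsets used below index into those fixed-width fields.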
+ vendorID := strings.ToLower(data[9:13])
+ productID := strings.ToLower(data[18:22])
+ subvendorID := strings.ToLower(data[28:32])
+ subproductID := strings.ToLower(data[38:42])
+ classID := strings.ToLower(data[44:46])
+ subclassID := strings.ToLower(data[48:50])
+ progIfaceID := strings.ToLower(data[51:53])
+ return &deviceModaliasInfo{
+ vendorID: vendorID,
+ productID: productID,
+ subproductID: subproductID,
+ subvendorID: subvendorID,
+ classID: classID,
+ subclassID: subclassID,
+ progIfaceID: progIfaceID,
+ }
+}
+
+// Returns a pointer to a pcidb.Vendor struct matching the supplied vendor
+// ID string. If no such vendor ID string could be found, returns the
+// pcidb.Vendor struct populated with "unknown" vendor Name attribute and
+// empty Products attribute.
+func findPCIVendor(info *Info, vendorID string) *pcidb.Vendor {
+ vendor := info.Vendors[vendorID]
+ if vendor == nil {
+ return &pcidb.Vendor{
+ ID: vendorID,
+ Name: util.UNKNOWN,
+ Products: []*pcidb.Product{},
+ }
+ }
+ return vendor
+}
+
+// Returns a pointer to a pcidb.Product struct matching the supplied vendor
+// and product ID strings. If no such product could be found, returns the
+// pcidb.Product struct populated with "unknown" product Name attribute and
+// empty Subsystems attribute.
+func findPCIProduct(
+ info *Info,
+ vendorID string,
+ productID string,
+) *pcidb.Product {
+ product := info.Products[vendorID+productID]
+ if product == nil {
+ return &pcidb.Product{
+ ID: productID,
+ Name: util.UNKNOWN,
+ Subsystems: []*pcidb.Product{},
+ }
+ }
+ return product
+}
+
+// Returns a pointer to a pcidb.Product struct matching the supplied vendor,
+// product, subvendor and subproduct ID strings. If no such product could be
+// found, returns the pcidb.Product struct populated with "unknown" product
+// Name attribute and empty Subsystems attribute.
+func findPCISubsystem(
+ info *Info,
+ vendorID string,
+ productID string,
+ subvendorID string,
+ subproductID string,
+) *pcidb.Product {
+ product := info.Products[vendorID+productID]
+ subvendor := info.Vendors[subvendorID]
+ if subvendor != nil && product != nil {
+ for _, p := range product.Subsystems {
+ if p.ID == subproductID {
+ return p
+ }
+ }
+ }
+ return &pcidb.Product{
+ VendorID: subvendorID,
+ ID: subproductID,
+ Name: util.UNKNOWN,
+ }
+}
+
+// Returns a pointer to a pcidb.Class struct matching the supplied class ID
+// string. If no such class ID string could be found, returns the
+// pcidb.Class struct populated with "unknown" class Name attribute and
+// empty Subclasses attribute.
+func findPCIClass(info *Info, classID string) *pcidb.Class {
+ class := info.Classes[classID]
+ if class == nil {
+ return &pcidb.Class{
+ ID: classID,
+ Name: util.UNKNOWN,
+ Subclasses: []*pcidb.Subclass{},
+ }
+ }
+ return class
+}
+
+// Returns a pointer to a pcidb.Subclass struct matching the supplied class
+// and subclass ID strings. If no such subclass could be found, returns the
+// pcidb.Subclass struct populated with "unknown" subclass Name attribute
+// and empty ProgrammingInterfaces attribute.
+func findPCISubclass(
+ info *Info,
+ classID string,
+ subclassID string,
+) *pcidb.Subclass {
+ class := info.Classes[classID]
+ if class != nil {
+ for _, sc := range class.Subclasses {
+ if sc.ID == subclassID {
+ return sc
+ }
+ }
+ }
+ return &pcidb.Subclass{
+ ID: subclassID,
+ Name: util.UNKNOWN,
+ ProgrammingInterfaces: []*pcidb.ProgrammingInterface{},
+ }
+}
+
+// Returns a pointer to a pcidb.ProgrammingInterface struct matching the
+// supplied class, subclass and programming interface ID strings. If no such
+// programming interface could be found, returns the
+// pcidb.ProgrammingInterface struct populated with "unknown" Name attribute
+func findPCIProgrammingInterface(
+ info *Info,
+ classID string,
+ subclassID string,
+ progIfaceID string,
+) *pcidb.ProgrammingInterface {
+ subclass := findPCISubclass(info, classID, subclassID)
+ for _, pi := range subclass.ProgrammingInterfaces {
+ if pi.ID == progIfaceID {
+ return pi
+ }
+ }
+ return &pcidb.ProgrammingInterface{
+ ID: progIfaceID,
+ Name: util.UNKNOWN,
+ }
+}
+
+// GetDevice returns a pointer to a Device struct that describes the PCI
+// device at the requested address. If no such device could be found, returns nil.
+func (info *Info) GetDevice(address string) *Device {
+ // check cached data first
+ if dev := info.lookupDevice(address); dev != nil {
+ return dev
+ }
+
+ pciAddr := pciaddr.FromString(address)
+ if pciAddr == nil {
+ info.ctx.Warn("error parsing the pci address %q", address)
+ return nil
+ }
+
+ // no cached data, let's get the information from system.
+ fp := getDeviceModaliasPath(info.ctx, pciAddr)
+ if fp == "" {
+ info.ctx.Warn("error finding modalias info for device %q", address)
+ return nil
+ }
+
+ modaliasInfo := parseModaliasFile(fp)
+ if modaliasInfo == nil {
+ info.ctx.Warn("error parsing modalias info for device %q", address)
+ return nil
+ }
+
+ device := info.getDeviceFromModaliasInfo(address, modaliasInfo)
+ device.Revision = getDeviceRevision(info.ctx, pciAddr)
+ if info.arch == topology.ARCHITECTURE_NUMA {
+ device.Node = getDeviceNUMANode(info.ctx, pciAddr)
+ }
+ device.Driver = getDeviceDriver(info.ctx, pciAddr)
+ return device
+}
+
+// ParseDevice returns a pointer to a Device given its describing data.
+// The PCI device obtained this way may not exist in the system;
+// use GetDevice to get a *Device which is found in the system
+func (info *Info) ParseDevice(address, modalias string) *Device {
+ modaliasInfo := parseModaliasData(modalias)
+ if modaliasInfo == nil {
+ return nil
+ }
+ return info.getDeviceFromModaliasInfo(address, modaliasInfo)
+}
+
+func (info *Info) getDeviceFromModaliasInfo(address string, modaliasInfo *deviceModaliasInfo) *Device {
+ vendor := findPCIVendor(info, modaliasInfo.vendorID)
+ product := findPCIProduct(
+ info,
+ modaliasInfo.vendorID,
+ modaliasInfo.productID,
+ )
+ subsystem := findPCISubsystem(
+ info,
+ modaliasInfo.vendorID,
+ modaliasInfo.productID,
+ modaliasInfo.subvendorID,
+ modaliasInfo.subproductID,
+ )
+ class := findPCIClass(info, modaliasInfo.classID)
+ subclass := findPCISubclass(
+ info,
+ modaliasInfo.classID,
+ modaliasInfo.subclassID,
+ )
+ progIface := findPCIProgrammingInterface(
+ info,
+ modaliasInfo.classID,
+ modaliasInfo.subclassID,
+ modaliasInfo.progIfaceID,
+ )
+
+ return &Device{
+ Address: address,
+ Vendor: vendor,
+ Subsystem: subsystem,
+ Product: product,
+ Class: class,
+ Subclass: subclass,
+ ProgrammingInterface: progIface,
+ }
+}
+
+// ListDevices returns a list of pointers to Device structs present on the
+// host system
+// DEPRECATED. Will be removed in v1.0. Please use
+// github.com/jaypipes/pcidb to explore PCIDB information
+func (info *Info) ListDevices() []*Device {
+ paths := linuxpath.New(info.ctx)
+ devs := make([]*Device, 0)
+ // We scan the /sys/bus/pci/devices directory which contains a collection
+ // of symlinks. The names of the symlinks are all the known PCI addresses
+ // for the host. For each address, we grab a *Device matching the
+ // address and append to the returned array.
+ links, err := ioutil.ReadDir(paths.SysBusPciDevices)
+ if err != nil {
+ info.ctx.Warn("failed to read /sys/bus/pci/devices")
+ return nil
+ }
+ var dev *Device
+ for _, link := range links {
+ addr := link.Name()
+ dev = info.GetDevice(addr)
+ if dev == nil {
+ info.ctx.Warn("failed to get device information for PCI address %s", addr)
+ } else {
+ devs = append(devs, dev)
+ }
+ }
+ return devs
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go
new file mode 100644
index 00000000..9ebb396d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/pci/pci_stub.go
@@ -0,0 +1,32 @@
+//go:build !linux
+// +build !linux
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pci
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("pciFillInfo not implemented on " + runtime.GOOS)
+}
+
+// GetDevice returns a pointer to a Device struct that describes the PCI
+// device at the requested address. If no such device could be found, returns
+// nil
+func (info *Info) GetDevice(address string) *Device {
+ return nil
+}
+
+// ListDevices returns a list of pointers to Device structs present on the
+// host system
+func (info *Info) ListDevices() []*Device {
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product.go b/vendor/github.com/jaypipes/ghw/pkg/product/product.go
new file mode 100644
index 00000000..83d6541d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product.go
@@ -0,0 +1,96 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/option"
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+// Info defines product information
+type Info struct {
+ ctx *context.Context
+ Family string `json:"family"`
+ Name string `json:"name"`
+ Vendor string `json:"vendor"`
+ SerialNumber string `json:"serial_number"`
+ UUID string `json:"uuid"`
+ SKU string `json:"sku"`
+ Version string `json:"version"`
+}
+
+func (i *Info) String() string {
+ familyStr := ""
+ if i.Family != "" {
+ familyStr = " family=" + i.Family
+ }
+ nameStr := ""
+ if i.Name != "" {
+ nameStr = " name=" + i.Name
+ }
+ vendorStr := ""
+ if i.Vendor != "" {
+ vendorStr = " vendor=" + i.Vendor
+ }
+ serialStr := ""
+ if i.SerialNumber != "" && i.SerialNumber != util.UNKNOWN {
+ serialStr = " serial=" + i.SerialNumber
+ }
+ uuidStr := ""
+ if i.UUID != "" && i.UUID != util.UNKNOWN {
+ uuidStr = " uuid=" + i.UUID
+ }
+ skuStr := ""
+ if i.SKU != "" {
+ skuStr = " sku=" + i.SKU
+ }
+ versionStr := ""
+ if i.Version != "" {
+ versionStr = " version=" + i.Version
+ }
+
+ return "product" + util.ConcatStrings(
+ familyStr,
+ nameStr,
+ vendorStr,
+ serialStr,
+ uuidStr,
+ skuStr,
+ versionStr,
+ )
+}
+
+// New returns a pointer to an Info struct containing information
+// about the host's product
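+//
+// Illustrative usage:
+//
+//	info, _ := product.New()
+//	fmt.Println(info.String())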
+func New(opts ...*option.Option) (*Info, error) {
+ ctx := context.New(opts...)
+ info := &Info{ctx: ctx}
+ if err := ctx.Do(info.load); err != nil {
+ return nil, err
+ }
+ return info, nil
+}
+
+// simple private struct used to encapsulate product information in a top-level
+// "product" YAML/JSON map/object key
+type productPrinter struct {
+ Info *Info `json:"product"`
+}
+
+// YAMLString returns a string with the product information formatted as YAML
+// under a top-level "product:" key
+func (info *Info) YAMLString() string {
+ return marshal.SafeYAML(info.ctx, productPrinter{info})
+}
+
+// JSONString returns a string with the product information formatted as JSON
+// under a top-level "product:" key
+func (info *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(info.ctx, productPrinter{info}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go
new file mode 100644
index 00000000..36b6b447
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_linux.go
@@ -0,0 +1,23 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "github.com/jaypipes/ghw/pkg/linuxdmi"
+)
+
+func (i *Info) load() error {
+
+ i.Family = linuxdmi.Item(i.ctx, "product_family")
+ i.Name = linuxdmi.Item(i.ctx, "product_name")
+ i.Vendor = linuxdmi.Item(i.ctx, "sys_vendor")
+ i.SerialNumber = linuxdmi.Item(i.ctx, "product_serial")
+ i.UUID = linuxdmi.Item(i.ctx, "product_uuid")
+ i.SKU = linuxdmi.Item(i.ctx, "product_sku")
+ i.Version = linuxdmi.Item(i.ctx, "product_version")
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go
new file mode 100644
index 00000000..8fc9724f
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("productFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go b/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go
new file mode 100644
index 00000000..c919cb0f
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/product/product_windows.go
@@ -0,0 +1,45 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package product
+
+import (
+ "github.com/StackExchange/wmi"
+
+ "github.com/jaypipes/ghw/pkg/util"
+)
+
+const wqlProduct = "SELECT Caption, Description, IdentifyingNumber, Name, SKUNumber, Vendor, Version, UUID FROM Win32_ComputerSystemProduct"
+
+type win32Product struct {
+ Caption *string
+ Description *string
+ IdentifyingNumber *string
+ Name *string
+ SKUNumber *string
+ Vendor *string
+ Version *string
+ UUID *string
+}
+
+func (i *Info) load() error {
+ // Getting data from WMI
+ var win32ProductDescriptions []win32Product
+ // Assuming the first product is the host...
+ if err := wmi.Query(wqlProduct, &win32ProductDescriptions); err != nil {
+ return err
+ }
+ if len(win32ProductDescriptions) > 0 {
+ i.Family = util.UNKNOWN
+ i.Name = *win32ProductDescriptions[0].Name
+ i.Vendor = *win32ProductDescriptions[0].Vendor
+ i.SerialNumber = *win32ProductDescriptions[0].IdentifyingNumber
+ i.UUID = *win32ProductDescriptions[0].UUID
+ i.SKU = *win32ProductDescriptions[0].SKUNumber
+ i.Version = *win32ProductDescriptions[0].Version
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go
new file mode 100644
index 00000000..519a874d
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree.go
@@ -0,0 +1,199 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// Attempting to tar up pseudofiles like /proc/cpuinfo is an exercise in
+// futility. Notably, the pseudofiles, when read by syscalls, do not return the
+// number of bytes read. This causes the tar writer to write zero-length files.
+//
+// Instead, it is necessary to build a directory structure in a tmpdir and
+// create actual files with copies of the pseudofile contents
+
+// CloneTreeInto copies all the pseudofiles that ghw will consume into the root
+// `scratchDir`, preserving the hierarchy.
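+//
+// A typical flow (illustrative) clones into a scratch directory and then packs
+// it into a snapshot archive:
+//
+//	scratchDir, _ := ioutil.TempDir("", "ghw-clone-*")
+//	_ = CloneTreeInto(scratchDir)
+//	_ = PackFrom("my-snapshot.tgz", scratchDir)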
+func CloneTreeInto(scratchDir string) error {
+ err := setupScratchDir(scratchDir)
+ if err != nil {
+ return err
+ }
+ fileSpecs := ExpectedCloneContent()
+ return CopyFilesInto(fileSpecs, scratchDir, nil)
+}
+
+// ExpectedCloneContent returns a slice of glob patterns which represent the pseudofiles
+// ghw cares about.
+// The intended usage of this function is to validate a clone tree, checking that the
+// content matches the expectations.
+// Beware: the content is host-specific, because the content pertaining to some subsystems,
+// most notably PCI, is host-specific and unpredictable.
+func ExpectedCloneContent() []string {
+ fileSpecs := ExpectedCloneStaticContent()
+ fileSpecs = append(fileSpecs, ExpectedCloneNetContent()...)
+ fileSpecs = append(fileSpecs, ExpectedClonePCIContent()...)
+ fileSpecs = append(fileSpecs, ExpectedCloneGPUContent()...)
+ return fileSpecs
+}
+
+// ValidateClonedTree checks the content of a cloned tree, whose root is `clonedDir`,
+// against a slice of glob specs which must be included in the cloned tree.
+// It is not wrong - and this function doesn't forbid it - for the cloned tree to include
+// more files than necessary; ghw will just ignore the files it doesn't care about.
+// Returns a slice of the glob patterns expected (given) but not found in the cloned tree,
+// and the error encountered during the validation (if any).
+func ValidateClonedTree(fileSpecs []string, clonedDir string) ([]string, error) {
+ missing := []string{}
+ for _, fileSpec := range fileSpecs {
+ matches, err := filepath.Glob(filepath.Join(clonedDir, fileSpec))
+ if err != nil {
+ return missing, err
+ }
+ if len(matches) == 0 {
+ missing = append(missing, fileSpec)
+ }
+ }
+ return missing, nil
+}
+
+// CopyFileOptions allows fine-tuning the behaviour of the CopyFilesInto function
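+//
+// For example (illustrative), to clone a tree without creating any symlinks
+// one could pass options which dereference every link and never create bare
+// directories:
+//
+//	opts := &CopyFileOptions{
+//		IsSymlinkFn:       func(_ string, _ os.FileInfo) bool { return false },
+//		ShouldCreateDirFn: func(_ string, _ os.FileInfo) bool { return false },
+//	}
+//	err := CopyFilesInto(fileSpecs, destDir, opts)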
+type CopyFileOptions struct {
+	// IsSymlinkFn allows controlling the behaviour when handling a symlink.
+	// If this hook returns true, the source file is treated as a symlink: the cloned
+	// tree will thus contain a symlink, with its path adjusted to match the relative
+	// path inside the cloned tree. If it returns false, the symlink is dereferenced
+	// and its target content is copied instead.
+	// The easiest use case for this hook is avoiding symlinks in your cloned tree
+	// (at the cost of duplicated content): just supply a function which always returns false.
+ IsSymlinkFn func(path string, info os.FileInfo) bool
+	// ShouldCreateDirFn allows controlling whether empty directories listed as clone
+	// content should be created or not. When creating snapshots, empty directories
+	// are most often useless (but also harmless). Because of this, directories are only
+	// created as a side effect of copying the files inside them, and thus directories
+	// are never empty. The only notable exception is device drivers on Linux: in this
+	// case, for a number of technical/historical reasons, we care about the directory
+	// name, but not about the files inside it.
+	// Hence, this is the only case in which ghw clones empty directories.
+ ShouldCreateDirFn func(path string, info os.FileInfo) bool
+}
+
+// CopyFilesInto copies all the given glob file specs into the given `destDir` directory,
+// preserving the directory structure. This means you can provide a deeply nested filespec
+// like
+// - /some/deeply/nested/file*
+// and you DO NOT need to build the tree incrementally like
+// - /some/
+// - /some/deeply/
+// ...
+// All glob patterns supported by `filepath.Glob` are supported.
+func CopyFilesInto(fileSpecs []string, destDir string, opts *CopyFileOptions) error {
+ if opts == nil {
+ opts = &CopyFileOptions{
+ IsSymlinkFn: isSymlink,
+ ShouldCreateDirFn: isDriversDir,
+ }
+ }
+ for _, fileSpec := range fileSpecs {
+ trace("copying spec: %q\n", fileSpec)
+ matches, err := filepath.Glob(fileSpec)
+ if err != nil {
+ return err
+ }
+ if err := copyFileTreeInto(matches, destDir, opts); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func copyFileTreeInto(paths []string, destDir string, opts *CopyFileOptions) error {
+ for _, path := range paths {
+ trace(" copying path: %q\n", path)
+ baseDir := filepath.Dir(path)
+ if err := os.MkdirAll(filepath.Join(destDir, baseDir), os.ModePerm); err != nil {
+ return err
+ }
+
+ fi, err := os.Lstat(path)
+ if err != nil {
+ return err
+ }
+ // directories must be listed explicitly and created separately.
+ // In the future we may want to expose this decision as hook point in
+ // CopyFileOptions, when clear use cases emerge.
+ destPath := filepath.Join(destDir, path)
+ if fi.IsDir() {
+ if opts.ShouldCreateDirFn(path, fi) {
+ if err := os.MkdirAll(destPath, os.ModePerm); err != nil {
+ return err
+ }
+ } else {
+ trace("expanded glob path %q is a directory - skipped\n", path)
+ }
+ continue
+ }
+ if opts.IsSymlinkFn(path, fi) {
+ trace(" copying link: %q -> %q\n", path, destPath)
+ if err := copyLink(path, destPath); err != nil {
+ return err
+ }
+ } else {
+ trace(" copying file: %q -> %q\n", path, destPath)
+ if err := copyPseudoFile(path, destPath); err != nil && !errors.Is(err, os.ErrPermission) {
+ return err
+ }
+ }
+ }
+ return nil
+}
+
+func isSymlink(path string, fi os.FileInfo) bool {
+ return fi.Mode()&os.ModeSymlink != 0
+}
+
+func isDriversDir(path string, fi os.FileInfo) bool {
+ return strings.Contains(path, "drivers")
+}
+
+func copyLink(path, targetPath string) error {
+ target, err := os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ trace(" symlink %q -> %q\n", target, targetPath)
+ if err := os.Symlink(target, targetPath); err != nil {
+ if errors.Is(err, os.ErrExist) {
+ return nil
+ }
+ return err
+ }
+
+ return nil
+}
+
+func copyPseudoFile(path, targetPath string) error {
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go
new file mode 100644
index 00000000..18e2161a
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_block_linux.go
@@ -0,0 +1,221 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "errors"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+func createBlockDevices(buildDir string) error {
+	// Grab all the block device pseudo-directories from /sys/block symlinks
+	// (excluding loopback devices) and inject them into our build filesystem,
+	// skipping the circular symlinked "subsystem" directories
+ devLinks, err := ioutil.ReadDir("/sys/block")
+ if err != nil {
+ return err
+ }
+ for _, devLink := range devLinks {
+ dname := devLink.Name()
+ if strings.HasPrefix(dname, "loop") {
+ continue
+ }
+ devPath := filepath.Join("/sys/block", dname)
+ trace("processing block device %q\n", devPath)
+
+ // from the sysfs layout, we know this is always a symlink
+ linkContentPath, err := os.Readlink(devPath)
+ if err != nil {
+ return err
+ }
+ trace("link target for block device %q is %q\n", devPath, linkContentPath)
+
+ // Create a symlink in our build filesystem that is a directory
+ // pointing to the actual device bus path where the block device's
+ // information directory resides
+ linkPath := filepath.Join(buildDir, "sys/block", dname)
+ linkTargetPath := filepath.Join(
+ buildDir,
+ "sys/block",
+ strings.TrimPrefix(linkContentPath, string(os.PathSeparator)),
+ )
+ trace("creating device directory %s\n", linkTargetPath)
+ if err = os.MkdirAll(linkTargetPath, os.ModePerm); err != nil {
+ return err
+ }
+
+ trace("linking device directory %s to %s\n", linkPath, linkContentPath)
+		// Make sure the link target is a relative path!
+		// If we used an absolute path, the link target would be an absolute path
+		// starting with buildDir, and the snapshot would contain a broken link:
+		// the unpack directory will never have the same prefix as buildDir.
+ if err = os.Symlink(linkContentPath, linkPath); err != nil {
+ return err
+ }
+ // Now read the source block device directory and populate the
+ // newly-created target link in the build directory with the
+ // appropriate block device pseudofiles
+ srcDeviceDir := filepath.Join(
+ "/sys/block",
+ strings.TrimPrefix(linkContentPath, string(os.PathSeparator)),
+ )
+ trace("creating device directory %q from %q\n", linkTargetPath, srcDeviceDir)
+ if err = createBlockDeviceDir(linkTargetPath, srcDeviceDir); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func createBlockDeviceDir(buildDeviceDir string, srcDeviceDir string) error {
+ // Populate the supplied directory (in our build filesystem) with all the
+ // appropriate information pseudofile contents for the block device.
+ devName := filepath.Base(srcDeviceDir)
+ devFiles, err := ioutil.ReadDir(srcDeviceDir)
+ if err != nil {
+ return err
+ }
+ for _, f := range devFiles {
+ fname := f.Name()
+ fp := filepath.Join(srcDeviceDir, fname)
+ fi, err := os.Lstat(fp)
+ if err != nil {
+ return err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ // Ignore any symlinks in the deviceDir since they simply point to
+ // either self-referential links or information we aren't
+ // interested in like "subsystem"
+ continue
+ } else if fi.IsDir() {
+ if strings.HasPrefix(fname, devName) {
+				// We're interested in the directories that begin with the
+ // block device name. These are directories with information
+ // about the partitions on the device
+ buildPartitionDir := filepath.Join(
+ buildDeviceDir, fname,
+ )
+ srcPartitionDir := filepath.Join(
+ srcDeviceDir, fname,
+ )
+ trace("creating partition directory %s\n", buildPartitionDir)
+ err = os.MkdirAll(buildPartitionDir, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ err = createPartitionDir(buildPartitionDir, srcPartitionDir)
+ if err != nil {
+ return err
+ }
+ }
+ } else if fi.Mode().IsRegular() {
+ // Regular files in the block device directory are both regular and
+ // pseudofiles containing information such as the size (in sectors)
+ // and whether the device is read-only
+ buf, err := ioutil.ReadFile(fp)
+ if err != nil {
+ if errors.Is(err, os.ErrPermission) {
+ // example: /sys/devices/virtual/block/zram0/compact is 0400
+ trace("permission denied reading %q - skipped\n", fp)
+ continue
+ }
+ return err
+ }
+ targetPath := filepath.Join(buildDeviceDir, fname)
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ // There is a special file $DEVICE_DIR/queue/rotational that, for some hard
+ // drives, contains a 1 or 0 indicating whether the device is a spinning
+ // disk or not
+ srcQueueDir := filepath.Join(
+ srcDeviceDir,
+ "queue",
+ )
+ buildQueueDir := filepath.Join(
+ buildDeviceDir,
+ "queue",
+ )
+ err = os.MkdirAll(buildQueueDir, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ fp := filepath.Join(srcQueueDir, "rotational")
+ buf, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return err
+ }
+ targetPath := filepath.Join(buildQueueDir, "rotational")
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+
+ return nil
+}
+
+func createPartitionDir(buildPartitionDir string, srcPartitionDir string) error {
+ // Populate the supplied directory (in our build filesystem) with all the
+ // appropriate information pseudofile contents for the partition.
+ partFiles, err := ioutil.ReadDir(srcPartitionDir)
+ if err != nil {
+ return err
+ }
+ for _, f := range partFiles {
+ fname := f.Name()
+ fp := filepath.Join(srcPartitionDir, fname)
+ fi, err := os.Lstat(fp)
+ if err != nil {
+ return err
+ }
+ if fi.Mode()&os.ModeSymlink != 0 {
+ // Ignore any symlinks in the partition directory since they simply
+ // point to information we aren't interested in like "subsystem"
+ continue
+ } else if fi.IsDir() {
+ // The subdirectories in the partition directory are not
+ // interesting for us. They have information about power events and
+ // traces
+ continue
+ } else if fi.Mode().IsRegular() {
+ // Regular files in the block device directory are both regular and
+ // pseudofiles containing information such as the size (in sectors)
+ // and whether the device is read-only
+ buf, err := ioutil.ReadFile(fp)
+ if err != nil {
+ return err
+ }
+ targetPath := filepath.Join(buildPartitionDir, fname)
+ trace("creating %s\n", targetPath)
+ f, err := os.Create(targetPath)
+ if err != nil {
+ return err
+ }
+ if _, err = f.Write(buf); err != nil {
+ return err
+ }
+ f.Close()
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go
new file mode 100644
index 00000000..a26d6b01
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_gpu_linux.go
@@ -0,0 +1,33 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "strings"
+)
+
+// ExpectedCloneGPUContent returns a slice of strings pertaining to the GPU devices ghw
+// cares about. We cannot use a static list because we want to grab only the first cardX data
+// (see comment in pkg/gpu/gpu_linux.go)
+// Additionally, we want to make sure to clone the backing device data.
+func ExpectedCloneGPUContent() []string {
+ cardEntries := []string{
+ "device",
+ }
+
+ filterName := func(cardName string) bool {
+ if !strings.HasPrefix(cardName, "card") {
+ return false
+ }
+ if strings.ContainsRune(cardName, '-') {
+ return false
+ }
+ return true
+ }
+
+ return cloneContentByClass("drm", cardEntries, filterName, filterNone)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go
new file mode 100644
index 00000000..0ccd6935
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_linux.go
@@ -0,0 +1,109 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "io/ioutil"
+ "os"
+ "path/filepath"
+)
+
+func setupScratchDir(scratchDir string) error {
+ var createPaths = []string{
+ "sys/block",
+ }
+
+ for _, path := range createPaths {
+ if err := os.MkdirAll(filepath.Join(scratchDir, path), os.ModePerm); err != nil {
+ return err
+ }
+ }
+
+ return createBlockDevices(scratchDir)
+}
+
+// ExpectedCloneStaticContent returns a slice of glob patterns which represent the pseudofiles
+// ghw cares about, and which are independent of host-specific topology or configuration,
+// thus are safely represented by a static slice - i.e. they don't need to be discovered at runtime.
+func ExpectedCloneStaticContent() []string {
+ return []string{
+ "/proc/cpuinfo",
+ "/proc/meminfo",
+ "/proc/self/mounts",
+ "/sys/devices/system/cpu/cpu*/cache/index*/*",
+ "/sys/devices/system/cpu/cpu*/topology/*",
+ "/sys/devices/system/memory/block_size_bytes",
+ "/sys/devices/system/memory/memory*/online",
+ "/sys/devices/system/memory/memory*/state",
+ "/sys/devices/system/node/has_*",
+ "/sys/devices/system/node/online",
+ "/sys/devices/system/node/possible",
+ "/sys/devices/system/node/node*/cpu*",
+ "/sys/devices/system/node/node*/distance",
+ "/sys/devices/system/node/node*/meminfo",
+ "/sys/devices/system/node/node*/memory*",
+ "/sys/devices/system/node/node*/hugepages/hugepages-*/*",
+ }
+}
+
+type filterFunc func(string) bool
+
+// cloneContentByClass copies all the content related to a given device class
+// (devClass), possibly filtering out devices whose name does NOT pass a
+// filter (filterName). Each entry in `/sys/class/$CLASS` is actually a
+// symbolic link. We can filter out entries depending on the link target.
+// Each filter is a simple function which takes the entry name or the link
+// target and must return true if the entry should be collected, false
+// otherwise. Finally, for each surviving entry we collect an explicit list of
+// attributes, given as a list of glob patterns in `subEntries`.
+// Returns the final list of glob patterns to be collected.
+func cloneContentByClass(devClass string, subEntries []string, filterName filterFunc, filterLink filterFunc) []string {
+ var fileSpecs []string
+
+ // warning: don't use the context package here, this means not even the linuxpath package.
+ // TODO(fromani) remove the path duplication
+ sysClass := filepath.Join("sys", "class", devClass)
+ entries, err := ioutil.ReadDir(sysClass)
+ if err != nil {
+ // we should not import context, hence we can't Warn()
+ return fileSpecs
+ }
+ for _, entry := range entries {
+ devName := entry.Name()
+
+ if !filterName(devName) {
+ continue
+ }
+
+ devPath := filepath.Join(sysClass, devName)
+ dest, err := os.Readlink(devPath)
+ if err != nil {
+ continue
+ }
+
+ if !filterLink(dest) {
+ continue
+ }
+
+ // so, first copy the symlink itself
+ fileSpecs = append(fileSpecs, devPath)
+ // now we have to clone the content of the actual entry
+ // related (and found into a subdir of) the backing hardware
+ // device
+ devData := filepath.Clean(filepath.Join(sysClass, dest))
+ for _, subEntry := range subEntries {
+ fileSpecs = append(fileSpecs, filepath.Join(devData, subEntry))
+ }
+ }
+
+ return fileSpecs
+}
+
+// filterNone allows all content, filtering out none of it
+func filterNone(_ string) bool {
+ return true
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go
new file mode 100644
index 00000000..27b27573
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_net_linux.go
@@ -0,0 +1,28 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "strings"
+)
+
+// ExpectedCloneNetContent returns a slice of strings pertaining to the network interfaces ghw
+// cares about. We cannot use a static list because we want to filter away the virtual devices,
+// which ghw doesn't concern itself with. So we need to do some runtime discovery.
+// Additionally, we want to make sure to clone the backing device data.
+func ExpectedCloneNetContent() []string {
+ ifaceEntries := []string{
+ "addr_assign_type",
+		// intentionally do not clone "address", to avoid leaking any host-identifiable data.
+ }
+
+ filterLink := func(linkDest string) bool {
+ return !strings.Contains(linkDest, "devices/virtual/net")
+ }
+
+ return cloneContentByClass("net", ifaceEntries, filterNone, filterLink)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go
new file mode 100644
index 00000000..dbc3fc83
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_pci_linux.go
@@ -0,0 +1,151 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ pciaddr "github.com/jaypipes/ghw/pkg/pci/address"
+)
+
+const (
+ // root directory: entry point to start scanning the PCI forest
+ // warning: don't use the context package here, this means not even the linuxpath package.
+ // TODO(fromani) remove the path duplication
+ sysBusPCIDir = "/sys/bus/pci/devices"
+)
+
+// ExpectedClonePCIContent returns a slice of glob patterns which represent the pseudofiles
+// ghw cares about, pertaining to PCI devices only.
+// Beware: the content is host-specific, because the PCI topology is host-dependent and unpredictable.
+func ExpectedClonePCIContent() []string {
+ fileSpecs := []string{
+ "/sys/bus/pci/drivers/*",
+ }
+ pciRoots := []string{
+ sysBusPCIDir,
+ }
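+	// Breadth-first scan of the PCI forest: consume one root at a time and
+	// enqueue any PCI bridge found beneath it as a new root to scan.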
+ for {
+ if len(pciRoots) == 0 {
+ break
+ }
+ pciRoot := pciRoots[0]
+ pciRoots = pciRoots[1:]
+ specs, roots := scanPCIDeviceRoot(pciRoot)
+ pciRoots = append(pciRoots, roots...)
+ fileSpecs = append(fileSpecs, specs...)
+ }
+ return fileSpecs
+}
+
+// scanPCIDeviceRoot reports a slice of glob patterns which represent the pseudofiles
+// ghw cares about pertaining to all the PCI devices connected to the bus reachable from the
+// given root; usually (but not always) a CPU package has 1+ PCI(e) roots, forming the first
+// level; more PCI bridges are (usually) attached to this level, creating deeply nested trees.
+// Hence we need to scan all possible roots, to make sure not to miss important devices.
+//
+// A note about error reporting: this function and its helper functions use trace() everywhere
+// to report recoverable errors, even though it would have been more appropriate to use Warn().
+// This is unfortunate, and again a byproduct of the fact that we cannot use context.Context
+// here without creating circular dependencies.
+// TODO(fromani): switch to Warn() as soon as we figure out how to break this circular dep.
+func scanPCIDeviceRoot(root string) (fileSpecs []string, pciRoots []string) {
+ trace("scanning PCI device root %q\n", root)
+
+ perDevEntries := []string{
+ "class",
+ "device",
+ "driver",
+ "irq",
+ "local_cpulist",
+ "modalias",
+ "numa_node",
+ "revision",
+ "vendor",
+ }
+ entries, err := ioutil.ReadDir(root)
+ if err != nil {
+ return []string{}, []string{}
+ }
+ for _, entry := range entries {
+ entryName := entry.Name()
+ if addr := pciaddr.FromString(entryName); addr == nil {
+			// doesn't look like an entry we care about.
+			// This is by far the most likely path,
+			// hence we should NOT trace/warn here.
+ continue
+ }
+
+ entryPath := filepath.Join(root, entryName)
+ pciEntry, err := findPCIEntryFromPath(root, entryName)
+ if err != nil {
+ trace("error scanning %q: %v", entryName, err)
+ continue
+ }
+
+ trace("PCI entry is %q\n", pciEntry)
+ fileSpecs = append(fileSpecs, entryPath)
+ for _, perNetEntry := range perDevEntries {
+ fileSpecs = append(fileSpecs, filepath.Join(pciEntry, perNetEntry))
+ }
+
+ if isPCIBridge(entryPath) {
+ trace("adding new PCI root %q\n", entryName)
+ pciRoots = append(pciRoots, pciEntry)
+ }
+ }
+ return fileSpecs, pciRoots
+}
+
+func findPCIEntryFromPath(root, entryName string) (string, error) {
+ entryPath := filepath.Join(root, entryName)
+ fi, err := os.Lstat(entryPath)
+ if err != nil {
+ return "", fmt.Errorf("stat(%s) failed: %v\n", entryPath, err)
+ }
+ if fi.Mode()&os.ModeSymlink == 0 {
+ // regular file, nothing to resolve
+ return entryPath, nil
+ }
+ // resolve symlink
+ target, err := os.Readlink(entryPath)
+ trace("entry %q is symlink resolved to %q\n", entryPath, target)
+ if err != nil {
+ return "", fmt.Errorf("readlink(%s) failed: %v - skipped\n", entryPath, err)
+ }
+ return filepath.Clean(filepath.Join(root, target)), nil
+}
+
+func isPCIBridge(entryPath string) bool {
+ subNodes, err := ioutil.ReadDir(entryPath)
+ if err != nil {
+ // this is so unlikely we don't even return error. But we trace just in case.
+ trace("error scanning device entry path %q: %v", entryPath, err)
+ return false
+ }
+ for _, subNode := range subNodes {
+ if !subNode.IsDir() {
+ continue
+ }
+ if addr := pciaddr.FromString(subNode.Name()); addr != nil {
+			// we got an entry in the directory pertaining to this device
+			// which is a directory itself and is named like a PCI address.
+			// Hence we infer the device we are considering is a PCI bridge of sorts.
+			// This is indeed a bit brutal, but the only possible alternative
+			// (besides blindly copying everything in /sys/bus/pci/devices) is
+			// to detect the type of the device and pick only the bridges.
+			// That approach would duplicate the logic within the `pci` subpkg
+			// - or force us into awkward dep cycles - and has poorer forward
+			// compatibility.
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go
new file mode 100644
index 00000000..af85a55b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/clonetree_stub.go
@@ -0,0 +1,30 @@
+//go:build !linux
+// +build !linux
+
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+func setupScratchDir(scratchDir string) error {
+ return nil
+}
+
+func ExpectedCloneStaticContent() []string {
+ return []string{}
+}
+
+func ExpectedCloneGPUContent() []string {
+ return []string{}
+}
+
+func ExpectedCloneNetContent() []string {
+ return []string{}
+}
+
+func ExpectedClonePCIContent() []string {
+ return []string{}
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go
new file mode 100644
index 00000000..94b5bb69
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/pack.go
@@ -0,0 +1,113 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// PackFrom creates the snapshot named `snapshotName` from the
+// directory tree whose root is `sourceRoot`.
+func PackFrom(snapshotName, sourceRoot string) error {
+ f, err := OpenDestination(snapshotName)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ return PackWithWriter(f, sourceRoot)
+}
+
+// OpenDestination opens the `snapshotName` file for writing, bailing out
+// if the file already exists and has content.
+// This is done to avoid accidental overwrites.
+func OpenDestination(snapshotName string) (*os.File, error) {
+ var f *os.File
+ var err error
+
+ if _, err = os.Stat(snapshotName); errors.Is(err, os.ErrNotExist) {
+ if f, err = os.Create(snapshotName); err != nil {
+ return nil, err
+ }
+ } else if err != nil {
+ return nil, err
+ } else {
+		f, err = os.OpenFile(snapshotName, os.O_WRONLY, 0600)
+ if err != nil {
+ return nil, err
+ }
+ fs, err := f.Stat()
+ if err != nil {
+ return nil, err
+ }
+ if fs.Size() > 0 {
+ return nil, fmt.Errorf("File %s already exists and is of size >0", snapshotName)
+ }
+ }
+ return f, nil
+}
+
+// PackWithWriter creates a snapshot, sending all the binary data to the
+// given `fw` writer. The snapshot is made from the directory tree whose
+// root is `sourceRoot`.
+func PackWithWriter(fw io.Writer, sourceRoot string) error {
+ gzw := gzip.NewWriter(fw)
+ defer gzw.Close()
+
+ tw := tar.NewWriter(gzw)
+ defer tw.Close()
+
+ return createSnapshot(tw, sourceRoot)
+}
+
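+// createSnapshot walks buildDir and writes one tar entry per filesystem object
+// it finds (directories, regular files and symlinks), with entry names made
+// relative to buildDir so the archive unpacks cleanly under any target root.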
+func createSnapshot(tw *tar.Writer, buildDir string) error {
+ return filepath.Walk(buildDir, func(path string, fi os.FileInfo, _ error) error {
+ if path == buildDir {
+ return nil
+ }
+ var link string
+ var err error
+
+ if fi.Mode()&os.ModeSymlink != 0 {
+ trace("processing symlink %s\n", path)
+ link, err = os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ }
+
+ hdr, err := tar.FileInfoHeader(fi, link)
+ if err != nil {
+ return err
+ }
+ hdr.Name = strings.TrimPrefix(strings.TrimPrefix(path, buildDir), string(os.PathSeparator))
+
+ if err = tw.WriteHeader(hdr); err != nil {
+ return err
+ }
+
+ switch hdr.Typeflag {
+ case tar.TypeReg, tar.TypeRegA:
+ f, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ if _, err = io.Copy(tw, f); err != nil {
+ return err
+ }
+ f.Close()
+ }
+ return nil
+ })
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz b/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz
new file mode 100644
index 00000000..edb26fbd
Binary files /dev/null and b/vendor/github.com/jaypipes/ghw/pkg/snapshot/testdata.tar.gz differ
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go
new file mode 100644
index 00000000..78c76121
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/trace.go
@@ -0,0 +1,17 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+var trace func(msg string, args ...interface{})
+
+func init() {
+ trace = func(msg string, args ...interface{}) {}
+}
+
+func SetTraceFunction(fn func(msg string, args ...interface{})) {
+ trace = fn
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go b/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go
new file mode 100644
index 00000000..3df395e2
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/snapshot/unpack.go
@@ -0,0 +1,129 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package snapshot
+
+import (
+ "archive/tar"
+ "compress/gzip"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+const (
+ TargetRoot = "ghw-snapshot-*"
+)
+
+const (
+ // If set, `ghw` will not unpack the snapshot in the user-supplied directory
+ // unless the aforementioned directory is empty.
+ OwnTargetDirectory = 1 << iota
+)
+
+// Cleanup removes the unpacked snapshot from the given target root.
+// Please note that if the environment variable `GHW_SNAPSHOT_PRESERVE` is set,
+// this function silently skips the removal.
+func Cleanup(targetRoot string) error {
+ if option.EnvOrDefaultSnapshotPreserve() {
+ return nil
+ }
+ return os.RemoveAll(targetRoot)
+}
+
+// Unpack expands the given snapshot in a temporary directory managed by `ghw`. Returns the path of that directory.
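+//
+// A typical (illustrative) use unpacks the snapshot and removes the expanded
+// tree with Cleanup once done:
+//
+//	root, err := snapshot.Unpack("my-snapshot.tgz")
+//	if err == nil {
+//		defer snapshot.Cleanup(root)
+//	}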
+func Unpack(snapshotName string) (string, error) {
+ targetRoot, err := ioutil.TempDir("", TargetRoot)
+ if err != nil {
+ return "", err
+ }
+ _, err = UnpackInto(snapshotName, targetRoot, 0)
+ return targetRoot, err
+}
+
+// UnpackInto expands the given snapshot in a client-supplied directory.
+// Returns true if the snapshot was actually unpacked, false otherwise
+func UnpackInto(snapshotName, targetRoot string, flags uint) (bool, error) {
+ if (flags&OwnTargetDirectory) == OwnTargetDirectory && !isEmptyDir(targetRoot) {
+ return false, nil
+ }
+ snap, err := os.Open(snapshotName)
+ if err != nil {
+ return false, err
+ }
+ defer snap.Close()
+ return true, Untar(targetRoot, snap)
+}
+
+// Untar extracts data from the given reader (providing data in tar.gz format) and unpacks it in the given directory.
+func Untar(root string, r io.Reader) error {
+ var err error
+ gzr, err := gzip.NewReader(r)
+ if err != nil {
+ return err
+ }
+ defer gzr.Close()
+
+ tr := tar.NewReader(gzr)
+ for {
+ header, err := tr.Next()
+ if err == io.EOF {
+ // we are done
+ return nil
+ }
+
+ if err != nil {
+ // bail out
+ return err
+ }
+
+ if header == nil {
+ // TODO: how come?
+ continue
+ }
+
+ target := filepath.Join(root, header.Name)
+ mode := os.FileMode(header.Mode)
+
+ switch header.Typeflag {
+ case tar.TypeDir:
+ err = os.MkdirAll(target, mode)
+ if err != nil {
+ return err
+ }
+
+ case tar.TypeReg:
+ dst, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, mode)
+ if err != nil {
+ return err
+ }
+
+ _, err = io.Copy(dst, tr)
+ if err != nil {
+ return err
+ }
+
+ dst.Close()
+
+ case tar.TypeSymlink:
+ err = os.Symlink(header.Linkname, target)
+ if err != nil {
+ return err
+ }
+ }
+ }
+}
+
+func isEmptyDir(name string) bool {
+ entries, err := ioutil.ReadDir(name)
+ if err != nil {
+ return false
+ }
+ return len(entries) == 0
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go
new file mode 100644
index 00000000..4a269bb9
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology.go
@@ -0,0 +1,156 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/marshal"
+ "github.com/jaypipes/ghw/pkg/memory"
+ "github.com/jaypipes/ghw/pkg/option"
+)
+
+// Architecture describes the overall hardware architecture. It can be either
+// Symmetric Multi-Processor (SMP) or Non-Uniform Memory Access (NUMA)
+type Architecture int
+
+const (
+ // SMP is a Symmetric Multi-Processor system
+ ARCHITECTURE_SMP Architecture = iota
+ // NUMA is a Non-Uniform Memory Access system
+ ARCHITECTURE_NUMA
+)
+
+var (
+ architectureString = map[Architecture]string{
+ ARCHITECTURE_SMP: "SMP",
+ ARCHITECTURE_NUMA: "NUMA",
+ }
+
+ // NOTE(fromani): the keys are all lowercase and do not match
+ // the keys in the opposite table `architectureString`.
+ // This is done because of the choice we made in
+ // Architecture:MarshalJSON.
+ // We use this table only in UnmarshalJSON, so it should be OK.
+ stringArchitecture = map[string]Architecture{
+ "smp": ARCHITECTURE_SMP,
+ "numa": ARCHITECTURE_NUMA,
+ }
+)
+
+func (a Architecture) String() string {
+ return architectureString[a]
+}
+
+// NOTE(jaypipes): since serialized output is as "official" as we're going to
+// get, let's lowercase the string output when serializing, in order to
+// "normalize" the expected serialized output
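+// For example, ARCHITECTURE_NUMA is serialized as the JSON string "numa".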
+func (a Architecture) MarshalJSON() ([]byte, error) {
+ return []byte(strconv.Quote(strings.ToLower(a.String()))), nil
+}
+
+func (a *Architecture) UnmarshalJSON(b []byte) error {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ key := strings.ToLower(s)
+ val, ok := stringArchitecture[key]
+ if !ok {
+ return fmt.Errorf("unknown architecture: %q", key)
+ }
+ *a = val
+ return nil
+}
+
+// Node is an abstract construct representing a collection of processors and
+// various levels of memory cache that those processors share. In a NUMA
+// architecture, there are multiple NUMA nodes, abstracted here as multiple
+// Node structs. In an SMP architecture, a single Node will be available in the
+// Info struct and this single struct can be used to describe the levels of
+// memory caching available to the single physical processor package's physical
+// processor cores
+type Node struct {
+ ID int `json:"id"`
+ Cores []*cpu.ProcessorCore `json:"cores"`
+ Caches []*memory.Cache `json:"caches"`
+ Distances []int `json:"distances"`
+ Memory *memory.Area `json:"memory"`
+}
+
+func (n *Node) String() string {
+ return fmt.Sprintf(
+ "node #%d (%d cores)",
+ n.ID,
+ len(n.Cores),
+ )
+}
+
+// Info describes the system topology for the host hardware
+type Info struct {
+ ctx *context.Context
+ Architecture Architecture `json:"architecture"`
+ Nodes []*Node `json:"nodes"`
+}
+
+// New returns a pointer to an Info struct that contains information about the
+// NUMA topology on the host system
+func New(opts ...*option.Option) (*Info, error) {
+ merged := option.Merge(opts...)
+ ctx := context.New(merged)
+ info := &Info{ctx: ctx}
+ var err error
+ if context.Exists(merged) {
+ err = info.load()
+ } else {
+ err = ctx.Do(info.load)
+ }
+ if err != nil {
+ return nil, err
+ }
+ for _, node := range info.Nodes {
+ sort.Sort(memory.SortByCacheLevelTypeFirstProcessor(node.Caches))
+ }
+ return info, nil
+}
+
+func (i *Info) String() string {
+ archStr := "SMP"
+ if i.Architecture == ARCHITECTURE_NUMA {
+ archStr = "NUMA"
+ }
+ res := fmt.Sprintf(
+ "topology %s (%d nodes)",
+ archStr,
+ len(i.Nodes),
+ )
+ return res
+}
+
+// simple private struct used to encapsulate topology information in a
+// top-level "topology" YAML/JSON map/object key
+type topologyPrinter struct {
+ Info *Info `json:"topology"`
+}
+
+// YAMLString returns a string with the topology information formatted as YAML
+// under a top-level "topology:" key
+func (i *Info) YAMLString() string {
+ return marshal.SafeYAML(i.ctx, topologyPrinter{i})
+}
+
+// JSONString returns a string with the topology information formatted as JSON
+// under a top-level "topology:" key
+func (i *Info) JSONString(indent bool) string {
+ return marshal.SafeJSON(i.ctx, topologyPrinter{i}, indent)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go
new file mode 100644
index 00000000..6844dd96
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_linux.go
@@ -0,0 +1,107 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+ "github.com/jaypipes/ghw/pkg/cpu"
+ "github.com/jaypipes/ghw/pkg/linuxpath"
+ "github.com/jaypipes/ghw/pkg/memory"
+)
+
+func (i *Info) load() error {
+ i.Nodes = topologyNodes(i.ctx)
+ if len(i.Nodes) == 1 {
+ i.Architecture = ARCHITECTURE_SMP
+ } else {
+ i.Architecture = ARCHITECTURE_NUMA
+ }
+ return nil
+}
+
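+// topologyNodes scans the node entries under the sysfs system node directory
+// and builds one Node per "nodeN" entry, populating its cores, caches,
+// distances and memory area.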
+func topologyNodes(ctx *context.Context) []*Node {
+ paths := linuxpath.New(ctx)
+ nodes := make([]*Node, 0)
+
+ files, err := ioutil.ReadDir(paths.SysDevicesSystemNode)
+ if err != nil {
+ ctx.Warn("failed to determine nodes: %s\n", err)
+ return nodes
+ }
+ for _, file := range files {
+ filename := file.Name()
+ if !strings.HasPrefix(filename, "node") {
+ continue
+ }
+ node := &Node{}
+ nodeID, err := strconv.Atoi(filename[4:])
+ if err != nil {
+ ctx.Warn("failed to determine node ID: %s\n", err)
+ return nodes
+ }
+ node.ID = nodeID
+ cores, err := cpu.CoresForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine cores for node: %s\n", err)
+ return nodes
+ }
+ node.Cores = cores
+ caches, err := memory.CachesForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine caches for node: %s\n", err)
+ return nodes
+ }
+ node.Caches = caches
+
+ distances, err := distancesForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine node distances for node: %s\n", err)
+ return nodes
+ }
+ node.Distances = distances
+
+ area, err := memory.AreaForNode(ctx, nodeID)
+ if err != nil {
+ ctx.Warn("failed to determine memory area for node: %s\n", err)
+ return nodes
+ }
+ node.Memory = area
+
+ nodes = append(nodes, node)
+ }
+ return nodes
+}
+
+func distancesForNode(ctx *context.Context, nodeID int) ([]int, error) {
+ paths := linuxpath.New(ctx)
+ path := filepath.Join(
+ paths.SysDevicesSystemNode,
+ fmt.Sprintf("node%d", nodeID),
+ "distance",
+ )
+
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ items := strings.Fields(strings.TrimSpace(string(data)))
+ dists := make([]int, len(items)) // TODO: can a NUMA cell be offlined?
+ for idx, item := range items {
+ dist, err := strconv.Atoi(item)
+ if err != nil {
+ return dists, err
+ }
+ dists[idx] = dist
+ }
+ return dists, nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go
new file mode 100644
index 00000000..b5ee4354
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_stub.go
@@ -0,0 +1,19 @@
+//go:build !linux && !windows
+// +build !linux,!windows
+
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "runtime"
+
+ "github.com/pkg/errors"
+)
+
+func (i *Info) load() error {
+ return errors.New("topologyFillInfo not implemented on " + runtime.GOOS)
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go
new file mode 100644
index 00000000..3141ac99
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/topology/topology_windows.go
@@ -0,0 +1,156 @@
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package topology
+
+import (
+ "encoding/binary"
+ "fmt"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ rcFailure = 0
+ sizeofLogicalProcessorInfo = 32
+ errInsufficientBuffer syscall.Errno = 122
+
+ relationProcessorCore = 0
+ relationNUMANode = 1
+ relationCache = 2
+ relationProcessorPackage = 3
+ relationGroup = 4
+)
+
+func (i *Info) load() error {
+ nodes, err := topologyNodes()
+ if err != nil {
+ return err
+ }
+ i.Nodes = nodes
+ if len(nodes) == 1 {
+ i.Architecture = ARCHITECTURE_SMP
+ } else {
+ i.Architecture = ARCHITECTURE_NUMA
+ }
+ return nil
+}
+
+func topologyNodes() ([]*Node, error) {
+ nodes := make([]*Node, 0)
+ lpis, err := getWin32LogicalProcessorInfos()
+ if err != nil {
+ return nil, err
+ }
+ for _, lpi := range lpis {
+ switch lpi.relationship {
+ case relationNUMANode:
+ nodes = append(nodes, &Node{
+ ID: lpi.numaNodeID(),
+ })
+ case relationProcessorCore:
+ // TODO(jaypipes): associated LP to processor core
+ case relationProcessorPackage:
+ // ignore
+ case relationCache:
+ // TODO(jaypipes) handle cache layers
+ default:
+ return nil, fmt.Errorf("Unknown LOGICAL_PROCESSOR_RELATIONSHIP value: %d", lpi.relationship)
+
+ }
+ }
+ return nodes, nil
+}
+
+// This is the CACHE_DESCRIPTOR struct in the Win32 API
+type cacheDescriptor struct {
+ level uint8
+ associativity uint8
+ lineSize uint16
+ size uint32
+ cacheType uint32
+}
+
+// This is the SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct in the Win32 API
+type logicalProcessorInfo struct {
+ processorMask uint64
+ relationship uint64
+ // The following dummyunion member is a representation of this part of
+ // the SYSTEM_LOGICAL_PROCESSOR_INFORMATION struct:
+ //
+ // union {
+ // struct {
+ // BYTE Flags;
+ // } ProcessorCore;
+ // struct {
+ // DWORD NodeNumber;
+ // } NumaNode;
+ // CACHE_DESCRIPTOR Cache;
+ // ULONGLONG Reserved[2];
+ // } DUMMYUNIONNAME;
+ dummyunion [16]byte
+}
+
+// numaNodeID returns the NUMA node's identifier from the logical processor
+// information struct by grabbing the integer representation of the struct's
+// NumaNode unioned data element
+func (lpi *logicalProcessorInfo) numaNodeID() int {
+ if lpi.relationship != relationNUMANode {
+ return -1
+ }
+ return int(binary.LittleEndian.Uint16(lpi.dummyunion[0:]))
+}
+
+// ref: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getlogicalprocessorinformation
+func getWin32LogicalProcessorInfos() (
+ []*logicalProcessorInfo,
+ error,
+) {
+ lpis := make([]*logicalProcessorInfo, 0)
+ win32api := syscall.NewLazyDLL("kernel32.dll")
+ glpi := win32api.NewProc("GetLogicalProcessorInformation")
+
+ // The way the GetLogicalProcessorInformation (GLPI) Win32 API call
+ // works is wonky, but consistent with the Win32 API calling structure.
+	// Basically, you need to first call the GLPI API with a NULL pointer
+	// and a pointer to an integer. That first call to the API should
+	// return ERROR_INSUFFICIENT_BUFFER, which is the indication that the
+	// supplied buffer pointer is NULL and needs to have memory allocated to
+ // it of an amount equal to the value of the integer pointer argument.
+ // Once the buffer is allocated this amount of space, the GLPI API call
+ // is again called. This time, the return value should be 0 and the
+ // buffer will have been set to an array of
+ // SYSTEM_LOGICAL_PROCESSOR_INFORMATION structs.
+ toAllocate := uint32(0)
+ // first, figure out how much we need
+ rc, _, win32err := glpi.Call(uintptr(0), uintptr(unsafe.Pointer(&toAllocate)))
+ if rc == rcFailure {
+ if win32err != errInsufficientBuffer {
+ return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API initial call failed to return ERROR_INSUFFICIENT_BUFFER")
+ }
+ } else {
+ // This shouldn't happen because buffer hasn't yet been allocated...
+ return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API initial call returned success instead of failure with ERROR_INSUFFICIENT_BUFFER")
+ }
+
+ // OK, now we actually allocate a raw buffer to fill with some number
+ // of SYSTEM_LOGICAL_PROCESSOR_INFORMATION structs
+ b := make([]byte, toAllocate)
+ rc, _, win32err = glpi.Call(uintptr(unsafe.Pointer(&b[0])), uintptr(unsafe.Pointer(&toAllocate)))
+ if rc == rcFailure {
+ return nil, fmt.Errorf("GetLogicalProcessorInformation Win32 API call failed to set supplied buffer. Win32 system error: %s", win32err)
+ }
+
+ for x := uint32(0); x < toAllocate; x += sizeofLogicalProcessorInfo {
+ lpiraw := b[x : x+sizeofLogicalProcessorInfo]
+ lpi := &logicalProcessorInfo{
+ processorMask: binary.LittleEndian.Uint64(lpiraw[0:]),
+ relationship: binary.LittleEndian.Uint64(lpiraw[8:]),
+ }
+ copy(lpi.dummyunion[0:16], lpiraw[16:32])
+ lpis = append(lpis, lpi)
+ }
+ return lpis, nil
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go b/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go
new file mode 100644
index 00000000..13fa7b5b
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/unitutil/unit.go
@@ -0,0 +1,37 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package unitutil
+
+var (
+ KB int64 = 1024
+ MB = KB * 1024
+ GB = MB * 1024
+ TB = GB * 1024
+ PB = TB * 1024
+ EB = PB * 1024
+)
+
+// AmountString returns the integer unit divisor and its string suffix (KB,
+// MB, GB, ...) appropriate for displaying the supplied size in bytes.
+//
+// For example, AmountString(1022) returns (1024, "KB") and
+// AmountString(2*MB) returns (1048576, "MB").
+func AmountString(size int64) (int64, string) {
+ switch {
+ case size < MB:
+ return KB, "KB"
+ case size < GB:
+ return MB, "MB"
+ case size < TB:
+ return GB, "GB"
+ case size < PB:
+ return TB, "TB"
+ case size < EB:
+ return PB, "PB"
+ default:
+ return EB, "EB"
+ }
+}
diff --git a/vendor/github.com/jaypipes/ghw/pkg/util/util.go b/vendor/github.com/jaypipes/ghw/pkg/util/util.go
new file mode 100644
index 00000000..b72430e2
--- /dev/null
+++ b/vendor/github.com/jaypipes/ghw/pkg/util/util.go
@@ -0,0 +1,59 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package util
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "strconv"
+ "strings"
+
+ "github.com/jaypipes/ghw/pkg/context"
+)
+
+const (
+ UNKNOWN = "unknown"
+)
+
+type closer interface {
+ Close() error
+}
+
+func SafeClose(c closer) {
+ err := c.Close()
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "failed to close: %s", err)
+ }
+}
+
+// SafeIntFromFile reads the supplied filepath and converts its contents to an
+// integer. On any error (file permission or existence problems, or contents
+// that cannot be converted to an integer), a warning message is printed to
+// STDERR and -1 is returned.
+func SafeIntFromFile(ctx *context.Context, path string) int {
+ msg := "failed to read int from file: %s\n"
+ buf, err := ioutil.ReadFile(path)
+ if err != nil {
+ ctx.Warn(msg, err)
+ return -1
+ }
+ contents := strings.TrimSpace(string(buf))
+ res, err := strconv.Atoi(contents)
+ if err != nil {
+ ctx.Warn(msg, err)
+ return -1
+ }
+ return res
+}
+
+// ConcatStrings concatenates strings into a larger one. This function
+// addresses a very specific ghw use case. For a more general approach,
+// just use strings.Join()
+func ConcatStrings(items ...string) string {
+ return strings.Join(items, "")
+}
diff --git a/vendor/github.com/jaypipes/pcidb/.gitignore b/vendor/github.com/jaypipes/pcidb/.gitignore
new file mode 100644
index 00000000..cc292d34
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/.gitignore
@@ -0,0 +1,2 @@
+vendor/
+coverage*.*
diff --git a/vendor/github.com/jaypipes/pcidb/COPYING b/vendor/github.com/jaypipes/pcidb/COPYING
new file mode 100644
index 00000000..68c771a0
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/COPYING
@@ -0,0 +1,176 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
diff --git a/vendor/github.com/jaypipes/pcidb/LICENSE b/vendor/github.com/jaypipes/pcidb/LICENSE
new file mode 100644
index 00000000..261eeb9e
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/jaypipes/pcidb/Makefile b/vendor/github.com/jaypipes/pcidb/Makefile
new file mode 100644
index 00000000..73a274c7
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/Makefile
@@ -0,0 +1,38 @@
+VENDOR := vendor
+PKGS := $(shell go list ./... | grep -v /$(VENDOR)/)
+SRC = $(shell find . -type f -name '*.go' -not -path "*/$(VENDOR)/*")
+BIN_DIR := $(GOPATH)/bin
+GOMETALINTER := $(BIN_DIR)/gometalinter
+
+.PHONY: test
+test: vet
+ go test $(PKGS)
+
+$(GOMETALINTER):
+ go get -u github.com/alecthomas/gometalinter
+ $(GOMETALINTER) --install &> /dev/null
+
+.PHONY: lint
+lint: $(GOMETALINTER)
+ $(GOMETALINTER) ./... --vendor
+
+.PHONY: fmt
+fmt:
+ @gofmt -s -l -w $(SRC)
+
+.PHONY: fmtcheck
+fmtcheck:
+ @bash -c "diff -u <(echo -n) <(gofmt -d $(SRC))"
+
+.PHONY: vet
+vet:
+ go vet $(PKGS)
+
+.PHONY: cover
+cover:
+ $(shell [ -e coverage.out ] && rm coverage.out)
+ @echo "mode: count" > coverage-all.out
+ @$(foreach pkg,$(PKGS),\
+ go test -coverprofile=coverage.out -covermode=count $(pkg);\
+ tail -n +2 coverage.out >> coverage-all.out;)
+ go tool cover -html=coverage-all.out -o=coverage-all.html
diff --git a/vendor/github.com/jaypipes/pcidb/README.md b/vendor/github.com/jaypipes/pcidb/README.md
new file mode 100644
index 00000000..ddfcde6b
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/README.md
@@ -0,0 +1,417 @@
+# `pcidb` - the Golang PCI DB library
+
+[![Build Status](https://github.com/jaypipes/pcidb/actions/workflows/go.yml/badge.svg?branch=main)](https://github.com/jaypipes/pcidb/actions)
+[![Go Report Card](https://goreportcard.com/badge/github.com/jaypipes/pcidb)](https://goreportcard.com/report/github.com/jaypipes/pcidb)
+[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-2.1-4baaaa.svg)](CODE_OF_CONDUCT.md)
+
+`pcidb` is a small Golang library for programmatic querying of PCI vendor,
+product and class information.
+
+We currently test `pcidb` on Linux, Windows and MacOSX.
+
+## Usage
+
+`pcidb` contains a PCI database inspection and querying facility that allows
+developers to query for information about hardware device classes, vendor and
+product information.
+
+The `pcidb.New()` function returns a `pcidb.PCIDB` struct or an error if the
+PCI database could not be loaded.
+
+> `pcidb`'s default behaviour is to first search for pci-ids DB files on the
+> local host system in well-known filesystem paths. If `pcidb` cannot find a
+> pci-ids DB file on the local host system, you can configure `pcidb` to fetch
+> a current pci-ids DB file from the network. You can enable this
+> network-fetching behaviour with the `pcidb.WithEnableNetworkFetch()` function
+> or by setting the `PCIDB_ENABLE_NETWORK_FETCH` environment variable to a
+> non-zero value.
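+
+For example, a minimal sketch (assumed to run inside `main()`, with `fmt`
+imported) that opts in to fetching the pci-ids DB over the network when no
+local copy can be found:
+
+```go
+pci, err := pcidb.New(pcidb.WithEnableNetworkFetch())
+if err != nil {
+	fmt.Printf("Error getting PCI info: %v", err)
+	return
+}
+fmt.Printf("loaded %d PCI vendors\n", len(pci.Vendors))
+```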
+
+The `pcidb.PCIDB` struct contains a number of fields that may be queried for
+PCI information:
+
+* `pcidb.PCIDB.Classes` is a map, keyed by the PCI class ID (a hex-encoded
+ string) of pointers to `pcidb.Class` structs, one for each class of PCI
+ device known to `pcidb`
+* `pcidb.PCIDB.Vendors` is a map, keyed by the PCI vendor ID (a hex-encoded
+ string) of pointers to `pcidb.Vendor` structs, one for each PCI vendor
+ known to `pcidb`
+* `pcidb.PCIDB.Products` is a map, keyed by the PCI vendor ID plus product ID
+  (a hex-encoded string) of pointers to `pcidb.Product` structs, one for each
+  PCI product known to `pcidb` (see the lookup sketch after the note below)
+
+**NOTE**: PCI products are often referred to by their "device ID". We use
+the term "product ID" in `pcidb` because it more accurately reflects what the
+identifier is for: a specific product line produced by the vendor.
+
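+As a quick illustrative sketch of querying these maps (assuming `pci` is a
+`*pcidb.PCIDB` returned by `pcidb.New()` and that the Intel vendor ID `8086`
+is present in your pci-ids DB; it is used here purely as an example key):
+
+```go
+if vendor, ok := pci.Vendors["8086"]; ok {
+	fmt.Printf("%s ('%s') has %d known products\n",
+		vendor.Name, vendor.ID, len(vendor.Products))
+}
+```
+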
+### Overriding the root mountpoint `pcidb` uses
+
+The default root mountpoint that `pcidb` uses when looking for information
+about the host system is `/`. So, for example, when looking up known PCI IDS DB
+files on Linux, `pcidb` will attempt to discover a pciids DB file at
+`/usr/share/misc/pci.ids`. If you are calling `pcidb` from a system that has an
+alternate root mountpoint, you can either set the `PCIDB_CHROOT` environment
+variable to that alternate path, or call the `pcidb.New()` function with the
+`pcidb.WithChroot()` modifier.
+
+For example, if you are executing from within an application container that has
+bind-mounted the root host filesystem to the mount point `/host`, you would set
+`PCIDB_CHROOT` to `/host` so that pcidb can find files like
+`/usr/share/misc/pci.ids` at `/host/usr/share/misc/pci.ids`.
+
+Alternately, you can use the `pcidb.WithChroot()` function like so:
+
+```go
+pci, err := pcidb.New(pcidb.WithChroot("/host"))
+```
+
+### PCI device classes
+
+Let's take a look at the PCI device class information and how to query the PCI
+database for class, subclass, and programming interface information.
+
+Each `pcidb.Class` struct contains the following fields:
+
+* `pcidb.Class.ID` is the hex-encoded string identifier for the device
+ class
+* `pcidb.Class.Name` is the common name/description of the class
+* `pcidb.Class.Subclasses` is an array of pointers to
+ `pcidb.Subclass` structs, one for each subclass in the device class
+
+Each `pcidb.Subclass` struct contains the following fields:
+
+* `pcidb.Subclass.ID` is the hex-encoded string identifier for the device
+ subclass
+* `pcidb.Subclass.Name` is the common name/description of the subclass
+* `pcidb.Subclass.ProgrammingInterfaces` is an array of pointers to
+ `pcidb.ProgrammingInterface` structs, one for each programming interface
+ for the device subclass
+
+Each `pcidb.ProgrammingInterface` struct contains the following fields:
+
+* `pcidb.ProgrammingInterface.ID` is the hex-encoded string identifier for
+ the programming interface
+* `pcidb.ProgrammingInterface.Name` is the common name/description for the
+ programming interface
+
+```go
+package main
+
+import (
+ "fmt"
+
+ "github.com/jaypipes/pcidb"
+)
+
+func main() {
+ pci, err := pcidb.New()
+	if err != nil {
+		fmt.Printf("Error getting PCI info: %v", err)
+		return
+	}
+
+ for _, devClass := range pci.Classes {
+ fmt.Printf(" Device class: %v ('%v')\n", devClass.Name, devClass.ID)
+ for _, devSubclass := range devClass.Subclasses {
+ fmt.Printf(" Device subclass: %v ('%v')\n", devSubclass.Name, devSubclass.ID)
+ for _, progIface := range devSubclass.ProgrammingInterfaces {
+ fmt.Printf(" Programming interface: %v ('%v')\n", progIface.Name, progIface.ID)
+ }
+ }
+ }
+}
+```
+
+Example output from my personal workstation, snipped for brevity:
+
+```
+...
+ Device class: Serial bus controller ('0c')
+ Device subclass: FireWire (IEEE 1394) ('00')
+ Programming interface: Generic ('00')
+ Programming interface: OHCI ('10')
+ Device subclass: ACCESS Bus ('01')
+ Device subclass: SSA ('02')
+ Device subclass: USB controller ('03')
+ Programming interface: UHCI ('00')
+ Programming interface: OHCI ('10')
+ Programming interface: EHCI ('20')
+ Programming interface: XHCI ('30')
+ Programming interface: Unspecified ('80')
+ Programming interface: USB Device ('fe')
+ Device subclass: Fibre Channel ('04')
+ Device subclass: SMBus ('05')
+ Device subclass: InfiniBand ('06')
+ Device subclass: IPMI SMIC interface ('07')
+ Device subclass: SERCOS interface ('08')
+ Device subclass: CANBUS ('09')
+...
+```
+
+### PCI vendors and products
+
+Let's take a look at the PCI vendor information and how to query the PCI
+database for vendor information and the products a vendor supplies.
+
+Each `pcidb.Vendor` struct contains the following fields:
+
+* `pcidb.Vendor.ID` is the hex-encoded string identifier for the vendor
+* `pcidb.Vendor.Name` is the common name/description of the vendor
+* `pcidb.Vendor.Products` is an array of pointers to `pcidb.Product`
+ structs, one for each product supplied by the vendor
+
+Each `pcidb.Product` struct contains the following fields:
+
+* `pcidb.Product.VendorID` is the hex-encoded string identifier for the
+ product's vendor
+* `pcidb.Product.ID` is the hex-encoded string identifier for the product
+* `pcidb.Product.Name` is the common name/description of the subclass
+* `pcidb.Product.Subsystems` is an array of pointers to
+ `pcidb.Product` structs, one for each "subsystem" (sometimes called
+ "sub-device" in PCI literature) for the product
+
+**NOTE**: A subsystem product may have a different vendor than its "parent" PCI
+product. This is sometimes referred to as the "sub-vendor".
+
+Here's some example code that demonstrates listing the PCI vendors with the
+most known products:
+
+```go
+package main
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/jaypipes/pcidb"
+)
+
+type ByCountProducts []*pcidb.Vendor
+
+func (v ByCountProducts) Len() int {
+ return len(v)
+}
+
+func (v ByCountProducts) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
+
+func (v ByCountProducts) Less(i, j int) bool {
+ return len(v[i].Products) > len(v[j].Products)
+}
+
+func main() {
+ pci, err := pcidb.New()
+	if err != nil {
+		fmt.Printf("Error getting PCI info: %v", err)
+		return
+	}
+
+ vendors := make([]*pcidb.Vendor, len(pci.Vendors))
+ x := 0
+ for _, vendor := range pci.Vendors {
+ vendors[x] = vendor
+ x++
+ }
+
+ sort.Sort(ByCountProducts(vendors))
+
+ fmt.Println("Top 5 vendors by product")
+ fmt.Println("====================================================")
+ for _, vendor := range vendors[0:5] {
+ fmt.Printf("%v ('%v') has %d products\n", vendor.Name, vendor.ID, len(vendor.Products))
+ }
+}
+```
+
+which yields (on my local workstation as of July 7th, 2018):
+
+```
+Top 5 vendors by product
+====================================================
+Intel Corporation ('8086') has 3389 products
+NVIDIA Corporation ('10de') has 1358 products
+Advanced Micro Devices, Inc. [AMD/ATI] ('1002') has 886 products
+National Instruments ('1093') has 601 products
+Chelsio Communications Inc ('1425') has 525 products
+```
+
+The following example queries the PCI product and subsystem information to
+find the products with the greatest number of subsystems whose vendor differs
+from the top-level product's vendor. In other words, it finds the two products
+that have been re-sold or re-manufactured by the largest number of different
+companies.
+
+```go
+package main
+
+import (
+ "fmt"
+ "sort"
+
+ "github.com/jaypipes/pcidb"
+)
+
+type ByCountSeparateSubvendors []*pcidb.Product
+
+func (v ByCountSeparateSubvendors) Len() int {
+ return len(v)
+}
+
+func (v ByCountSeparateSubvendors) Swap(i, j int) {
+ v[i], v[j] = v[j], v[i]
+}
+
+func (v ByCountSeparateSubvendors) Less(i, j int) bool {
+ iVendor := v[i].VendorID
+ iSetSubvendors := make(map[string]bool, 0)
+ iNumDiffSubvendors := 0
+ jVendor := v[j].VendorID
+ jSetSubvendors := make(map[string]bool, 0)
+ jNumDiffSubvendors := 0
+
+ for _, sub := range v[i].Subsystems {
+ if sub.VendorID != iVendor {
+ iSetSubvendors[sub.VendorID] = true
+ }
+ }
+ iNumDiffSubvendors = len(iSetSubvendors)
+
+ for _, sub := range v[j].Subsystems {
+ if sub.VendorID != jVendor {
+ jSetSubvendors[sub.VendorID] = true
+ }
+ }
+ jNumDiffSubvendors = len(jSetSubvendors)
+
+ return iNumDiffSubvendors > jNumDiffSubvendors
+}
+
+func main() {
+ pci, err := pcidb.New()
+	if err != nil {
+		fmt.Printf("Error getting PCI info: %v", err)
+		return
+	}
+
+ products := make([]*pcidb.Product, len(pci.Products))
+ x := 0
+ for _, product := range pci.Products {
+ products[x] = product
+ x++
+ }
+
+ sort.Sort(ByCountSeparateSubvendors(products))
+
+ fmt.Println("Top 2 products by # different subvendors")
+ fmt.Println("====================================================")
+ for _, product := range products[0:2] {
+ vendorID := product.VendorID
+ vendor := pci.Vendors[vendorID]
+ setSubvendors := make(map[string]bool, 0)
+
+ for _, sub := range product.Subsystems {
+ if sub.VendorID != vendorID {
+ setSubvendors[sub.VendorID] = true
+ }
+ }
+ fmt.Printf("%v ('%v') from %v\n", product.Name, product.ID, vendor.Name)
+ fmt.Printf(" -> %d subsystems under the following different vendors:\n", len(setSubvendors))
+		for subvendorID := range setSubvendors {
+ subvendor, exists := pci.Vendors[subvendorID]
+ subvendorName := "Unknown subvendor"
+ if exists {
+ subvendorName = subvendor.Name
+ }
+ fmt.Printf(" - %v ('%v')\n", subvendorName, subvendorID)
+ }
+ }
+}
+```
+
+which yields (on my local workstation as of July 7th, 2018):
+
+```
+Top 2 products by # different subvendors
+====================================================
+RTL-8100/8101L/8139 PCI Fast Ethernet Adapter ('8139') from Realtek Semiconductor Co., Ltd.
+ -> 34 subsystems under the following different vendors:
+ - OVISLINK Corp. ('149c')
+ - EPoX Computer Co., Ltd. ('1695')
+ - Red Hat, Inc ('1af4')
+ - Mitac ('1071')
+ - Netgear ('1385')
+ - Micro-Star International Co., Ltd. [MSI] ('1462')
+ - Hangzhou Silan Microelectronics Co., Ltd. ('1904')
+ - Compex ('11f6')
+ - Edimax Computer Co. ('1432')
+ - KYE Systems Corporation ('1489')
+ - ZyXEL Communications Corporation ('187e')
+ - Acer Incorporated [ALI] ('1025')
+ - Matsushita Electric Industrial Co., Ltd. ('10f7')
+ - Ruby Tech Corp. ('146c')
+ - Belkin ('1799')
+ - Allied Telesis ('1259')
+ - Unex Technology Corp. ('1429')
+ - CIS Technology Inc ('1436')
+ - D-Link System Inc ('1186')
+ - Ambicom Inc ('1395')
+ - AOPEN Inc. ('a0a0')
+ - TTTech Computertechnik AG (Wrong ID) ('0357')
+ - Gigabyte Technology Co., Ltd ('1458')
+ - Packard Bell B.V. ('1631')
+ - Billionton Systems Inc ('14cb')
+ - Kingston Technologies ('2646')
+ - Accton Technology Corporation ('1113')
+ - Samsung Electronics Co Ltd ('144d')
+ - Biostar Microtech Int'l Corp ('1565')
+ - U.S. Robotics ('16ec')
+ - KTI ('8e2e')
+ - Hewlett-Packard Company ('103c')
+ - ASUSTeK Computer Inc. ('1043')
+ - Surecom Technology ('10bd')
+Bt878 Video Capture ('036e') from Brooktree Corporation
+ -> 30 subsystems under the following different vendors:
+ - iTuner ('aa00')
+ - Nebula Electronics Ltd. ('0071')
+ - DViCO Corporation ('18ac')
+ - iTuner ('aa05')
+ - iTuner ('aa0d')
+ - LeadTek Research Inc. ('107d')
+ - Avermedia Technologies Inc ('1461')
+ - Chaintech Computer Co. Ltd ('270f')
+ - iTuner ('aa07')
+ - iTuner ('aa0a')
+ - Microtune, Inc. ('1851')
+ - iTuner ('aa01')
+ - iTuner ('aa04')
+ - iTuner ('aa06')
+ - iTuner ('aa0f')
+ - iTuner ('aa02')
+ - iTuner ('aa0b')
+ - Pinnacle Systems, Inc. (Wrong ID) ('bd11')
+ - Rockwell International ('127a')
+ - Askey Computer Corp. ('144f')
+ - Twinhan Technology Co. Ltd ('1822')
+ - Anritsu Corp. ('1852')
+ - iTuner ('aa08')
+ - Hauppauge computer works Inc. ('0070')
+ - Pinnacle Systems Inc. ('11bd')
+ - Conexant Systems, Inc. ('14f1')
+ - iTuner ('aa09')
+ - iTuner ('aa03')
+ - iTuner ('aa0c')
+ - iTuner ('aa0e')
+```
+
+## Developers
+
+Contributions to `pcidb` are welcomed! Fork the repo on GitHub and submit a pull
+request with your proposed changes. Or, feel free to log an issue for a feature
+request or bug report.
+
+### Running tests
+
+You can run unit tests easily using the `make test` command, like so:
+
+```
+[jaypipes@uberbox pcidb]$ make test
+go test github.com/jaypipes/pcidb
+ok github.com/jaypipes/pcidb 0.045s
+```
diff --git a/vendor/github.com/jaypipes/pcidb/context.go b/vendor/github.com/jaypipes/pcidb/context.go
new file mode 100644
index 00000000..da345996
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/context.go
@@ -0,0 +1,86 @@
+package pcidb
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "runtime"
+
+ homedir "github.com/mitchellh/go-homedir"
+)
+
+// Concrete merged set of configuration switches that get passed to pcidb
+// internal functions
+type context struct {
+ chroot string
+ cacheOnly bool
+ cachePath string
+ path string
+ enableNetworkFetch bool
+ searchPaths []string
+}
+
+func contextFromOptions(merged *WithOption) *context {
+ ctx := &context{
+ chroot: *merged.Chroot,
+ cacheOnly: *merged.CacheOnly,
+ cachePath: getCachePath(),
+ enableNetworkFetch: *merged.EnableNetworkFetch,
+ path: *merged.Path,
+ searchPaths: make([]string, 0),
+ }
+ ctx.setSearchPaths()
+ return ctx
+}
+
+func getCachePath() string {
+ hdir, err := homedir.Dir()
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed getting homedir.Dir(): %v", err)
+ return ""
+ }
+ fp, err := homedir.Expand(filepath.Join(hdir, ".cache", "pci.ids"))
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed expanding local cache path: %v", err)
+ return ""
+ }
+ return fp
+}
+
+// Depending on the operating system, sets the context's searchPaths to a set
+// of local filepaths to search for a pci.ids database file
+func (ctx *context) setSearchPaths() {
+ // Look in direct path first, if set
+ if ctx.path != "" {
+ ctx.searchPaths = append(ctx.searchPaths, ctx.path)
+ return
+ }
+ // A set of filepaths we will first try to search for the pci-ids DB file
+ // on the local machine. If we fail to find one, we'll try pulling the
+ // latest pci-ids file from the network
+ ctx.searchPaths = append(ctx.searchPaths, ctx.cachePath)
+ if ctx.cacheOnly {
+ return
+ }
+
+ rootPath := ctx.chroot
+
+ if runtime.GOOS != "windows" {
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "hwdata", "pci.ids"),
+ )
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "misc", "pci.ids"),
+ )
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "hwdata", "pci.ids.gz"),
+ )
+ ctx.searchPaths = append(
+ ctx.searchPaths,
+ filepath.Join(rootPath, "usr", "share", "misc", "pci.ids.gz"),
+ )
+ }
+}
diff --git a/vendor/github.com/jaypipes/pcidb/discover.go b/vendor/github.com/jaypipes/pcidb/discover.go
new file mode 100644
index 00000000..b0452d7d
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/discover.go
@@ -0,0 +1,111 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+ "bufio"
+ "compress/gzip"
+ "io"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+const (
+ PCIIDS_URI = "https://pci-ids.ucw.cz/v2.2/pci.ids.gz"
+ USER_AGENT = "golang-jaypipes-pcidb"
+)
+
+func (db *PCIDB) load(ctx *context) error {
+ var foundPath string
+ for _, fp := range ctx.searchPaths {
+ if _, err := os.Stat(fp); err == nil {
+ foundPath = fp
+ break
+ }
+ }
+ if foundPath == "" {
+ if !ctx.enableNetworkFetch {
+ return ERR_NO_DB
+ }
+ // OK, so we didn't find any host-local copy of the pci-ids DB file. Let's
+ // try fetching it from the network and storing it
+ if err := cacheDBFile(ctx.cachePath); err != nil {
+ return err
+ }
+ foundPath = ctx.cachePath
+ }
+ f, err := os.Open(foundPath)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ var scanner *bufio.Scanner
+ if strings.HasSuffix(foundPath, ".gz") {
+ var zipReader *gzip.Reader
+ if zipReader, err = gzip.NewReader(f); err != nil {
+ return err
+ }
+ defer zipReader.Close()
+ scanner = bufio.NewScanner(zipReader)
+ } else {
+ scanner = bufio.NewScanner(f)
+ }
+
+ return parseDBFile(db, scanner)
+}
+
+func ensureDir(fp string) error {
+ fpDir := filepath.Dir(fp)
+ if _, err := os.Stat(fpDir); os.IsNotExist(err) {
+ err = os.MkdirAll(fpDir, os.ModePerm)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// Pulls down the latest copy of the pci-ids file from the network and stores
+// it in the local host filesystem
+func cacheDBFile(cacheFilePath string) error {
+	if err := ensureDir(cacheFilePath); err != nil {
+		return err
+	}
+
+ client := new(http.Client)
+ request, err := http.NewRequest("GET", PCIIDS_URI, nil)
+ if err != nil {
+ return err
+ }
+ request.Header.Set("User-Agent", USER_AGENT)
+ response, err := client.Do(request)
+ if err != nil {
+ return err
+ }
+ defer response.Body.Close()
+ f, err := os.Create(cacheFilePath)
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if err != nil {
+ os.Remove(cacheFilePath)
+ }
+ }()
+ defer f.Close()
+ // write the gunzipped contents to our local cache file
+ zr, err := gzip.NewReader(response.Body)
+ if err != nil {
+ return err
+ }
+ defer zr.Close()
+ if _, err = io.Copy(f, zr); err != nil {
+ return err
+ }
+ return err
+}
diff --git a/vendor/github.com/jaypipes/pcidb/main.go b/vendor/github.com/jaypipes/pcidb/main.go
new file mode 100644
index 00000000..d518748e
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/main.go
@@ -0,0 +1,196 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+var (
+ ERR_NO_DB = fmt.Errorf("No pci-ids DB files found (and network fetch disabled)")
+ trueVar = true
+)
+
+// ProgrammingInterface is the PCI programming interface for a class of PCI
+// devices
+type ProgrammingInterface struct {
+ // hex-encoded PCI_ID of the programming interface
+ ID string `json:"id"`
+ // common string name for the programming interface
+ Name string `json:"name"`
+}
+
+// Subclass is a subdivision of a PCI class
+type Subclass struct {
+ // hex-encoded PCI_ID for the device subclass
+ ID string `json:"id"`
+ // common string name for the subclass
+ Name string `json:"name"`
+ // any programming interfaces this subclass might have
+ ProgrammingInterfaces []*ProgrammingInterface `json:"programming_interfaces"`
+}
+
+// Class is the PCI class
+type Class struct {
+ // hex-encoded PCI_ID for the device class
+ ID string `json:"id"`
+ // common string name for the class
+ Name string `json:"name"`
+ // any subclasses belonging to this class
+ Subclasses []*Subclass `json:"subclasses"`
+}
+
+// Product provides information about a PCI device model
+// NOTE(jaypipes): In the hardware world, the PCI "device_id" is the identifier
+// for the product/model
+type Product struct {
+ // vendor ID for the product
+ VendorID string `json:"vendor_id"`
+ // hex-encoded PCI_ID for the product/model
+ ID string `json:"id"`
+ // common string name of the vendor
+ Name string `json:"name"`
+ // "subdevices" or "subsystems" for the product
+ Subsystems []*Product `json:"subsystems"`
+}
+
+// Vendor provides information about a device vendor
+type Vendor struct {
+ // hex-encoded PCI_ID for the vendor
+ ID string `json:"id"`
+ // common string name of the vendor
+ Name string `json:"name"`
+ // all top-level devices for the vendor
+ Products []*Product `json:"products"`
+}
+
+type PCIDB struct {
+ // hash of class ID -> class information
+ Classes map[string]*Class `json:"classes"`
+ // hash of vendor ID -> vendor information
+ Vendors map[string]*Vendor `json:"vendors"`
+ // hash of vendor ID + product/device ID -> product information
+ Products map[string]*Product `json:"products"`
+}
+
+// WithOption is used to represent optionally-configured settings
+type WithOption struct {
+ // Chroot is the directory that pcidb uses when attempting to discover
+ // pciids DB files
+ Chroot *string
+ // CacheOnly is mostly just useful for testing. It essentially disables
+ // looking for any non ~/.cache/pci.ids filepaths (which is useful when we
+ // want to test the fetch-from-network code paths
+ CacheOnly *bool
+ // Enables fetching a pci-ids from a known location on the network if no
+ // local pci-ids DB files can be found.
+ EnableNetworkFetch *bool
+ // Path points to the absolute path of a pci.ids file in a non-standard
+ // location.
+ Path *string
+}
+
+func WithChroot(dir string) *WithOption {
+ return &WithOption{Chroot: &dir}
+}
+
+func WithCacheOnly() *WithOption {
+ return &WithOption{CacheOnly: &trueVar}
+}
+
+func WithDirectPath(path string) *WithOption {
+ return &WithOption{Path: &path}
+}
+
+func WithEnableNetworkFetch() *WithOption {
+ return &WithOption{EnableNetworkFetch: &trueVar}
+}
+
+func mergeOptions(opts ...*WithOption) *WithOption {
+ // Grab options from the environs by default
+ defaultChroot := "/"
+ if val, exists := os.LookupEnv("PCIDB_CHROOT"); exists {
+ defaultChroot = val
+ }
+ path := ""
+ if val, exists := os.LookupEnv("PCIDB_PATH"); exists {
+ path = val
+ }
+ defaultCacheOnly := false
+ if val, exists := os.LookupEnv("PCIDB_CACHE_ONLY"); exists {
+ if parsed, err := strconv.ParseBool(val); err != nil {
+ fmt.Fprintf(
+ os.Stderr,
+ "Failed parsing a bool from PCIDB_CACHE_ONLY "+
+ "environ value of %s",
+ val,
+ )
+ } else if parsed {
+ defaultCacheOnly = parsed
+ }
+ }
+ defaultEnableNetworkFetch := false
+ if val, exists := os.LookupEnv("PCIDB_ENABLE_NETWORK_FETCH"); exists {
+ if parsed, err := strconv.ParseBool(val); err != nil {
+ fmt.Fprintf(
+ os.Stderr,
+ "Failed parsing a bool from PCIDB_ENABLE_NETWORK_FETCH "+
+ "environ value of %s",
+ val,
+ )
+ } else if parsed {
+ defaultEnableNetworkFetch = parsed
+ }
+ }
+
+ merged := &WithOption{}
+ for _, opt := range opts {
+ if opt.Chroot != nil {
+ merged.Chroot = opt.Chroot
+ }
+ if opt.CacheOnly != nil {
+ merged.CacheOnly = opt.CacheOnly
+ }
+ if opt.EnableNetworkFetch != nil {
+ merged.EnableNetworkFetch = opt.EnableNetworkFetch
+ }
+ if opt.Path != nil {
+ merged.Path = opt.Path
+ }
+ }
+ // Set the default value if missing from merged
+ if merged.Chroot == nil {
+ merged.Chroot = &defaultChroot
+ }
+ if merged.CacheOnly == nil {
+ merged.CacheOnly = &defaultCacheOnly
+ }
+ if merged.EnableNetworkFetch == nil {
+ merged.EnableNetworkFetch = &defaultEnableNetworkFetch
+ }
+ if merged.Path == nil {
+ merged.Path = &path
+ }
+ return merged
+}
+
+// New returns a pointer to a PCIDB struct which contains information you can
+// use to query PCI vendor, product and class information. It accepts zero or
+// more pointers to WithOption structs. If you want to modify the behaviour of
+// pcidb, use one of the option modifiers when calling New. For example, to
+// change the root directory that pcidb uses when discovering pciids DB files,
+// call New(WithChroot("/my/root/override"))
+func New(opts ...*WithOption) (*PCIDB, error) {
+ ctx := contextFromOptions(mergeOptions(opts...))
+ db := &PCIDB{}
+ if err := db.load(ctx); err != nil {
+ return nil, err
+ }
+ return db, nil
+}
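
For readers skimming this vendored dependency, here is a minimal usage sketch of the constructor and options above. The `Name` fields on `Vendor` and `Product`, and the example IDs, are assumptions based on the rest of this package; treat it as an illustration, not part of the patch.

```go
package main

import (
	"fmt"

	"github.com/jaypipes/pcidb"
)

func main() {
	// Options can also be supplied via PCIDB_* environment variables,
	// as handled by mergeOptions above.
	db, err := pcidb.New(
		// Fall back to downloading pci.ids if no local copy is found.
		pcidb.WithEnableNetworkFetch(),
	)
	if err != nil {
		fmt.Println("failed to load pci.ids:", err)
		return
	}

	// Vendors is keyed by the 4-character hex vendor ID; Products by
	// vendor ID + product ID, as built by parse.go.
	if vendor, ok := db.Vendors["15ad"]; ok { // 15ad: VMware (illustrative lookup)
		fmt.Println("vendor:", vendor.Name)
	}
	if product, ok := db.Products["15ad07b0"]; ok { // illustrative product key
		fmt.Println("product:", product.Name)
	}
}
```
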
diff --git a/vendor/github.com/jaypipes/pcidb/parse.go b/vendor/github.com/jaypipes/pcidb/parse.go
new file mode 100644
index 00000000..0fee5fe5
--- /dev/null
+++ b/vendor/github.com/jaypipes/pcidb/parse.go
@@ -0,0 +1,163 @@
+//
+// Use and distribution licensed under the Apache license version 2.
+//
+// See the COPYING file in the root project directory for full text.
+//
+
+package pcidb
+
+import (
+ "bufio"
+ "strings"
+)
+
+func parseDBFile(db *PCIDB, scanner *bufio.Scanner) error {
+ inClassBlock := false
+ db.Classes = make(map[string]*Class, 20)
+ db.Vendors = make(map[string]*Vendor, 200)
+ db.Products = make(map[string]*Product, 1000)
+ subclasses := make([]*Subclass, 0)
+ progIfaces := make([]*ProgrammingInterface, 0)
+ var curClass *Class
+ var curSubclass *Subclass
+ var curProgIface *ProgrammingInterface
+ vendorProducts := make([]*Product, 0)
+ var curVendor *Vendor
+ var curProduct *Product
+ var curSubsystem *Product
+ productSubsystems := make([]*Product, 0)
+ for scanner.Scan() {
+ line := scanner.Text()
+ // skip comments and blank lines
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+ lineBytes := []rune(line)
+
+ // Lines starting with an uppercase "C" indicate a PCI top-level class
+		// information block. These lines look like this:
+ //
+ // C 02 Network controller
+ if lineBytes[0] == 'C' {
+ if curClass != nil {
+ // finalize existing class because we found a new class block
+ curClass.Subclasses = subclasses
+ subclasses = make([]*Subclass, 0)
+ }
+ inClassBlock = true
+ classID := string(lineBytes[2:4])
+ className := string(lineBytes[6:])
+ curClass = &Class{
+ ID: classID,
+ Name: className,
+ Subclasses: subclasses,
+ }
+ db.Classes[curClass.ID] = curClass
+ continue
+ }
+
+ // Lines not beginning with an uppercase "C" or a TAB character
+		// indicate a top-level vendor information block. These lines look like
+ // this:
+ //
+ // 0a89 BREA Technologies Inc
+ if lineBytes[0] != '\t' {
+ if curVendor != nil {
+ // finalize existing vendor because we found a new vendor block
+ curVendor.Products = vendorProducts
+ vendorProducts = make([]*Product, 0)
+ }
+ inClassBlock = false
+ vendorID := string(lineBytes[0:4])
+ vendorName := string(lineBytes[6:])
+ curVendor = &Vendor{
+ ID: vendorID,
+ Name: vendorName,
+ Products: vendorProducts,
+ }
+ db.Vendors[curVendor.ID] = curVendor
+ continue
+ }
+
+		// Lines beginning with only a single TAB character are *either* a
+		// subclass OR a device information block. If we're in a class
+		// block (i.e. the last parsed block header was for a PCI class), then
+		// we parse a subclass block. Otherwise, we parse a device information
+		// block.
+ //
+		// A subclass information block looks like this:
+ //
+ // \t00 Non-VGA unclassified device
+ //
+		// A device information block looks like this:
+ //
+ // \t0002 PCI to MCA Bridge
+ if len(lineBytes) > 1 && lineBytes[1] != '\t' {
+ if inClassBlock {
+ if curSubclass != nil {
+ // finalize existing subclass because we found a new subclass block
+ curSubclass.ProgrammingInterfaces = progIfaces
+ progIfaces = make([]*ProgrammingInterface, 0)
+ }
+ subclassID := string(lineBytes[1:3])
+ subclassName := string(lineBytes[5:])
+ curSubclass = &Subclass{
+ ID: subclassID,
+ Name: subclassName,
+ ProgrammingInterfaces: progIfaces,
+ }
+ subclasses = append(subclasses, curSubclass)
+ } else {
+ if curProduct != nil {
+ // finalize existing product because we found a new product block
+ curProduct.Subsystems = productSubsystems
+ productSubsystems = make([]*Product, 0)
+ }
+ productID := string(lineBytes[1:5])
+ productName := string(lineBytes[7:])
+ productKey := curVendor.ID + productID
+ curProduct = &Product{
+ VendorID: curVendor.ID,
+ ID: productID,
+ Name: productName,
+ }
+ vendorProducts = append(vendorProducts, curProduct)
+ db.Products[productKey] = curProduct
+ }
+ } else {
+ // Lines beginning with two TAB characters are *either* a subsystem
+ // (subdevice) OR are a programming interface for a PCI device
+ // subclass. If we're in a class block (i.e. the last parsed block
+ // header was for a PCI class), then we parse a programming
+ // interface block, otherwise we parse a subsystem block.
+ //
+ // A programming interface block looks like this:
+ //
+ // \t\t00 UHCI
+ //
+ // A subsystem block looks like this:
+ //
+ // \t\t0e11 4091 Smart Array 6i
+ if inClassBlock {
+ progIfaceID := string(lineBytes[2:4])
+ progIfaceName := string(lineBytes[6:])
+ curProgIface = &ProgrammingInterface{
+ ID: progIfaceID,
+ Name: progIfaceName,
+ }
+ progIfaces = append(progIfaces, curProgIface)
+ } else {
+ vendorID := string(lineBytes[2:6])
+ subsystemID := string(lineBytes[7:11])
+ subsystemName := string(lineBytes[13:])
+ curSubsystem = &Product{
+ VendorID: vendorID,
+ ID: subsystemID,
+ Name: subsystemName,
+ }
+ productSubsystems = append(productSubsystems, curSubsystem)
+ }
+ }
+ }
+ return nil
+}
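
The parser above keys entirely off line prefixes: `C ` opens a class block, no leading tab opens a vendor block, one tab introduces a subclass or device depending on the enclosing block, and two tabs introduce a programming interface or subsystem. A standalone sketch of that classification over a made-up pci.ids fragment (it does not call the unexported `parseDBFile`):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"
)

// A tiny, made-up pci.ids fragment mirroring the formats documented above.
var sample = "C 02  Network controller\n" +
	"\t00  Ethernet controller\n" +
	"0a89  BREA Technologies Inc\n" +
	"\t0002  PCI to MCA Bridge\n" +
	"\t\t0e11 4091  Smart Array 6i\n"

func main() {
	scanner := bufio.NewScanner(strings.NewReader(sample))
	inClassBlock := false
	for scanner.Scan() {
		line := scanner.Text()
		// Skip comments and blank lines, as the real parser does.
		if line == "" || strings.HasPrefix(line, "#") {
			continue
		}
		switch {
		case line[0] == 'C':
			inClassBlock = true
			fmt.Println("class:", line[2:])
		case line[0] != '\t':
			inClassBlock = false
			fmt.Println("vendor:", line[:4])
		case len(line) > 1 && line[1] != '\t':
			if inClassBlock {
				fmt.Println("subclass:", strings.TrimSpace(line))
			} else {
				fmt.Println("device:", strings.TrimSpace(line))
			}
		default:
			if inClassBlock {
				fmt.Println("prog-if:", strings.TrimSpace(line))
			} else {
				fmt.Println("subsystem:", strings.TrimSpace(line))
			}
		}
	}
}
```
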
diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE
new file mode 100644
index 00000000..f9c841a5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md
new file mode 100644
index 00000000..d70706d5
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/README.md
@@ -0,0 +1,14 @@
+# go-homedir
+
+This is a Go library for detecting the user's home directory without
+the use of cgo, so the library can be used in cross-compilation environments.
+
+Usage is incredibly simple: call `homedir.Dir()` to get the home directory
+for the current user, and `homedir.Expand()` to expand a leading `~` in a path
+to the home directory.
+
+**Why not just use `os/user`?** The built-in `os/user` package requires
+cgo on Darwin systems. This means that any Go code that uses that package
+cannot cross compile. But 99% of the time the use for `os/user` is just to
+retrieve the home directory, which we can do for the current user without
+cgo. This library does that, enabling cross-compilation.
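
A minimal sketch of the two calls the README describes (the `~/.cache/pci.ids` argument is purely illustrative):

```go
package main

import (
	"fmt"
	"log"

	homedir "github.com/mitchellh/go-homedir"
)

func main() {
	// Dir returns the current user's home directory without cgo.
	home, err := homedir.Dir()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("home:", home)

	// Expand replaces a leading "~" with that directory.
	expanded, err := homedir.Expand("~/.cache/pci.ids")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("expanded:", expanded)
}
```
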
diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go
new file mode 100644
index 00000000..25378537
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-homedir/homedir.go
@@ -0,0 +1,167 @@
+package homedir
+
+import (
+ "bytes"
+ "errors"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// DisableCache will disable caching of the home directory. Caching is enabled
+// by default.
+var DisableCache bool
+
+var homedirCache string
+var cacheLock sync.RWMutex
+
+// Dir returns the home directory for the executing user.
+//
+// This uses an OS-specific method for discovering the home directory.
+// An error is returned if a home directory cannot be detected.
+func Dir() (string, error) {
+ if !DisableCache {
+ cacheLock.RLock()
+ cached := homedirCache
+ cacheLock.RUnlock()
+ if cached != "" {
+ return cached, nil
+ }
+ }
+
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+
+ var result string
+ var err error
+ if runtime.GOOS == "windows" {
+ result, err = dirWindows()
+ } else {
+ // Unix-like system, so just assume Unix
+ result, err = dirUnix()
+ }
+
+ if err != nil {
+ return "", err
+ }
+ homedirCache = result
+ return result, nil
+}
+
+// Expand expands the path to include the home directory if the path
+// is prefixed with `~`. If it isn't prefixed with `~`, the path is
+// returned as-is.
+func Expand(path string) (string, error) {
+ if len(path) == 0 {
+ return path, nil
+ }
+
+ if path[0] != '~' {
+ return path, nil
+ }
+
+ if len(path) > 1 && path[1] != '/' && path[1] != '\\' {
+ return "", errors.New("cannot expand user-specific home dir")
+ }
+
+ dir, err := Dir()
+ if err != nil {
+ return "", err
+ }
+
+ return filepath.Join(dir, path[1:]), nil
+}
+
+// Reset clears the cache, forcing the next call to Dir to re-detect
+// the home directory. This generally never has to be called, but can be
+// useful in tests if you're modifying the home directory via the HOME
+// env var or something.
+func Reset() {
+ cacheLock.Lock()
+ defer cacheLock.Unlock()
+ homedirCache = ""
+}
+
+func dirUnix() (string, error) {
+ homeEnv := "HOME"
+ if runtime.GOOS == "plan9" {
+ // On plan9, env vars are lowercase.
+ homeEnv = "home"
+ }
+
+ // First prefer the HOME environmental variable
+ if home := os.Getenv(homeEnv); home != "" {
+ return home, nil
+ }
+
+ var stdout bytes.Buffer
+
+ // If that fails, try OS specific commands
+ if runtime.GOOS == "darwin" {
+ cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`)
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err == nil {
+ result := strings.TrimSpace(stdout.String())
+ if result != "" {
+ return result, nil
+ }
+ }
+ } else {
+ cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid()))
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ // If the error is ErrNotFound, we ignore it. Otherwise, return it.
+ if err != exec.ErrNotFound {
+ return "", err
+ }
+ } else {
+ if passwd := strings.TrimSpace(stdout.String()); passwd != "" {
+ // username:password:uid:gid:gecos:home:shell
+ passwdParts := strings.SplitN(passwd, ":", 7)
+ if len(passwdParts) > 5 {
+ return passwdParts[5], nil
+ }
+ }
+ }
+ }
+
+ // If all else fails, try the shell
+ stdout.Reset()
+ cmd := exec.Command("sh", "-c", "cd && pwd")
+ cmd.Stdout = &stdout
+ if err := cmd.Run(); err != nil {
+ return "", err
+ }
+
+ result := strings.TrimSpace(stdout.String())
+ if result == "" {
+ return "", errors.New("blank output when reading home directory")
+ }
+
+ return result, nil
+}
+
+func dirWindows() (string, error) {
+ // First prefer the HOME environmental variable
+ if home := os.Getenv("HOME"); home != "" {
+ return home, nil
+ }
+
+ // Prefer standard environment variable USERPROFILE
+ if home := os.Getenv("USERPROFILE"); home != "" {
+ return home, nil
+ }
+
+ drive := os.Getenv("HOMEDRIVE")
+ path := os.Getenv("HOMEPATH")
+ home := drive + path
+ if drive == "" || path == "" {
+ return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank")
+ }
+
+ return home, nil
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 00000000..daf913b1
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 00000000..9159de03
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - tip
+
+script:
+ - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 00000000..835ba3e7
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 00000000..ce9d7cde
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test:
+ $(GO) test $(PKGS)
+
+vet: | test
+ $(GO) vet $(PKGS)
+
+staticcheck:
+ $(GO) get honnef.co/go/tools/cmd/staticcheck
+ staticcheck -checks all $(PKGS)
+
+misspell:
+ $(GO) get github.com/client9/misspell/cmd/misspell
+ misspell \
+ -locale GB \
+ -error \
+ *.md *.go
+
+unconvert:
+ $(GO) get github.com/mdempsky/unconvert
+ unconvert -v $(PKGS)
+
+ineffassign:
+ $(GO) get github.com/gordonklaus/ineffassign
+ find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+ $(GO) get mvdan.cc/unparam
+ unparam ./...
+
+errcheck:
+ $(GO) get github.com/kisielk/errcheck
+ errcheck $(PKGS)
+
+gofmt:
+ @echo Checking code is gofmted
+ @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 00000000..54dfdcb1
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+ return err
+}
+```
+which, when applied recursively up the call stack, results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+ return errors.Wrap(err, "read failed")
+}
+```
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+ Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+ // handle specifically
+default:
+ // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports.
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
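
A compact sketch combining the two README snippets, using a hypothetical `readConfig` helper to show wrapping on the failure path and `errors.Cause` at the call site:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/pkg/errors"
)

// readConfig is a hypothetical helper that annotates failures with context.
func readConfig(path string) ([]byte, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, errors.Wrap(err, "read config failed")
	}
	return data, nil
}

func main() {
	_, err := readConfig("/nonexistent/config.yaml")
	if err != nil {
		// The wrapped message plus the original error.
		fmt.Println(err)

		// errors.Cause unwinds the wrappers back to the *os.PathError.
		if pathErr, ok := errors.Cause(err).(*os.PathError); ok {
			fmt.Println("failing path:", pathErr.Path)
		}
	}
}
```
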
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 00000000..a932eade
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+ GOPATH: C:\gopath
+
+platform:
+ - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+ # some helpful output for debugging builds
+ - go version
+ - go env
+ # pre-installed MinGW at C:\MinGW is 32bit only
+ # but MSYS2 at C:\msys64 has mingw64
+ - set PATH=C:\msys64\mingw64\bin;%PATH%
+ - gcc --version
+ - g++ --version
+
+build_script:
+ - go install -v ./...
+
+test_script:
+ - set PATH=C:\gopath\bin;%PATH%
+ - go test -v ./...
+
+#artifacts:
+# - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 00000000..161aea25
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+// if err != nil {
+// return err
+// }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+// _, err := ioutil.ReadAll(r)
+// if err != nil {
+// return errors.Wrap(err, "read failed")
+// }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+// switch err := errors.Cause(err).(type) {
+// case *MyError:
+// // handle specifically
+// default:
+// // unknown error
+// }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+// %s print the error. If the error has a Cause it will be
+// printed recursively.
+// %v see %s
+// %+v extended format. Each Frame of the error's StackTrace will
+// be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+// type stackTracer interface {
+// StackTrace() errors.StackTrace
+// }
+//
+// The returned errors.StackTrace type is defined as
+//
+// type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+// if err, ok := err.(stackTracer); ok {
+// for _, f := range err.StackTrace() {
+// fmt.Printf("%+s:%d\n", f, f)
+// }
+// }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+ "fmt"
+ "io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+ return &fundamental{
+ msg: message,
+ stack: callers(),
+ }
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+ return &fundamental{
+ msg: fmt.Sprintf(format, args...),
+ stack: callers(),
+ }
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+ msg string
+ *stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ io.WriteString(s, f.msg)
+ f.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, f.msg)
+ case 'q':
+ fmt.Fprintf(s, "%q", f.msg)
+ }
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+ if err == nil {
+ return nil
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+type withStack struct {
+ error
+ *stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v", w.Cause())
+ w.stack.Format(s, verb)
+ return
+ }
+ fallthrough
+ case 's':
+ io.WriteString(s, w.Error())
+ case 'q':
+ fmt.Fprintf(s, "%q", w.Error())
+ }
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: message,
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ err = &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+ return &withStack{
+ err,
+ callers(),
+ }
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: message,
+ }
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+ if err == nil {
+ return nil
+ }
+ return &withMessage{
+ cause: err,
+ msg: fmt.Sprintf(format, args...),
+ }
+}
+
+type withMessage struct {
+ cause error
+ msg string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ if s.Flag('+') {
+ fmt.Fprintf(s, "%+v\n", w.Cause())
+ io.WriteString(s, w.msg)
+ return
+ }
+ fallthrough
+ case 's', 'q':
+ io.WriteString(s, w.Error())
+ }
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+// type causer interface {
+// Cause() error
+// }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+ type causer interface {
+ Cause() error
+ }
+
+ for err != nil {
+ cause, ok := err.(causer)
+ if !ok {
+ break
+ }
+ err = cause.Cause()
+ }
+ return err
+}
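
Since the `stackTracer` interface documented above is unexported, callers re-declare it locally; a short sketch of retrieving and printing the recorded frames with the documented `%+s:%d` formatting:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// stackTracer mirrors the unexported interface described in the package docs.
type stackTracer interface {
	StackTrace() errors.StackTrace
}

func main() {
	err := errors.New("something went wrong")

	if st, ok := err.(stackTracer); ok {
		for _, f := range st.StackTrace() {
			// %+s prints function and file, %d prints the line number.
			fmt.Printf("%+s:%d\n", f, f)
		}
	}
}
```
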
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 00000000..be0d10d0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+ stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+ return stderrors.Unwrap(err)
+}
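
Because `withStack` and `withMessage` both implement `Unwrap`, these Go 1.13 helpers see through wrapped errors; a brief sketch with an illustrative sentinel error:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// ErrNotFound is a sentinel error used only for illustration.
var ErrNotFound = errors.New("not found")

func lookup(key string) error {
	// Wrapf adds context and a stack without hiding the sentinel,
	// because the wrapper types implement Unwrap.
	return errors.Wrapf(ErrNotFound, "lookup %q failed", key)
}

func main() {
	err := lookup("disk-42")

	if errors.Is(err, ErrNotFound) {
		fmt.Println("treat as missing:", err)
	}
}
```
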
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 00000000..779a8348
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+ "fmt"
+ "io"
+ "path"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ file, _ := fn.FileLine(f.pc())
+ return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return 0
+ }
+ _, line := fn.FileLine(f.pc())
+ return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+ fn := runtime.FuncForPC(f.pc())
+ if fn == nil {
+ return "unknown"
+ }
+ return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+// %s source file
+// %d source line
+// %n function name
+// %v equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+s function name and path of source file relative to the compile time
+// GOPATH separated by \n\t (\n\t)
+// %+v equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 's':
+ switch {
+ case s.Flag('+'):
+ io.WriteString(s, f.name())
+ io.WriteString(s, "\n\t")
+ io.WriteString(s, f.file())
+ default:
+ io.WriteString(s, path.Base(f.file()))
+ }
+ case 'd':
+ io.WriteString(s, strconv.Itoa(f.line()))
+ case 'n':
+ io.WriteString(s, funcname(f.name()))
+ case 'v':
+ f.Format(s, 's')
+ io.WriteString(s, ":")
+ f.Format(s, 'd')
+ }
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+ name := f.name()
+ if name == "unknown" {
+ return []byte(name), nil
+ }
+ return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+// %s lists source files for each Frame in the stack
+// %v lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+// %+v Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case s.Flag('+'):
+ for _, f := range st {
+ io.WriteString(s, "\n")
+ f.Format(s, verb)
+ }
+ case s.Flag('#'):
+ fmt.Fprintf(s, "%#v", []Frame(st))
+ default:
+ st.formatSlice(s, verb)
+ }
+ case 's':
+ st.formatSlice(s, verb)
+ }
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+ io.WriteString(s, "[")
+ for i, f := range st {
+ if i > 0 {
+ io.WriteString(s, " ")
+ }
+ f.Format(s, verb)
+ }
+ io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+ switch verb {
+ case 'v':
+ switch {
+ case st.Flag('+'):
+ for _, pc := range *s {
+ f := Frame(pc)
+ fmt.Fprintf(st, "\n%+v", f)
+ }
+ }
+ }
+}
+
+func (s *stack) StackTrace() StackTrace {
+ f := make([]Frame, len(*s))
+ for i := 0; i < len(f); i++ {
+ f[i] = Frame((*s)[i])
+ }
+ return f
+}
+
+func callers() *stack {
+ const depth = 32
+ var pcs [depth]uintptr
+ n := runtime.Callers(3, pcs[:])
+ var st stack = pcs[0:n]
+ return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+ i := strings.LastIndex(name, "/")
+ name = name[i+1:]
+ i = strings.Index(name, ".")
+ return name[i+1:]
+}
diff --git a/vendor/howett.net/plist/.gitignore b/vendor/howett.net/plist/.gitignore
new file mode 100644
index 00000000..3743b346
--- /dev/null
+++ b/vendor/howett.net/plist/.gitignore
@@ -0,0 +1,16 @@
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+*.wasm
+
+# Test binary, built with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
+# Dependency directories (remove the comment below to include it)
+# vendor/
diff --git a/vendor/howett.net/plist/.gitlab-ci.yml b/vendor/howett.net/plist/.gitlab-ci.yml
new file mode 100644
index 00000000..11d6dbf7
--- /dev/null
+++ b/vendor/howett.net/plist/.gitlab-ci.yml
@@ -0,0 +1,39 @@
+image: golang:alpine
+stages:
+ - test
+
+variables:
+ GO_PACKAGE: "howett.net/plist"
+
+before_script:
+ - "mkdir -p $(dirname $GOPATH/src/$GO_PACKAGE)"
+ - "ln -s $(pwd) $GOPATH/src/$GO_PACKAGE"
+ - "cd $GOPATH/src/$GO_PACKAGE"
+
+.template:go-test: &template-go-test
+ stage: test
+ script:
+ - go test
+
+go-test-cover:latest:
+ stage: test
+ script:
+ - go test -v -cover
+ coverage: '/^coverage: \d+\.\d+/'
+
+go-test-appengine:latest:
+ stage: test
+ script:
+ - go test -tags appengine
+
+go-test:1.6:
+ <<: *template-go-test
+ image: golang:1.6-alpine
+
+go-test:1.4:
+ <<: *template-go-test
+ image: golang:1.4-alpine
+
+go-test:1.2:
+ <<: *template-go-test
+ image: golang:1.2
diff --git a/vendor/howett.net/plist/LICENSE b/vendor/howett.net/plist/LICENSE
new file mode 100644
index 00000000..9f6012f3
--- /dev/null
+++ b/vendor/howett.net/plist/LICENSE
@@ -0,0 +1,58 @@
+Copyright (c) 2013, Dustin L. Howett. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of the FreeBSD Project.
+
+--------------------------------------------------------------------------------
+Parts of this package were made available under the license covering
+the Go language and all attended core libraries. That license follows.
+--------------------------------------------------------------------------------
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/howett.net/plist/README.md b/vendor/howett.net/plist/README.md
new file mode 100644
index 00000000..d751c062
--- /dev/null
+++ b/vendor/howett.net/plist/README.md
@@ -0,0 +1,21 @@
+# plist - A pure Go property list transcoder [![coverage report](https://gitlab.howett.net/go/plist/badges/main/coverage.svg)](https://gitlab.howett.net/go/plist/commits/main)
+## INSTALL
+```
+$ go get howett.net/plist
+```
+
+## FEATURES
+* Supports encoding/decoding property lists (Apple XML, Apple Binary, OpenStep and GNUStep) from/to arbitrary Go types
+
+## USE
+```go
+package main
+import (
+ "howett.net/plist"
+ "os"
+)
+func main() {
+ encoder := plist.NewEncoder(os.Stdout)
+ encoder.Encode(map[string]string{"hello": "world"})
+}
+```
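
For the decoding direction (handled by `decode.go` later in this diff), a small sketch that feeds an XML property list through `plist.NewDecoder`; the sample document and target map are illustrative:

```go
package main

import (
	"fmt"
	"strings"

	"howett.net/plist"
)

const xmlDoc = `<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
	<key>hello</key>
	<string>world</string>
</dict>
</plist>`

func main() {
	// NewDecoder needs an io.ReadSeeker so it can sniff the format and
	// rewind; strings.Reader satisfies that.
	decoder := plist.NewDecoder(strings.NewReader(xmlDoc))

	var data map[string]string
	if err := decoder.Decode(&data); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(data["hello"], "- detected format:", decoder.Format)
}
```
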
diff --git a/vendor/howett.net/plist/bplist.go b/vendor/howett.net/plist/bplist.go
new file mode 100644
index 00000000..962793a9
--- /dev/null
+++ b/vendor/howett.net/plist/bplist.go
@@ -0,0 +1,26 @@
+package plist
+
+type bplistTrailer struct {
+ Unused [5]uint8
+ SortVersion uint8
+ OffsetIntSize uint8
+ ObjectRefSize uint8
+ NumObjects uint64
+ TopObject uint64
+ OffsetTableOffset uint64
+}
+
+const (
+ bpTagNull uint8 = 0x00
+ bpTagBoolFalse = 0x08
+ bpTagBoolTrue = 0x09
+ bpTagInteger = 0x10
+ bpTagReal = 0x20
+ bpTagDate = 0x30
+ bpTagData = 0x40
+ bpTagASCIIString = 0x50
+ bpTagUTF16String = 0x60
+ bpTagUID = 0x80
+ bpTagArray = 0xA0
+ bpTagDictionary = 0xD0
+)
diff --git a/vendor/howett.net/plist/bplist_generator.go b/vendor/howett.net/plist/bplist_generator.go
new file mode 100644
index 00000000..09ab71b1
--- /dev/null
+++ b/vendor/howett.net/plist/bplist_generator.go
@@ -0,0 +1,303 @@
+package plist
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "time"
+ "unicode/utf16"
+)
+
+func bplistMinimumIntSize(n uint64) int {
+ switch {
+ case n <= uint64(0xff):
+ return 1
+ case n <= uint64(0xffff):
+ return 2
+ case n <= uint64(0xffffffff):
+ return 4
+ default:
+ return 8
+ }
+}
+
+func bplistValueShouldUnique(pval cfValue) bool {
+ switch pval.(type) {
+ case cfString, *cfNumber, *cfReal, cfDate, cfData:
+ return true
+ }
+ return false
+}
+
+type bplistGenerator struct {
+ writer *countedWriter
+ objmap map[interface{}]uint64 // maps pValue.hash()es to object locations
+ objtable []cfValue
+ trailer bplistTrailer
+}
+
+func (p *bplistGenerator) flattenPlistValue(pval cfValue) {
+ key := pval.hash()
+ if bplistValueShouldUnique(pval) {
+ if _, ok := p.objmap[key]; ok {
+ return
+ }
+ }
+
+ p.objmap[key] = uint64(len(p.objtable))
+ p.objtable = append(p.objtable, pval)
+
+ switch pval := pval.(type) {
+ case *cfDictionary:
+ pval.sort()
+ for _, k := range pval.keys {
+ p.flattenPlistValue(cfString(k))
+ }
+ for _, v := range pval.values {
+ p.flattenPlistValue(v)
+ }
+ case *cfArray:
+ for _, v := range pval.values {
+ p.flattenPlistValue(v)
+ }
+ }
+}
+
+func (p *bplistGenerator) indexForPlistValue(pval cfValue) (uint64, bool) {
+ v, ok := p.objmap[pval.hash()]
+ return v, ok
+}
+
+func (p *bplistGenerator) generateDocument(root cfValue) {
+ p.objtable = make([]cfValue, 0, 16)
+ p.objmap = make(map[interface{}]uint64)
+ p.flattenPlistValue(root)
+
+ p.trailer.NumObjects = uint64(len(p.objtable))
+ p.trailer.ObjectRefSize = uint8(bplistMinimumIntSize(p.trailer.NumObjects))
+
+ p.writer.Write([]byte("bplist00"))
+
+ offtable := make([]uint64, p.trailer.NumObjects)
+ for i, pval := range p.objtable {
+ offtable[i] = uint64(p.writer.BytesWritten())
+ p.writePlistValue(pval)
+ }
+
+ p.trailer.OffsetIntSize = uint8(bplistMinimumIntSize(uint64(p.writer.BytesWritten())))
+ p.trailer.TopObject = p.objmap[root.hash()]
+ p.trailer.OffsetTableOffset = uint64(p.writer.BytesWritten())
+
+ for _, offset := range offtable {
+ p.writeSizedInt(offset, int(p.trailer.OffsetIntSize))
+ }
+
+ binary.Write(p.writer, binary.BigEndian, p.trailer)
+}
+
+func (p *bplistGenerator) writePlistValue(pval cfValue) {
+ if pval == nil {
+ return
+ }
+
+ switch pval := pval.(type) {
+ case *cfDictionary:
+ p.writeDictionaryTag(pval)
+ case *cfArray:
+ p.writeArrayTag(pval.values)
+ case cfString:
+ p.writeStringTag(string(pval))
+ case *cfNumber:
+ p.writeIntTag(pval.signed, pval.value)
+ case *cfReal:
+ if pval.wide {
+ p.writeRealTag(pval.value, 64)
+ } else {
+ p.writeRealTag(pval.value, 32)
+ }
+ case cfBoolean:
+ p.writeBoolTag(bool(pval))
+ case cfData:
+ p.writeDataTag([]byte(pval))
+ case cfDate:
+ p.writeDateTag(time.Time(pval))
+ case cfUID:
+ p.writeUIDTag(UID(pval))
+ default:
+ panic(fmt.Errorf("unknown plist type %t", pval))
+ }
+}
+
+func (p *bplistGenerator) writeSizedInt(n uint64, nbytes int) {
+ var val interface{}
+ switch nbytes {
+ case 1:
+ val = uint8(n)
+ case 2:
+ val = uint16(n)
+ case 4:
+ val = uint32(n)
+ case 8:
+ val = n
+ default:
+ panic(errors.New("illegal integer size"))
+ }
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeBoolTag(v bool) {
+ tag := uint8(bpTagBoolFalse)
+ if v {
+ tag = bpTagBoolTrue
+ }
+ binary.Write(p.writer, binary.BigEndian, tag)
+}
+
+func (p *bplistGenerator) writeIntTag(signed bool, n uint64) {
+ var tag uint8
+ var val interface{}
+ switch {
+ case n <= uint64(0xff):
+ val = uint8(n)
+ tag = bpTagInteger | 0x0
+ case n <= uint64(0xffff):
+ val = uint16(n)
+ tag = bpTagInteger | 0x1
+ case n <= uint64(0xffffffff):
+ val = uint32(n)
+ tag = bpTagInteger | 0x2
+ case n > uint64(0x7fffffffffffffff) && !signed:
+ // 64-bit values are always *signed* in format 00.
+ // Any unsigned value that doesn't intersect with the signed
+ // range must be sign-extended and stored as a SInt128
+ val = n
+ tag = bpTagInteger | 0x4
+ default:
+ val = n
+ tag = bpTagInteger | 0x3
+ }
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ if tag&0xF == 0x4 {
+ // SInt128; in the absence of true 128-bit integers in Go,
+ // we'll just fake the top half. We only got here because
+ // we had an unsigned 64-bit int that didn't fit,
+ // so sign extend it with zeroes.
+ binary.Write(p.writer, binary.BigEndian, uint64(0))
+ }
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeUIDTag(u UID) {
+ nbytes := bplistMinimumIntSize(uint64(u))
+ tag := uint8(bpTagUID | (nbytes - 1))
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ p.writeSizedInt(uint64(u), nbytes)
+}
+
+func (p *bplistGenerator) writeRealTag(n float64, bits int) {
+ var tag uint8 = bpTagReal | 0x3
+ var val interface{} = n
+ if bits == 32 {
+ val = float32(n)
+ tag = bpTagReal | 0x2
+ }
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeDateTag(t time.Time) {
+ tag := uint8(bpTagDate) | 0x3
+ val := float64(t.In(time.UTC).UnixNano()) / float64(time.Second)
+ val -= 978307200 // Adjust to Apple Epoch
+
+ binary.Write(p.writer, binary.BigEndian, tag)
+ binary.Write(p.writer, binary.BigEndian, val)
+}
+
+func (p *bplistGenerator) writeCountedTag(tag uint8, count uint64) {
+ marker := tag
+ if count >= 0xF {
+ marker |= 0xF
+ } else {
+ marker |= uint8(count)
+ }
+
+ binary.Write(p.writer, binary.BigEndian, marker)
+
+ if count >= 0xF {
+ p.writeIntTag(false, count)
+ }
+}
+
+func (p *bplistGenerator) writeDataTag(data []byte) {
+ p.writeCountedTag(bpTagData, uint64(len(data)))
+ binary.Write(p.writer, binary.BigEndian, data)
+}
+
+func (p *bplistGenerator) writeStringTag(str string) {
+ for _, r := range str {
+ if r > 0x7F {
+ utf16Runes := utf16.Encode([]rune(str))
+ p.writeCountedTag(bpTagUTF16String, uint64(len(utf16Runes)))
+ binary.Write(p.writer, binary.BigEndian, utf16Runes)
+ return
+ }
+ }
+
+ p.writeCountedTag(bpTagASCIIString, uint64(len(str)))
+ binary.Write(p.writer, binary.BigEndian, []byte(str))
+}
+
+func (p *bplistGenerator) writeDictionaryTag(dict *cfDictionary) {
+ // assumption: sorted already; flattenPlistValue did this.
+ cnt := len(dict.keys)
+ p.writeCountedTag(bpTagDictionary, uint64(cnt))
+ vals := make([]uint64, cnt*2)
+ for i, k := range dict.keys {
+ // invariant: keys have already been "uniqued" (as PStrings)
+ keyIdx, ok := p.objmap[cfString(k).hash()]
+ if !ok {
+ panic(errors.New("failed to find key " + k + " in object map during serialization"))
+ }
+ vals[i] = keyIdx
+ }
+
+ for i, v := range dict.values {
+ // invariant: values have already been "uniqued"
+ objIdx, ok := p.indexForPlistValue(v)
+ if !ok {
+ panic(errors.New("failed to find value in object map during serialization"))
+ }
+ vals[i+cnt] = objIdx
+ }
+
+ for _, v := range vals {
+ p.writeSizedInt(v, int(p.trailer.ObjectRefSize))
+ }
+}
+
+func (p *bplistGenerator) writeArrayTag(arr []cfValue) {
+ p.writeCountedTag(bpTagArray, uint64(len(arr)))
+ for _, v := range arr {
+ objIdx, ok := p.indexForPlistValue(v)
+ if !ok {
+ panic(errors.New("failed to find value in object map during serialization"))
+ }
+
+ p.writeSizedInt(objIdx, int(p.trailer.ObjectRefSize))
+ }
+}
+
+func (p *bplistGenerator) Indent(i string) {
+ // There's nothing to indent.
+}
+
+func newBplistGenerator(w io.Writer) *bplistGenerator {
+ return &bplistGenerator{
+ writer: &countedWriter{Writer: mustWriter{w}},
+ }
+}
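
The generator packs small counts into the low nibble of each marker byte and falls back to a trailing integer object when the count is 0xF or larger, while offsets are written at the minimum width chosen by `bplistMinimumIntSize`. A standalone sketch of those two rules (simplified: the overflow count is always emitted as an 8-byte integer, unlike `writeIntTag`):

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

// minimumIntSize mirrors bplistMinimumIntSize above.
func minimumIntSize(n uint64) int {
	switch {
	case n <= 0xff:
		return 1
	case n <= 0xffff:
		return 2
	case n <= 0xffffffff:
		return 4
	default:
		return 8
	}
}

// countedTag mirrors writeCountedTag: counts below 0xF are packed into the
// marker's low nibble; larger counts follow as a separate integer object.
func countedTag(tag uint8, count uint64) []byte {
	var buf bytes.Buffer
	if count < 0xF {
		buf.WriteByte(tag | uint8(count))
	} else {
		buf.WriteByte(tag | 0xF)
		buf.WriteByte(0x10 | 0x3) // bpTagInteger, 2^3 = 8 bytes (simplified)
		binary.Write(&buf, binary.BigEndian, count)
	}
	return buf.Bytes()
}

func main() {
	fmt.Println(minimumIntSize(300))          // 2
	fmt.Printf("% x\n", countedTag(0xA0, 3))  // a3 (array of 3)
	fmt.Printf("% x\n", countedTag(0xD0, 20)) // df 13 00 00 00 00 00 00 00 14
}
```
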
diff --git a/vendor/howett.net/plist/bplist_parser.go b/vendor/howett.net/plist/bplist_parser.go
new file mode 100644
index 00000000..1825b570
--- /dev/null
+++ b/vendor/howett.net/plist/bplist_parser.go
@@ -0,0 +1,353 @@
+package plist
+
+import (
+ "bytes"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "math"
+ "runtime"
+ "time"
+ "unicode/utf16"
+)
+
+const (
+ signedHighBits = 0xFFFFFFFFFFFFFFFF
+)
+
+type offset uint64
+
+type bplistParser struct {
+ buffer []byte
+
+ reader io.ReadSeeker
+ version int
+ objects []cfValue // object ID to object
+ trailer bplistTrailer
+ trailerOffset uint64
+
+ containerStack []offset // slice of object offsets; manipulated during container deserialization
+}
+
+func (p *bplistParser) validateDocumentTrailer() {
+ if p.trailer.OffsetTableOffset >= p.trailerOffset {
+ panic(fmt.Errorf("offset table beyond beginning of trailer (0x%x, trailer@0x%x)", p.trailer.OffsetTableOffset, p.trailerOffset))
+ }
+
+ if p.trailer.OffsetTableOffset < 9 {
+ panic(fmt.Errorf("offset table begins inside header (0x%x)", p.trailer.OffsetTableOffset))
+ }
+
+ if p.trailerOffset > (p.trailer.NumObjects*uint64(p.trailer.OffsetIntSize))+p.trailer.OffsetTableOffset {
+ panic(errors.New("garbage between offset table and trailer"))
+ }
+
+ if p.trailer.OffsetTableOffset+(uint64(p.trailer.OffsetIntSize)*p.trailer.NumObjects) > p.trailerOffset {
+ panic(errors.New("offset table isn't long enough to address every object"))
+ }
+
+ maxObjectRef := uint64(1) << (8 * p.trailer.ObjectRefSize)
+ if p.trailer.NumObjects > maxObjectRef {
+ panic(fmt.Errorf("more objects (%v) than object ref size (%v bytes) can support", p.trailer.NumObjects, p.trailer.ObjectRefSize))
+ }
+
+ if p.trailer.OffsetIntSize < uint8(8) && (uint64(1)<<(8*p.trailer.OffsetIntSize)) <= p.trailer.OffsetTableOffset {
+ panic(errors.New("offset size isn't big enough to address entire file"))
+ }
+
+ if p.trailer.TopObject >= p.trailer.NumObjects {
+ panic(fmt.Errorf("top object #%d is out of range (only %d exist)", p.trailer.TopObject, p.trailer.NumObjects))
+ }
+}
+
+func (p *bplistParser) parseDocument() (pval cfValue, parseError error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+
+ parseError = plistParseError{"binary", r.(error)}
+ }
+ }()
+
+ p.buffer, _ = ioutil.ReadAll(p.reader)
+
+ l := len(p.buffer)
+ if l < 40 {
+ panic(errors.New("not enough data"))
+ }
+
+ if !bytes.Equal(p.buffer[0:6], []byte{'b', 'p', 'l', 'i', 's', 't'}) {
+ panic(errors.New("incomprehensible magic"))
+ }
+
+ p.version = int(((p.buffer[6] - '0') * 10) + (p.buffer[7] - '0'))
+
+ if p.version > 1 {
+ panic(fmt.Errorf("unexpected version %d", p.version))
+ }
+
+ p.trailerOffset = uint64(l - 32)
+ p.trailer = bplistTrailer{
+ SortVersion: p.buffer[p.trailerOffset+5],
+ OffsetIntSize: p.buffer[p.trailerOffset+6],
+ ObjectRefSize: p.buffer[p.trailerOffset+7],
+ NumObjects: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+8:]),
+ TopObject: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+16:]),
+ OffsetTableOffset: binary.BigEndian.Uint64(p.buffer[p.trailerOffset+24:]),
+ }
+
+ p.validateDocumentTrailer()
+
+ // INVARIANTS:
+ // - Entire offset table is before trailer
+ // - Offset table begins after header
+ // - Offset table can address entire document
+ // - Object IDs are big enough to support the number of objects in this plist
+ // - Top object is in range
+
+ p.objects = make([]cfValue, p.trailer.NumObjects)
+
+ pval = p.objectAtIndex(p.trailer.TopObject)
+ return
+}
+
+// parseSizedInteger returns a 128-bit integer as low64, high64
+func (p *bplistParser) parseSizedInteger(off offset, nbytes int) (lo uint64, hi uint64, newOffset offset) {
+ // Per comments in CoreFoundation, format version 00 requires that all
+ // 1, 2 or 4-byte integers be interpreted as unsigned. 8-byte integers are
+ // signed (always?) and therefore must be sign extended here.
+ // negative 1, 2, or 4-byte integers are always emitted as 64-bit.
+ switch nbytes {
+ case 1:
+ lo, hi = uint64(p.buffer[off]), 0
+ case 2:
+ lo, hi = uint64(binary.BigEndian.Uint16(p.buffer[off:])), 0
+ case 4:
+ lo, hi = uint64(binary.BigEndian.Uint32(p.buffer[off:])), 0
+ case 8:
+ lo = binary.BigEndian.Uint64(p.buffer[off:])
+ if p.buffer[off]&0x80 != 0 {
+ // sign extend if lo is signed
+ hi = signedHighBits
+ }
+ case 16:
+ lo, hi = binary.BigEndian.Uint64(p.buffer[off+8:]), binary.BigEndian.Uint64(p.buffer[off:])
+ default:
+ panic(errors.New("illegal integer size"))
+ }
+ newOffset = off + offset(nbytes)
+ return
+}
+
+func (p *bplistParser) parseObjectRefAtOffset(off offset) (uint64, offset) {
+ oid, _, next := p.parseSizedInteger(off, int(p.trailer.ObjectRefSize))
+ return oid, next
+}
+
+func (p *bplistParser) parseOffsetAtOffset(off offset) (offset, offset) {
+ parsedOffset, _, next := p.parseSizedInteger(off, int(p.trailer.OffsetIntSize))
+ return offset(parsedOffset), next
+}
+
+func (p *bplistParser) objectAtIndex(index uint64) cfValue {
+ if index >= p.trailer.NumObjects {
+ panic(fmt.Errorf("invalid object#%d (max %d)", index, p.trailer.NumObjects))
+ }
+
+ if pval := p.objects[index]; pval != nil {
+ return pval
+ }
+
+ off, _ := p.parseOffsetAtOffset(offset(p.trailer.OffsetTableOffset + (index * uint64(p.trailer.OffsetIntSize))))
+ if off > offset(p.trailer.OffsetTableOffset-1) {
+ panic(fmt.Errorf("object#%d starts beyond beginning of object table (0x%x, table@0x%x)", index, off, p.trailer.OffsetTableOffset))
+ }
+
+ pval := p.parseTagAtOffset(off)
+ p.objects[index] = pval
+ return pval
+
+}
+
+func (p *bplistParser) pushNestedObject(off offset) {
+ for _, v := range p.containerStack {
+ if v == off {
+ p.panicNestedObject(off)
+ }
+ }
+ p.containerStack = append(p.containerStack, off)
+}
+
+func (p *bplistParser) panicNestedObject(off offset) {
+ ids := ""
+ for _, v := range p.containerStack {
+ ids += fmt.Sprintf("0x%x > ", v)
+ }
+
+ // %s0x%d: ids above ends with " > "
+ panic(fmt.Errorf("self-referential collection@0x%x (%s0x%x) cannot be deserialized", off, ids, off))
+}
+
+func (p *bplistParser) popNestedObject() {
+ p.containerStack = p.containerStack[:len(p.containerStack)-1]
+}
+
+func (p *bplistParser) parseTagAtOffset(off offset) cfValue {
+ tag := p.buffer[off]
+
+ switch tag & 0xF0 {
+ case bpTagNull:
+ switch tag & 0x0F {
+ case bpTagBoolTrue, bpTagBoolFalse:
+ return cfBoolean(tag == bpTagBoolTrue)
+ }
+ case bpTagInteger:
+ lo, hi, _ := p.parseIntegerAtOffset(off)
+ return &cfNumber{
+ signed: hi == signedHighBits, // a signed integer is stored as a 128-bit integer with the top 64 bits set
+ value: lo,
+ }
+ case bpTagReal:
+ nbytes := 1 << (tag & 0x0F)
+ switch nbytes {
+ case 4:
+ bits := binary.BigEndian.Uint32(p.buffer[off+1:])
+ return &cfReal{wide: false, value: float64(math.Float32frombits(bits))}
+ case 8:
+ bits := binary.BigEndian.Uint64(p.buffer[off+1:])
+ return &cfReal{wide: true, value: math.Float64frombits(bits)}
+ }
+ panic(errors.New("illegal float size"))
+ case bpTagDate:
+ bits := binary.BigEndian.Uint64(p.buffer[off+1:])
+ val := math.Float64frombits(bits)
+
+		// Apple Epoch is 20010101000000Z
+ // Adjust for UNIX Time
+ val += 978307200
+
+ sec, fsec := math.Modf(val)
+ time := time.Unix(int64(sec), int64(fsec*float64(time.Second))).In(time.UTC)
+ return cfDate(time)
+ case bpTagData:
+ data := p.parseDataAtOffset(off)
+ return cfData(data)
+ case bpTagASCIIString:
+ str := p.parseASCIIStringAtOffset(off)
+ return cfString(str)
+ case bpTagUTF16String:
+ str := p.parseUTF16StringAtOffset(off)
+ return cfString(str)
+ case bpTagUID: // Somehow different than int: low half is nbytes - 1 instead of log2(nbytes)
+ lo, _, _ := p.parseSizedInteger(off+1, int(tag&0xF)+1)
+ return cfUID(lo)
+ case bpTagDictionary:
+ return p.parseDictionaryAtOffset(off)
+ case bpTagArray:
+ return p.parseArrayAtOffset(off)
+ }
+ panic(fmt.Errorf("unexpected atom 0x%2.02x at offset 0x%x", tag, off))
+}
+
+func (p *bplistParser) parseIntegerAtOffset(off offset) (uint64, uint64, offset) {
+ tag := p.buffer[off]
+ return p.parseSizedInteger(off+1, 1<<(tag&0xF))
+}
+
+func (p *bplistParser) countForTagAtOffset(off offset) (uint64, offset) {
+ tag := p.buffer[off]
+ cnt := uint64(tag & 0x0F)
+ if cnt == 0xF {
+ cnt, _, off = p.parseIntegerAtOffset(off + 1)
+ return cnt, off
+ }
+ return cnt, off + 1
+}
+
+func (p *bplistParser) parseDataAtOffset(off offset) []byte {
+ len, start := p.countForTagAtOffset(off)
+ if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("data@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
+ }
+ return p.buffer[start : start+offset(len)]
+}
+
+func (p *bplistParser) parseASCIIStringAtOffset(off offset) string {
+ len, start := p.countForTagAtOffset(off)
+ if start+offset(len) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("ascii string@0x%x too long (%v bytes, max is %v)", off, len, p.trailer.OffsetTableOffset-uint64(start)))
+ }
+
+ return zeroCopy8BitString(p.buffer, int(start), int(len))
+}
+
+func (p *bplistParser) parseUTF16StringAtOffset(off offset) string {
+ len, start := p.countForTagAtOffset(off)
+ bytes := len * 2
+ if start+offset(bytes) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("utf16 string@0x%x too long (%v bytes, max is %v)", off, bytes, p.trailer.OffsetTableOffset-uint64(start)))
+ }
+
+ u16s := make([]uint16, len)
+ for i := offset(0); i < offset(len); i++ {
+ u16s[i] = binary.BigEndian.Uint16(p.buffer[start+(i*2):])
+ }
+ runes := utf16.Decode(u16s)
+ return string(runes)
+}
+
+func (p *bplistParser) parseObjectListAtOffset(off offset, count uint64) []cfValue {
+ if off+offset(count*uint64(p.trailer.ObjectRefSize)) > offset(p.trailer.OffsetTableOffset) {
+ panic(fmt.Errorf("list@0x%x length (%v) puts its end beyond the offset table at 0x%x", off, count, p.trailer.OffsetTableOffset))
+ }
+ objects := make([]cfValue, count)
+
+ next := off
+ var oid uint64
+ for i := uint64(0); i < count; i++ {
+ oid, next = p.parseObjectRefAtOffset(next)
+ objects[i] = p.objectAtIndex(oid)
+ }
+
+ return objects
+}
+
+func (p *bplistParser) parseDictionaryAtOffset(off offset) *cfDictionary {
+ p.pushNestedObject(off)
+ defer p.popNestedObject()
+
+ // a dictionary is an object list of [key key key val val val]
+ cnt, start := p.countForTagAtOffset(off)
+ objects := p.parseObjectListAtOffset(start, cnt*2)
+
+ keys := make([]string, cnt)
+ for i := uint64(0); i < cnt; i++ {
+ if str, ok := objects[i].(cfString); ok {
+ keys[i] = string(str)
+ } else {
+ panic(fmt.Errorf("dictionary@0x%x contains non-string key at index %d", off, i))
+ }
+ }
+
+ return &cfDictionary{
+ keys: keys,
+ values: objects[cnt:],
+ }
+}
+
+func (p *bplistParser) parseArrayAtOffset(off offset) *cfArray {
+ p.pushNestedObject(off)
+ defer p.popNestedObject()
+
+ // an array is just an object list
+ cnt, start := p.countForTagAtOffset(off)
+ return &cfArray{p.parseObjectListAtOffset(start, cnt)}
+}
+
+func newBplistParser(r io.ReadSeeker) *bplistParser {
+ return &bplistParser{reader: r}
+}
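
A side note on the date case above: it rebases Core Foundation absolute time onto the Unix epoch by adding 978307200 seconds, the gap between 1970-01-01 and the Apple epoch of 2001-01-01. Below is a minimal, self-contained sketch of the same conversion outside the parser; the helper name is ours, not part of the vendored code.

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    // cfAbsoluteTimeToGo converts a binary-plist date atom (a float64 of seconds
    // since 2001-01-01T00:00:00Z) into a time.Time, mirroring the parser above.
    func cfAbsoluteTimeToGo(val float64) time.Time {
        val += 978307200 // offset between the Unix epoch and the Apple epoch
        sec, fsec := math.Modf(val)
        return time.Unix(int64(sec), int64(fsec*float64(time.Second))).In(time.UTC)
    }

    func main() {
        fmt.Println(cfAbsoluteTimeToGo(0)) // 2001-01-01 00:00:00 +0000 UTC
    }
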
diff --git a/vendor/howett.net/plist/decode.go b/vendor/howett.net/plist/decode.go
new file mode 100644
index 00000000..4c646677
--- /dev/null
+++ b/vendor/howett.net/plist/decode.go
@@ -0,0 +1,119 @@
+package plist
+
+import (
+ "bytes"
+ "io"
+ "reflect"
+ "runtime"
+)
+
+type parser interface {
+ parseDocument() (cfValue, error)
+}
+
+// A Decoder reads a property list from an input stream.
+type Decoder struct {
+ // the format of the most-recently-decoded property list
+ Format int
+
+ reader io.ReadSeeker
+ lax bool
+}
+
+// Decode works like Unmarshal, except it reads the decoder stream to find property list elements.
+//
+// After Decoding, the Decoder's Format field will be set to one of the plist format constants.
+func (p *Decoder) Decode(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ header := make([]byte, 6)
+ p.reader.Read(header)
+ p.reader.Seek(0, 0)
+
+ var parser parser
+ var pval cfValue
+ if bytes.Equal(header, []byte("bplist")) {
+ parser = newBplistParser(p.reader)
+ pval, err = parser.parseDocument()
+ if err != nil {
+ // Had a bplist header, but still got an error: we have to die here.
+ return err
+ }
+ p.Format = BinaryFormat
+ } else {
+ parser = newXMLPlistParser(p.reader)
+ pval, err = parser.parseDocument()
+ if _, ok := err.(invalidPlistError); ok {
+ // Rewind: the XML parser might have exhausted the file.
+ p.reader.Seek(0, 0)
+ // We don't use parser here because we want the textPlistParser type
+ tp := newTextPlistParser(p.reader)
+ pval, err = tp.parseDocument()
+ if err != nil {
+ return err
+ }
+ p.Format = tp.format
+ if p.Format == OpenStepFormat {
+ // OpenStep property lists can only store strings,
+ // so we have to turn on lax mode here for the unmarshal step later.
+ p.lax = true
+ }
+ } else {
+ if err != nil {
+ return err
+ }
+ p.Format = XMLFormat
+ }
+ }
+
+ p.unmarshal(pval, reflect.ValueOf(v))
+ return
+}
+
+// NewDecoder returns a Decoder that reads property list elements from a stream reader, r.
+// NewDecoder requires a Seekable stream for the purposes of file type detection.
+func NewDecoder(r io.ReadSeeker) *Decoder {
+ return &Decoder{Format: InvalidFormat, reader: r, lax: false}
+}
+
+// Unmarshal parses a property list document and stores the result in the value pointed to by v.
+//
+// Unmarshal uses the inverse of the type encodings that Marshal uses, allocating heap-borne types as necessary.
+//
+// When given a nil pointer, Unmarshal allocates a new value for it to point to.
+//
+// To decode property list values into an interface value, Unmarshal decodes the property list into the concrete value contained
+// in the interface value. If the interface value is nil, Unmarshal stores one of the following in the interface value:
+//
+// string, bool, uint64, float64
+// plist.UID for "CoreFoundation Keyed Archiver UIDs" (convertible to uint64)
+// []byte, for plist data
+// []interface{}, for plist arrays
+// map[string]interface{}, for plist dictionaries
+//
+// If a property list value is not appropriate for a given value type, Unmarshal aborts immediately and returns an error.
+//
+// As Go does not support 128-bit types, and we don't want to pretend we're giving the user integer types (as opposed to
+// secretly passing them structs), Unmarshal will drop the high 64 bits of any 128-bit integers encoded in binary property lists.
+// (This is important because CoreFoundation serializes some large 64-bit values as 128-bit values with an empty high half.)
+//
+// When Unmarshal encounters an OpenStep property list, it will enter a relaxed parsing mode: OpenStep property lists can only store
+// plain old data as strings, so we will attempt to recover integer, floating-point, boolean and date values wherever they are necessary.
+// (for example, if Unmarshal attempts to unmarshal an OpenStep property list into a time.Time, it will try to parse the string it
+// receives as a time.)
+//
+// Unmarshal returns the detected property list format and an error, if any.
+func Unmarshal(data []byte, v interface{}) (format int, err error) {
+ r := bytes.NewReader(data)
+ dec := NewDecoder(r)
+ err = dec.Decode(v)
+ format = dec.Format
+ return
+}
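
Decode and Unmarshal above are the package's decoding entry points. A hedged usage sketch follows; the Settings type, its field names, and the XML payload are illustrative only and not part of the vendored code.

    package main

    import (
        "fmt"

        "howett.net/plist"
    )

    // Settings is a hypothetical target type used only to show Unmarshal.
    type Settings struct {
        Name    string   `plist:"name"`
        Volumes []string `plist:"volumes"`
    }

    func main() {
        doc := []byte(`<?xml version="1.0" encoding="UTF-8"?>
    <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
    <plist version="1.0"><dict>
        <key>name</key><string>node-a</string>
        <key>volumes</key><array><string>disk-1</string></array>
    </dict></plist>`)

        var s Settings
        format, err := plist.Unmarshal(doc, &s)
        if err != nil {
            panic(err)
        }
        fmt.Println(format == plist.XMLFormat, s.Name, s.Volumes) // true node-a [disk-1]
    }
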
diff --git a/vendor/howett.net/plist/doc.go b/vendor/howett.net/plist/doc.go
new file mode 100644
index 00000000..457e60b6
--- /dev/null
+++ b/vendor/howett.net/plist/doc.go
@@ -0,0 +1,5 @@
+// Package plist implements encoding and decoding of Apple's "property list" format.
+// Property lists come in three sorts: plain text (GNUStep and OpenStep), XML and binary.
+// plist supports all of them.
+// The mapping between property list and Go objects is described in the documentation for the Marshal and Unmarshal functions.
+package plist
diff --git a/vendor/howett.net/plist/encode.go b/vendor/howett.net/plist/encode.go
new file mode 100644
index 00000000..f81309b5
--- /dev/null
+++ b/vendor/howett.net/plist/encode.go
@@ -0,0 +1,126 @@
+package plist
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "reflect"
+ "runtime"
+)
+
+type generator interface {
+ generateDocument(cfValue)
+ Indent(string)
+}
+
+// An Encoder writes a property list to an output stream.
+type Encoder struct {
+ writer io.Writer
+ format int
+
+ indent string
+}
+
+// Encode writes the property list encoding of v to the stream.
+func (p *Encoder) Encode(v interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+
+ pval := p.marshal(reflect.ValueOf(v))
+ if pval == nil {
+ panic(errors.New("plist: no root element to encode"))
+ }
+
+ var g generator
+ switch p.format {
+ case XMLFormat:
+ g = newXMLPlistGenerator(p.writer)
+ case BinaryFormat, AutomaticFormat:
+ g = newBplistGenerator(p.writer)
+ case OpenStepFormat, GNUStepFormat:
+ g = newTextPlistGenerator(p.writer, p.format)
+ }
+ g.Indent(p.indent)
+ g.generateDocument(pval)
+ return
+}
+
+// Indent turns on pretty-printing for the XML and Text property list formats.
+// Each element begins on a new line and is preceded by one or more copies of indent according to its nesting depth.
+func (p *Encoder) Indent(indent string) {
+ p.indent = indent
+}
+
+// NewEncoder returns an Encoder that writes an XML property list to w.
+func NewEncoder(w io.Writer) *Encoder {
+ return NewEncoderForFormat(w, XMLFormat)
+}
+
+// NewEncoderForFormat returns an Encoder that writes a property list to w in the specified format.
+// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat).
+func NewEncoderForFormat(w io.Writer, format int) *Encoder {
+ return &Encoder{
+ writer: w,
+ format: format,
+ }
+}
+
+// NewBinaryEncoder returns an Encoder that writes a binary property list to w.
+func NewBinaryEncoder(w io.Writer) *Encoder {
+ return NewEncoderForFormat(w, BinaryFormat)
+}
+
+// Marshal returns the property list encoding of v in the specified format.
+//
+// Pass AutomaticFormat to allow the library to choose the best encoding (currently BinaryFormat).
+//
+// Marshal traverses the value v recursively.
+// Any nil values encountered, other than the root, will be silently discarded as
+// the property list format bears no representation for nil values.
+//
+// Strings, integers of varying size, floats and booleans are encoded unchanged.
+// Strings bearing non-ASCII runes will be encoded differently depending upon the property list format:
+// UTF-8 for XML property lists and UTF-16 for binary property lists.
+//
+// Slice and Array values are encoded as property list arrays, except for
+// []byte values, which are encoded as data.
+//
+// Map values encode as dictionaries. The map's key type must be string; there is no provision for encoding non-string dictionary keys.
+//
+// Struct values are encoded as dictionaries, with only exported fields being serialized. Struct field encoding may be influenced with the use of tags.
+// The tag format is:
+//
+ //     `plist:"<key>[,flags...]"`
+//
+// The following flags are supported:
+//
+// omitempty Only include the field if it is not set to the zero value for its type.
+//
+// If the key is "-", the field is ignored.
+//
+// Anonymous struct fields are encoded as if their exported fields were exposed via the outer struct.
+//
+// Pointer values encode as the value pointed to.
+//
+// Channel, complex and function values cannot be encoded. Any attempt to do so causes Marshal to return an error.
+func Marshal(v interface{}, format int) ([]byte, error) {
+ return MarshalIndent(v, format, "")
+}
+
+// MarshalIndent works like Marshal, but each property list element
+// begins on a new line and is preceded by one or more copies of indent according to its nesting depth.
+func MarshalIndent(v interface{}, format int, indent string) ([]byte, error) {
+ buf := &bytes.Buffer{}
+ enc := NewEncoderForFormat(buf, format)
+ enc.Indent(indent)
+ if err := enc.Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
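
The struct-tag rules documented for Marshal above (named keys, omitempty, and "-") are easiest to see with a small example. A hedged sketch; the Disk type and its fields are illustrative only.

    package main

    import (
        "fmt"

        "howett.net/plist"
    )

    // Disk is a hypothetical type exercising the documented tag flags.
    type Disk struct {
        Name     string `plist:"name"`
        SizeMB   int    `plist:"sizeMb,omitempty"` // dropped when zero
        Internal string `plist:"-"`                // never serialized
    }

    func main() {
        // Only "name" is emitted: SizeMB is zero (omitempty) and Internal is "-".
        out, err := plist.MarshalIndent(Disk{Name: "data"}, plist.XMLFormat, "\t")
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out))
    }
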
diff --git a/vendor/howett.net/plist/fuzz.go b/vendor/howett.net/plist/fuzz.go
new file mode 100644
index 00000000..18a3b4b9
--- /dev/null
+++ b/vendor/howett.net/plist/fuzz.go
@@ -0,0 +1,17 @@
+// +build gofuzz
+
+package plist
+
+import (
+ "bytes"
+)
+
+func Fuzz(data []byte) int {
+ buf := bytes.NewReader(data)
+
+ var obj interface{}
+ if err := NewDecoder(buf).Decode(&obj); err != nil {
+ return 0
+ }
+ return 1
+}
diff --git a/vendor/howett.net/plist/marshal.go b/vendor/howett.net/plist/marshal.go
new file mode 100644
index 00000000..e237d20a
--- /dev/null
+++ b/vendor/howett.net/plist/marshal.go
@@ -0,0 +1,187 @@
+package plist
+
+import (
+ "encoding"
+ "reflect"
+ "time"
+)
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
+var (
+ plistMarshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+ textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+ timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+)
+
+func implementsInterface(val reflect.Value, interfaceType reflect.Type) (interface{}, bool) {
+ if val.CanInterface() && val.Type().Implements(interfaceType) {
+ return val.Interface(), true
+ }
+
+ if val.CanAddr() {
+ pv := val.Addr()
+ if pv.CanInterface() && pv.Type().Implements(interfaceType) {
+ return pv.Interface(), true
+ }
+ }
+ return nil, false
+}
+
+func (p *Encoder) marshalPlistInterface(marshalable Marshaler) cfValue {
+ value, err := marshalable.MarshalPlist()
+ if err != nil {
+ panic(err)
+ }
+ return p.marshal(reflect.ValueOf(value))
+}
+
+// marshalTextInterface marshals a TextMarshaler to a plist string.
+func (p *Encoder) marshalTextInterface(marshalable encoding.TextMarshaler) cfValue {
+ s, err := marshalable.MarshalText()
+ if err != nil {
+ panic(err)
+ }
+ return cfString(s)
+}
+
+// marshalStruct marshals a reflected struct value to a plist dictionary
+func (p *Encoder) marshalStruct(typ reflect.Type, val reflect.Value) cfValue {
+ tinfo, _ := getTypeInfo(typ)
+
+ dict := &cfDictionary{
+ keys: make([]string, 0, len(tinfo.fields)),
+ values: make([]cfValue, 0, len(tinfo.fields)),
+ }
+ for _, finfo := range tinfo.fields {
+ value := finfo.value(val)
+ if !value.IsValid() || finfo.omitEmpty && isEmptyValue(value) {
+ continue
+ }
+ dict.keys = append(dict.keys, finfo.name)
+ dict.values = append(dict.values, p.marshal(value))
+ }
+
+ return dict
+}
+
+func (p *Encoder) marshalTime(val reflect.Value) cfValue {
+ time := val.Interface().(time.Time)
+ return cfDate(time)
+}
+
+func (p *Encoder) marshal(val reflect.Value) cfValue {
+ if !val.IsValid() {
+ return nil
+ }
+
+ if receiver, can := implementsInterface(val, plistMarshalerType); can {
+ return p.marshalPlistInterface(receiver.(Marshaler))
+ }
+
+ // time.Time implements TextMarshaler, but we need to store it in RFC3339
+ if val.Type() == timeType {
+ return p.marshalTime(val)
+ }
+ if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
+ ival := val.Elem()
+ if ival.IsValid() && ival.Type() == timeType {
+ return p.marshalTime(ival)
+ }
+ }
+
+ // Check for text marshaler.
+ if receiver, can := implementsInterface(val, textMarshalerType); can {
+ return p.marshalTextInterface(receiver.(encoding.TextMarshaler))
+ }
+
+ // Descend into pointers or interfaces
+ if val.Kind() == reflect.Ptr || (val.Kind() == reflect.Interface && val.NumMethod() == 0) {
+ val = val.Elem()
+ }
+
+ // We got this far and still may have an invalid anything or nil ptr/interface
+ if !val.IsValid() || ((val.Kind() == reflect.Ptr || val.Kind() == reflect.Interface) && val.IsNil()) {
+ return nil
+ }
+
+ typ := val.Type()
+
+ if typ == uidType {
+ return cfUID(val.Uint())
+ }
+
+ if val.Kind() == reflect.Struct {
+ return p.marshalStruct(typ, val)
+ }
+
+ switch val.Kind() {
+ case reflect.String:
+ return cfString(val.String())
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return &cfNumber{signed: true, value: uint64(val.Int())}
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return &cfNumber{signed: false, value: val.Uint()}
+ case reflect.Float32:
+ return &cfReal{wide: false, value: val.Float()}
+ case reflect.Float64:
+ return &cfReal{wide: true, value: val.Float()}
+ case reflect.Bool:
+ return cfBoolean(val.Bool())
+ case reflect.Slice, reflect.Array:
+ if typ.Elem().Kind() == reflect.Uint8 {
+ bytes := []byte(nil)
+ if val.CanAddr() && val.Kind() == reflect.Slice {
+ // arrays may be addressable but do not support .Bytes
+ bytes = val.Bytes()
+ } else {
+ bytes = make([]byte, val.Len())
+ reflect.Copy(reflect.ValueOf(bytes), val)
+ }
+ return cfData(bytes)
+ } else {
+ values := make([]cfValue, val.Len())
+ for i, length := 0, val.Len(); i < length; i++ {
+ if subpval := p.marshal(val.Index(i)); subpval != nil {
+ values[i] = subpval
+ }
+ }
+ return &cfArray{values}
+ }
+ case reflect.Map:
+ if typ.Key().Kind() != reflect.String {
+ panic(&unknownTypeError{typ})
+ }
+
+ l := val.Len()
+ dict := &cfDictionary{
+ keys: make([]string, 0, l),
+ values: make([]cfValue, 0, l),
+ }
+ for _, keyv := range val.MapKeys() {
+ if subpval := p.marshal(val.MapIndex(keyv)); subpval != nil {
+ dict.keys = append(dict.keys, keyv.String())
+ dict.values = append(dict.values, subpval)
+ }
+ }
+ return dict
+ default:
+ panic(&unknownTypeError{typ})
+ }
+}
diff --git a/vendor/howett.net/plist/must.go b/vendor/howett.net/plist/must.go
new file mode 100644
index 00000000..2c2523d9
--- /dev/null
+++ b/vendor/howett.net/plist/must.go
@@ -0,0 +1,50 @@
+package plist
+
+import (
+ "io"
+ "strconv"
+)
+
+type mustWriter struct {
+ io.Writer
+}
+
+func (w mustWriter) Write(p []byte) (int, error) {
+ n, err := w.Writer.Write(p)
+ if err != nil {
+ panic(err)
+ }
+ return n, nil
+}
+
+func mustParseInt(str string, base, bits int) int64 {
+ i, err := strconv.ParseInt(str, base, bits)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseUint(str string, base, bits int) uint64 {
+ i, err := strconv.ParseUint(str, base, bits)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseFloat(str string, bits int) float64 {
+ i, err := strconv.ParseFloat(str, bits)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
+
+func mustParseBool(str string) bool {
+ i, err := strconv.ParseBool(str)
+ if err != nil {
+ panic(err)
+ }
+ return i
+}
diff --git a/vendor/howett.net/plist/plist.go b/vendor/howett.net/plist/plist.go
new file mode 100644
index 00000000..8883e1c7
--- /dev/null
+++ b/vendor/howett.net/plist/plist.go
@@ -0,0 +1,83 @@
+package plist
+
+import (
+ "reflect"
+)
+
+// Property list format constants
+const (
+ // Used by Decoder to represent an invalid property list.
+ InvalidFormat int = 0
+
+ // Used to indicate total abandon with regards to Encoder's output format.
+ AutomaticFormat = 0
+
+ XMLFormat = 1
+ BinaryFormat = 2
+ OpenStepFormat = 3
+ GNUStepFormat = 4
+)
+
+var FormatNames = map[int]string{
+ InvalidFormat: "unknown/invalid",
+ XMLFormat: "XML",
+ BinaryFormat: "Binary",
+ OpenStepFormat: "OpenStep",
+ GNUStepFormat: "GNUStep",
+}
+
+type unknownTypeError struct {
+ typ reflect.Type
+}
+
+func (u *unknownTypeError) Error() string {
+ return "plist: can't marshal value of type " + u.typ.String()
+}
+
+type invalidPlistError struct {
+ format string
+ err error
+}
+
+func (e invalidPlistError) Error() string {
+ s := "plist: invalid " + e.format + " property list"
+ if e.err != nil {
+ s += ": " + e.err.Error()
+ }
+ return s
+}
+
+type plistParseError struct {
+ format string
+ err error
+}
+
+func (e plistParseError) Error() string {
+ s := "plist: error parsing " + e.format + " property list"
+ if e.err != nil {
+ s += ": " + e.err.Error()
+ }
+ return s
+}
+
+// A UID represents a unique object identifier. UIDs are serialized in a manner distinct from
+// that of integers.
+type UID uint64
+
+// Marshaler is the interface implemented by types that can marshal themselves into valid
+// property list objects. The returned value is marshaled in place of the original value
+ // implementing Marshaler.
+//
+// If an error is returned by MarshalPlist, marshaling stops and the error is returned.
+type Marshaler interface {
+ MarshalPlist() (interface{}, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal themselves from
+// property list objects. The UnmarshalPlist method receives a function that may
+// be called to unmarshal the original property list value into a field or variable.
+//
+// It is safe to call the unmarshal function more than once.
+type Unmarshaler interface {
+ UnmarshalPlist(unmarshal func(interface{}) error) error
+}
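
The Marshaler and Unmarshaler interfaces above let a type control its own property-list representation. A hedged round-trip sketch; the Tag type is hypothetical and only shows how the two hooks are wired up by the encoder and decoder.

    package main

    import (
        "fmt"
        "strings"

        "howett.net/plist"
    )

    // Tag stores itself as a single lowercase string.
    type Tag struct{ Value string }

    func (t Tag) MarshalPlist() (interface{}, error) {
        return strings.ToLower(t.Value), nil
    }

    func (t *Tag) UnmarshalPlist(unmarshal func(interface{}) error) error {
        var s string
        if err := unmarshal(&s); err != nil {
            return err
        }
        t.Value = s
        return nil
    }

    func main() {
        b, err := plist.Marshal(Tag{Value: "FAST"}, plist.XMLFormat)
        if err != nil {
            panic(err)
        }
        var t Tag
        if _, err := plist.Unmarshal(b, &t); err != nil {
            panic(err)
        }
        fmt.Println(t.Value) // fast
    }
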
diff --git a/vendor/howett.net/plist/plist_types.go b/vendor/howett.net/plist/plist_types.go
new file mode 100644
index 00000000..98363644
--- /dev/null
+++ b/vendor/howett.net/plist/plist_types.go
@@ -0,0 +1,172 @@
+package plist
+
+import (
+ "hash/crc32"
+ "sort"
+ "strconv"
+ "time"
+)
+
+// magic value used in the non-binary encoding of UIDs
+// (stored as a dictionary mapping CF$UID->integer)
+const cfUIDMagic = "CF$UID"
+
+type cfValue interface {
+ typeName() string
+ hash() interface{}
+}
+
+type cfDictionary struct {
+ keys sort.StringSlice
+ values []cfValue
+}
+
+func (*cfDictionary) typeName() string {
+ return "dictionary"
+}
+
+func (p *cfDictionary) hash() interface{} {
+ return p
+}
+
+func (p *cfDictionary) Len() int {
+ return len(p.keys)
+}
+
+func (p *cfDictionary) Less(i, j int) bool {
+ return p.keys.Less(i, j)
+}
+
+func (p *cfDictionary) Swap(i, j int) {
+ p.keys.Swap(i, j)
+ p.values[i], p.values[j] = p.values[j], p.values[i]
+}
+
+func (p *cfDictionary) sort() {
+ sort.Sort(p)
+}
+
+func (p *cfDictionary) maybeUID(lax bool) cfValue {
+ if len(p.keys) == 1 && p.keys[0] == "CF$UID" && len(p.values) == 1 {
+ pval := p.values[0]
+ if integer, ok := pval.(*cfNumber); ok {
+ return cfUID(integer.value)
+ }
+ // Openstep only has cfString. Act like the unmarshaller a bit.
+ if lax {
+ if str, ok := pval.(cfString); ok {
+ if i, err := strconv.ParseUint(string(str), 10, 64); err == nil {
+ return cfUID(i)
+ }
+ }
+ }
+ }
+ return p
+}
+
+type cfArray struct {
+ values []cfValue
+}
+
+func (*cfArray) typeName() string {
+ return "array"
+}
+
+func (p *cfArray) hash() interface{} {
+ return p
+}
+
+type cfString string
+
+func (cfString) typeName() string {
+ return "string"
+}
+
+func (p cfString) hash() interface{} {
+ return string(p)
+}
+
+type cfNumber struct {
+ signed bool
+ value uint64
+}
+
+func (*cfNumber) typeName() string {
+ return "integer"
+}
+
+func (p *cfNumber) hash() interface{} {
+ if p.signed {
+ return int64(p.value)
+ }
+ return p.value
+}
+
+type cfReal struct {
+ wide bool
+ value float64
+}
+
+func (cfReal) typeName() string {
+ return "real"
+}
+
+func (p *cfReal) hash() interface{} {
+ if p.wide {
+ return p.value
+ }
+ return float32(p.value)
+}
+
+type cfBoolean bool
+
+func (cfBoolean) typeName() string {
+ return "boolean"
+}
+
+func (p cfBoolean) hash() interface{} {
+ return bool(p)
+}
+
+type cfUID UID
+
+func (cfUID) typeName() string {
+ return "UID"
+}
+
+func (p cfUID) hash() interface{} {
+ return p
+}
+
+func (p cfUID) toDict() *cfDictionary {
+ return &cfDictionary{
+ keys: []string{cfUIDMagic},
+ values: []cfValue{&cfNumber{
+ signed: false,
+ value: uint64(p),
+ }},
+ }
+}
+
+type cfData []byte
+
+func (cfData) typeName() string {
+ return "data"
+}
+
+func (p cfData) hash() interface{} {
+ // Data are uniqued by their checksums.
+ // Todo: Look at calculating this only once and storing it somewhere;
+ // crc32 is fairly quick, however.
+ return crc32.ChecksumIEEE([]byte(p))
+}
+
+type cfDate time.Time
+
+func (cfDate) typeName() string {
+ return "date"
+}
+
+func (p cfDate) hash() interface{} {
+ return time.Time(p)
+}
diff --git a/vendor/howett.net/plist/text_generator.go b/vendor/howett.net/plist/text_generator.go
new file mode 100644
index 00000000..d71f02bb
--- /dev/null
+++ b/vendor/howett.net/plist/text_generator.go
@@ -0,0 +1,228 @@
+package plist
+
+import (
+ "encoding/hex"
+ "io"
+ "strconv"
+ "time"
+)
+
+type textPlistGenerator struct {
+ writer io.Writer
+ format int
+
+ quotableTable *characterSet
+
+ indent string
+ depth int
+
+ dictKvDelimiter, dictEntryDelimiter, arrayDelimiter []byte
+}
+
+var (
+ textPlistTimeLayout = "2006-01-02 15:04:05 -0700"
+ padding = "0000"
+)
+
+func (p *textPlistGenerator) generateDocument(pval cfValue) {
+ p.writePlistValue(pval)
+}
+
+func (p *textPlistGenerator) plistQuotedString(str string) string {
+ if str == "" {
+ return `""`
+ }
+ s := ""
+ quot := false
+ for _, r := range str {
+ if r > 0xFF {
+ quot = true
+ s += `\U`
+ us := strconv.FormatInt(int64(r), 16)
+ s += padding[len(us):]
+ s += us
+ } else if r > 0x7F {
+ quot = true
+ s += `\`
+ us := strconv.FormatInt(int64(r), 8)
+ s += padding[1+len(us):]
+ s += us
+ } else {
+ c := uint8(r)
+ if p.quotableTable.ContainsByte(c) {
+ quot = true
+ }
+
+ switch c {
+ case '\a':
+ s += `\a`
+ case '\b':
+ s += `\b`
+ case '\v':
+ s += `\v`
+ case '\f':
+ s += `\f`
+ case '\\':
+ s += `\\`
+ case '"':
+ s += `\"`
+ case '\t', '\r', '\n':
+ fallthrough
+ default:
+ s += string(c)
+ }
+ }
+ }
+ if quot {
+ s = `"` + s + `"`
+ }
+ return s
+}
+
+func (p *textPlistGenerator) deltaIndent(depthDelta int) {
+ if depthDelta < 0 {
+ p.depth--
+ } else if depthDelta > 0 {
+ p.depth++
+ }
+}
+
+func (p *textPlistGenerator) writeIndent() {
+ if len(p.indent) == 0 {
+ return
+ }
+ if len(p.indent) > 0 {
+ p.writer.Write([]byte("\n"))
+ for i := 0; i < p.depth; i++ {
+ io.WriteString(p.writer, p.indent)
+ }
+ }
+}
+
+func (p *textPlistGenerator) writePlistValue(pval cfValue) {
+ if pval == nil {
+ return
+ }
+
+ switch pval := pval.(type) {
+ case *cfDictionary:
+ pval.sort()
+ p.writer.Write([]byte(`{`))
+ p.deltaIndent(1)
+ for i, k := range pval.keys {
+ p.writeIndent()
+ io.WriteString(p.writer, p.plistQuotedString(k))
+ p.writer.Write(p.dictKvDelimiter)
+ p.writePlistValue(pval.values[i])
+ p.writer.Write(p.dictEntryDelimiter)
+ }
+ p.deltaIndent(-1)
+ p.writeIndent()
+ p.writer.Write([]byte(`}`))
+ case *cfArray:
+ p.writer.Write([]byte(`(`))
+ p.deltaIndent(1)
+ for _, v := range pval.values {
+ p.writeIndent()
+ p.writePlistValue(v)
+ p.writer.Write(p.arrayDelimiter)
+ }
+ p.deltaIndent(-1)
+ p.writeIndent()
+ p.writer.Write([]byte(`)`))
+ case cfString:
+ io.WriteString(p.writer, p.plistQuotedString(string(pval)))
+ case *cfNumber:
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`<*I`))
+ }
+ if pval.signed {
+ io.WriteString(p.writer, strconv.FormatInt(int64(pval.value), 10))
+ } else {
+ io.WriteString(p.writer, strconv.FormatUint(pval.value, 10))
+ }
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`>`))
+ }
+ case *cfReal:
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`<*R`))
+ }
+ // GNUstep does not differentiate between 32/64-bit floats.
+ io.WriteString(p.writer, strconv.FormatFloat(pval.value, 'g', -1, 64))
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`>`))
+ }
+ case cfBoolean:
+ if p.format == GNUStepFormat {
+ if pval {
+ p.writer.Write([]byte(`<*BY>`))
+ } else {
+ p.writer.Write([]byte(`<*BN>`))
+ }
+ } else {
+ if pval {
+ p.writer.Write([]byte(`1`))
+ } else {
+ p.writer.Write([]byte(`0`))
+ }
+ }
+ case cfData:
+ var hexencoded [9]byte
+ var l int
+ var asc = 9
+ hexencoded[8] = ' '
+
+ p.writer.Write([]byte(`<`))
+ b := []byte(pval)
+ for i := 0; i < len(b); i += 4 {
+ l = i + 4
+ if l >= len(b) {
+ l = len(b)
+ // We no longer need the space - or the rest of the buffer.
+ // (we used >= above to get this part without another conditional :P)
+ asc = (l - i) * 2
+ }
+ // Fill the buffer (only up to 8 characters, to preserve the space we implicitly include
+ // at the end of every encode)
+ hex.Encode(hexencoded[:8], b[i:l])
+ io.WriteString(p.writer, string(hexencoded[:asc]))
+ }
+ p.writer.Write([]byte(`>`))
+ case cfDate:
+ if p.format == GNUStepFormat {
+ p.writer.Write([]byte(`<*D`))
+ io.WriteString(p.writer, time.Time(pval).In(time.UTC).Format(textPlistTimeLayout))
+ p.writer.Write([]byte(`>`))
+ } else {
+ io.WriteString(p.writer, p.plistQuotedString(time.Time(pval).In(time.UTC).Format(textPlistTimeLayout)))
+ }
+ case cfUID:
+ p.writePlistValue(pval.toDict())
+ }
+}
+
+func (p *textPlistGenerator) Indent(i string) {
+ p.indent = i
+ if i == "" {
+ p.dictKvDelimiter = []byte(`=`)
+ } else {
+ // For pretty-printing
+ p.dictKvDelimiter = []byte(` = `)
+ }
+}
+
+func newTextPlistGenerator(w io.Writer, format int) *textPlistGenerator {
+ table := &osQuotable
+ if format == GNUStepFormat {
+ table = &gsQuotable
+ }
+ return &textPlistGenerator{
+ writer: mustWriter{w},
+ format: format,
+ quotableTable: table,
+ dictKvDelimiter: []byte(`=`),
+ arrayDelimiter: []byte(`,`),
+ dictEntryDelimiter: []byte(`;`),
+ }
+}
diff --git a/vendor/howett.net/plist/text_parser.go b/vendor/howett.net/plist/text_parser.go
new file mode 100644
index 00000000..c60423ff
--- /dev/null
+++ b/vendor/howett.net/plist/text_parser.go
@@ -0,0 +1,580 @@
+// Parser for text plist formats.
+// @see https://github.com/apple/swift-corelibs-foundation/blob/master/CoreFoundation/Parsing.subproj/CFOldStylePList.c
+// @see https://github.com/gnustep/libs-base/blob/master/Source/NSPropertyList.m
+// This parser also handles strings files.
+
+package plist
+
+import (
+ "encoding/base64"
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "runtime"
+ "strings"
+ "time"
+ "unicode/utf16"
+ "unicode/utf8"
+)
+
+type textPlistParser struct {
+ reader io.Reader
+ format int
+
+ input string
+ start int
+ pos int
+ width int
+}
+
+func convertU16(buffer []byte, bo binary.ByteOrder) (string, error) {
+ if len(buffer)%2 != 0 {
+ return "", errors.New("truncated utf16")
+ }
+
+ tmp := make([]uint16, len(buffer)/2)
+ for i := 0; i < len(buffer); i += 2 {
+ tmp[i/2] = bo.Uint16(buffer[i : i+2])
+ }
+ return string(utf16.Decode(tmp)), nil
+}
+
+func guessEncodingAndConvert(buffer []byte) (string, error) {
+ if len(buffer) >= 3 && buffer[0] == 0xEF && buffer[1] == 0xBB && buffer[2] == 0xBF {
+ // UTF-8 BOM
+ return zeroCopy8BitString(buffer, 3, len(buffer)-3), nil
+ } else if len(buffer) >= 2 {
+ // UTF-16 guesses
+
+ switch {
+ // stream is big-endian (BOM is FE FF or head is 00 XX)
+ case (buffer[0] == 0xFE && buffer[1] == 0xFF):
+ return convertU16(buffer[2:], binary.BigEndian)
+ case (buffer[0] == 0 && buffer[1] != 0):
+ return convertU16(buffer, binary.BigEndian)
+
+ // stream is little-endian (BOM is FE FF or head is XX 00)
+ case (buffer[0] == 0xFF && buffer[1] == 0xFE):
+ return convertU16(buffer[2:], binary.LittleEndian)
+ case (buffer[0] != 0 && buffer[1] == 0):
+ return convertU16(buffer, binary.LittleEndian)
+ }
+ }
+
+ // fallback: assume ASCII (not great!)
+ return zeroCopy8BitString(buffer, 0, len(buffer)), nil
+}
+
+func (p *textPlistParser) parseDocument() (pval cfValue, parseError error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ // Wrap all non-invalid-plist errors.
+ parseError = plistParseError{"text", r.(error)}
+ }
+ }()
+
+ buffer, err := ioutil.ReadAll(p.reader)
+ if err != nil {
+ panic(err)
+ }
+
+ p.input, err = guessEncodingAndConvert(buffer)
+ if err != nil {
+ panic(err)
+ }
+
+ val := p.parsePlistValue()
+
+ p.skipWhitespaceAndComments()
+ if p.peek() != eof {
+ if _, ok := val.(cfString); !ok {
+ p.error("garbage after end of document")
+ }
+
+ // Try parsing as .strings.
+ // See -[NSDictionary propertyListFromStringsFileFormat:].
+ p.start = 0
+ p.pos = 0
+ val = p.parseDictionary(true)
+ }
+
+ pval = val
+
+ return
+}
+
+const eof rune = -1
+
+func (p *textPlistParser) error(e string, args ...interface{}) {
+ line := strings.Count(p.input[:p.pos], "\n")
+ char := p.pos - strings.LastIndex(p.input[:p.pos], "\n") - 1
+ panic(fmt.Errorf("%s at line %d character %d", fmt.Sprintf(e, args...), line, char))
+}
+
+func (p *textPlistParser) next() rune {
+ if int(p.pos) >= len(p.input) {
+ p.width = 0
+ return eof
+ }
+ r, w := utf8.DecodeRuneInString(p.input[p.pos:])
+ p.width = w
+ p.pos += p.width
+ return r
+}
+
+func (p *textPlistParser) backup() {
+ p.pos -= p.width
+}
+
+func (p *textPlistParser) peek() rune {
+ r := p.next()
+ p.backup()
+ return r
+}
+
+func (p *textPlistParser) emit() string {
+ s := p.input[p.start:p.pos]
+ p.start = p.pos
+ return s
+}
+
+func (p *textPlistParser) ignore() {
+ p.start = p.pos
+}
+
+func (p *textPlistParser) empty() bool {
+ return p.start == p.pos
+}
+
+func (p *textPlistParser) scanUntil(ch rune) {
+ if x := strings.IndexRune(p.input[p.pos:], ch); x >= 0 {
+ p.pos += x
+ return
+ }
+ p.pos = len(p.input)
+}
+
+func (p *textPlistParser) scanUntilAny(chs string) {
+ if x := strings.IndexAny(p.input[p.pos:], chs); x >= 0 {
+ p.pos += x
+ return
+ }
+ p.pos = len(p.input)
+}
+
+func (p *textPlistParser) scanCharactersInSet(ch *characterSet) {
+ for ch.Contains(p.next()) {
+ }
+ p.backup()
+}
+
+func (p *textPlistParser) scanCharactersNotInSet(ch *characterSet) {
+ var r rune
+ for {
+ r = p.next()
+ if r == eof || ch.Contains(r) {
+ break
+ }
+ }
+ p.backup()
+}
+
+func (p *textPlistParser) skipWhitespaceAndComments() {
+ for {
+ p.scanCharactersInSet(&whitespace)
+ if strings.HasPrefix(p.input[p.pos:], "//") {
+ p.scanCharactersNotInSet(&newlineCharacterSet)
+ } else if strings.HasPrefix(p.input[p.pos:], "/*") {
+ if x := strings.Index(p.input[p.pos:], "*/"); x >= 0 {
+ p.pos += x + 2 // skip the */ as well
+ continue // consume more whitespace
+ } else {
+ p.error("unexpected eof in block comment")
+ }
+ } else {
+ break
+ }
+ }
+ p.ignore()
+}
+
+func (p *textPlistParser) parseOctalDigits(max int) uint64 {
+ var val uint64
+
+ for i := 0; i < max; i++ {
+ r := p.next()
+
+ if r >= '0' && r <= '7' {
+ val <<= 3
+ val |= uint64((r - '0'))
+ } else {
+ p.backup()
+ break
+ }
+ }
+ return val
+}
+
+func (p *textPlistParser) parseHexDigits(max int) uint64 {
+ var val uint64
+
+ for i := 0; i < max; i++ {
+ r := p.next()
+
+ if r >= 'a' && r <= 'f' {
+ val <<= 4
+ val |= 10 + uint64((r - 'a'))
+ } else if r >= 'A' && r <= 'F' {
+ val <<= 4
+ val |= 10 + uint64((r - 'A'))
+ } else if r >= '0' && r <= '9' {
+ val <<= 4
+ val |= uint64((r - '0'))
+ } else {
+ p.backup()
+ break
+ }
+ }
+ return val
+}
+
+// the \ has already been consumed
+func (p *textPlistParser) parseEscape() string {
+ var s string
+ switch p.next() {
+ case 'a':
+ s = "\a"
+ case 'b':
+ s = "\b"
+ case 'v':
+ s = "\v"
+ case 'f':
+ s = "\f"
+ case 't':
+ s = "\t"
+ case 'r':
+ s = "\r"
+ case 'n':
+ s = "\n"
+ case '\\':
+ s = `\`
+ case '"':
+ s = `"`
+ case 'x': // This is our extension.
+ s = string(rune(p.parseHexDigits(2)))
+ case 'u', 'U': // 'u' is a GNUstep extension.
+ s = string(rune(p.parseHexDigits(4)))
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ p.backup() // we've already consumed one of the digits
+ s = string(rune(p.parseOctalDigits(3)))
+ default:
+ p.backup() // everything else should be accepted
+ }
+ p.ignore() // skip the entire escape sequence
+ return s
+}
+
+// the " has already been consumed
+func (p *textPlistParser) parseQuotedString() cfString {
+ p.ignore() // ignore the "
+
+ slowPath := false
+ s := ""
+
+ for {
+ p.scanUntilAny(`"\`)
+ switch p.peek() {
+ case eof:
+ p.error("unexpected eof in quoted string")
+ case '"':
+ section := p.emit()
+ p.pos++ // skip "
+ if !slowPath {
+ return cfString(section)
+ } else {
+ s += section
+ return cfString(s)
+ }
+ case '\\':
+ slowPath = true
+ s += p.emit()
+ p.next() // consume \
+ s += p.parseEscape()
+ }
+ }
+}
+
+func (p *textPlistParser) parseUnquotedString() cfString {
+ p.scanCharactersNotInSet(&gsQuotable)
+ s := p.emit()
+ if s == "" {
+ p.error("invalid unquoted string (found an unquoted character that should be quoted?)")
+ }
+
+ return cfString(s)
+}
+
+// the { has already been consumed
+func (p *textPlistParser) parseDictionary(ignoreEof bool) cfValue {
+ //p.ignore() // ignore the {
+ var keypv cfValue
+ keys := make([]string, 0, 32)
+ values := make([]cfValue, 0, 32)
+outer:
+ for {
+ p.skipWhitespaceAndComments()
+
+ switch p.next() {
+ case eof:
+ if !ignoreEof {
+ p.error("unexpected eof in dictionary")
+ }
+ fallthrough
+ case '}':
+ break outer
+ case '"':
+ keypv = p.parseQuotedString()
+ default:
+ p.backup()
+ keypv = p.parseUnquotedString()
+ }
+
+ // INVARIANT: key can't be nil; parseQuoted and parseUnquoted
+ // will panic out before they return nil.
+
+ p.skipWhitespaceAndComments()
+
+ var val cfValue
+ n := p.next()
+ if n == ';' {
+ // This is supposed to be .strings-specific.
+ // GNUstep parses this as an empty string.
+ // Apple copies the key like we do.
+ val = keypv
+ } else if n == '=' {
+ // whitespace is consumed within
+ val = p.parsePlistValue()
+
+ p.skipWhitespaceAndComments()
+
+ if p.next() != ';' {
+ p.error("missing ; in dictionary")
+ }
+ } else {
+ p.error("missing = in dictionary")
+ }
+
+ keys = append(keys, string(keypv.(cfString)))
+ values = append(values, val)
+ }
+
+ dict := &cfDictionary{keys: keys, values: values}
+ return dict.maybeUID(p.format == OpenStepFormat)
+}
+
+// the ( has already been consumed
+func (p *textPlistParser) parseArray() *cfArray {
+ //p.ignore() // ignore the (
+ values := make([]cfValue, 0, 32)
+outer:
+ for {
+ p.skipWhitespaceAndComments()
+
+ switch p.next() {
+ case eof:
+ p.error("unexpected eof in array")
+ case ')':
+ break outer // done here
+ case ',':
+ continue // restart; ,) is valid and we don't want to blow it
+ default:
+ p.backup()
+ }
+
+ pval := p.parsePlistValue() // whitespace is consumed within
+ if str, ok := pval.(cfString); ok && string(str) == "" {
+ // Empty strings in arrays are apparently skipped?
+ // TODO: Figure out why this was implemented.
+ continue
+ }
+ values = append(values, pval)
+ }
+ return &cfArray{values}
+}
+
+// the <* have already been consumed
+func (p *textPlistParser) parseGNUStepValue() cfValue {
+ typ := p.next()
+
+ if typ == '>' || typ == eof { // <*>, <*EOF
+ p.error("invalid GNUStep extended value")
+ }
+
+ if typ != 'I' && typ != 'R' && typ != 'B' && typ != 'D' {
+ // early out: no need to collect the value if we'll fail to understand it
+ p.error("unknown GNUStep extended value type `" + string(typ) + "'")
+ }
+
+ if p.peek() == '"' { // <*x"
+ p.next()
+ }
+
+ p.ignore()
+ p.scanUntil('>')
+
+ if p.peek() == eof { // <*xEOF or <*x"EOF
+ p.error("unterminated GNUStep extended value")
+ }
+
+ if p.empty() { // <*x>, <*x"">
+ p.error("empty GNUStep extended value")
+ }
+
+ v := p.emit()
+ p.next() // consume the >
+
+ if v[len(v)-1] == '"' {
+ // GNUStep tolerates malformed quoted values, as in <*I5"> and <*I"5>
+ // It purportedly does so by stripping the trailing quote
+ v = v[:len(v)-1]
+ }
+
+ switch typ {
+ case 'I':
+ if v[0] == '-' {
+ n := mustParseInt(v, 10, 64)
+ return &cfNumber{signed: true, value: uint64(n)}
+ } else {
+ n := mustParseUint(v, 10, 64)
+ return &cfNumber{signed: false, value: n}
+ }
+ case 'R':
+ n := mustParseFloat(v, 64)
+ return &cfReal{wide: true, value: n} // TODO(DH) 32/64
+ case 'B':
+ b := v[0] == 'Y'
+ return cfBoolean(b)
+ case 'D':
+ t, err := time.Parse(textPlistTimeLayout, v)
+ if err != nil {
+ p.error(err.Error())
+ }
+
+ return cfDate(t.In(time.UTC))
+ }
+ // We should never get here; we checked the type above
+ return nil
+}
+
+// the <[ have already been consumed
+func (p *textPlistParser) parseGNUStepBase64() cfData {
+ p.ignore()
+ p.scanUntil(']')
+ v := p.emit()
+
+ if p.next() != ']' {
+ p.error("invalid GNUStep base64 data (expected ']')")
+ }
+
+ if p.next() != '>' {
+ p.error("invalid GNUStep base64 data (expected '>')")
+ }
+
+ // Emulate NSDataBase64DecodingIgnoreUnknownCharacters
+ filtered := strings.Map(base64ValidChars.Map, v)
+ data, err := base64.StdEncoding.DecodeString(filtered)
+ if err != nil {
+ p.error("invalid GNUStep base64 data: " + err.Error())
+ }
+ return cfData(data)
+}
+
+// The < has already been consumed
+func (p *textPlistParser) parseHexData() cfData {
+ buf := make([]byte, 256)
+ i := 0
+ c := 0
+
+ for {
+ r := p.next()
+ switch r {
+ case eof:
+ p.error("unexpected eof in data")
+ case '>':
+ if c&1 == 1 {
+ p.error("uneven number of hex digits in data")
+ }
+ p.ignore()
+ return cfData(buf[:i])
+ // Apple and GNUstep both want these in pairs. We are a bit more lax.
+ // GS accepts comments too, but that seems like a lot of work.
+ case ' ', '\t', '\n', '\r', '\u2028', '\u2029':
+ continue
+ }
+
+ buf[i] <<= 4
+ if r >= 'a' && r <= 'f' {
+ buf[i] |= 10 + byte((r - 'a'))
+ } else if r >= 'A' && r <= 'F' {
+ buf[i] |= 10 + byte((r - 'A'))
+ } else if r >= '0' && r <= '9' {
+ buf[i] |= byte((r - '0'))
+ } else {
+ p.error("unexpected hex digit `%c'", r)
+ }
+
+ c++
+ if c&1 == 0 {
+ i++
+ if i >= len(buf) {
+ realloc := make([]byte, len(buf)*2)
+ copy(realloc, buf)
+ buf = realloc
+ }
+ }
+ }
+}
+
+func (p *textPlistParser) parsePlistValue() cfValue {
+ for {
+ p.skipWhitespaceAndComments()
+
+ switch p.next() {
+ case eof:
+ return &cfDictionary{}
+ case '<':
+ switch p.next() {
+ case '*':
+ p.format = GNUStepFormat
+ return p.parseGNUStepValue()
+ case '[':
+ p.format = GNUStepFormat
+ return p.parseGNUStepBase64()
+ default:
+ p.backup()
+ return p.parseHexData()
+ }
+ case '"':
+ return p.parseQuotedString()
+ case '{':
+ return p.parseDictionary(false)
+ case '(':
+ return p.parseArray()
+ default:
+ p.backup()
+ return p.parseUnquotedString()
+ }
+ }
+}
+
+func newTextPlistParser(r io.Reader) *textPlistParser {
+ return &textPlistParser{
+ reader: r,
+ format: OpenStepFormat,
+ }
+}
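
The text parser above accepts OpenStep-style documents as well as GNUstep's typed-scalar extensions (<*I...> integers, <*R...> reals, <*BY>/<*BN> booleans, <*D...> dates and <[...]> base64 data). A hedged decoding sketch; the Limits type and the document are illustrative only.

    package main

    import (
        "bytes"
        "fmt"

        "howett.net/plist"
    )

    // Limits is a hypothetical target for a GNUstep-style text plist.
    type Limits struct {
        MaxDisks int  `plist:"maxDisks"`
        Enabled  bool `plist:"enabled"`
    }

    func main() {
        doc := `{ maxDisks = <*I8>; enabled = <*BY>; }`

        var l Limits
        dec := plist.NewDecoder(bytes.NewReader([]byte(doc)))
        if err := dec.Decode(&l); err != nil {
            panic(err)
        }
        fmt.Println(l.MaxDisks, l.Enabled, dec.Format == plist.GNUStepFormat) // 8 true true
    }
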
diff --git a/vendor/howett.net/plist/text_tables.go b/vendor/howett.net/plist/text_tables.go
new file mode 100644
index 00000000..2bdd7ba9
--- /dev/null
+++ b/vendor/howett.net/plist/text_tables.go
@@ -0,0 +1,61 @@
+package plist
+
+type characterSet [4]uint64
+
+func (s *characterSet) Map(ch rune) rune {
+ if s.Contains(ch) {
+ return ch
+ } else {
+ return -1
+ }
+}
+
+func (s *characterSet) Contains(ch rune) bool {
+ return ch >= 0 && ch <= 255 && s.ContainsByte(byte(ch))
+}
+
+func (s *characterSet) ContainsByte(ch byte) bool {
+ return (s[ch/64]&(1<<(ch%64)) > 0)
+}
+
+// Bitmap of characters that must be inside a quoted string
+// when written to an old-style property list
+// Low bits represent lower characters, and each uint64 represents 64 characters.
+var gsQuotable = characterSet{
+ 0x78001385ffffffff,
+ 0xa800000138000000,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+}
+
+// 7f instead of 3f in the top line: CFOldStylePlist.c says . is valid, but they quote it.
+ // ef instead of 6f in the top line: ' will be quoted
+var osQuotable = characterSet{
+ 0xf4007fefffffffff,
+ 0xf8000001f8000001,
+ 0xffffffffffffffff,
+ 0xffffffffffffffff,
+}
+
+var whitespace = characterSet{
+ 0x0000000100003f00,
+ 0x0000000000000000,
+ 0x0000000000000000,
+ 0x0000000000000000,
+}
+
+var newlineCharacterSet = characterSet{
+ 0x0000000000002400,
+ 0x0000000000000000,
+ 0x0000000000000000,
+ 0x0000000000000000,
+}
+
+// Bitmap of characters that are valid in base64-encoded strings.
+// Used to filter out non-b64 characters to emulate NSDataBase64DecodingIgnoreUnknownCharacters
+var base64ValidChars = characterSet{
+ 0x23ff880000000000,
+ 0x07fffffe07fffffe,
+ 0x0000000000000000,
+ 0x0000000000000000,
+}
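
Each table above is a 256-bit bitmap packed into four uint64 words: byte value ch is a member when bit ch%64 of word ch/64 is set. A small hedged sketch of the same layout, built programmatically instead of as hand-written hex literals; the charSet type and the example set are ours, not the library's tables.

    package main

    import "fmt"

    // charSet mirrors the four-word bitmap layout used above.
    type charSet [4]uint64

    func (s *charSet) add(ch byte) { s[ch/64] |= 1 << (ch % 64) }

    func (s *charSet) containsByte(ch byte) bool { return s[ch/64]&(1<<(ch%64)) != 0 }

    func main() {
        // A hypothetical whitespace-like set.
        var ws charSet
        for _, ch := range []byte{'\t', '\n', '\r', ' '} {
            ws.add(ch)
        }
        fmt.Println(ws.containsByte(' '), ws.containsByte('a')) // true false
        fmt.Printf("%#x\n", ws[0])                              // low word of the bitmap
    }
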
diff --git a/vendor/howett.net/plist/typeinfo.go b/vendor/howett.net/plist/typeinfo.go
new file mode 100644
index 00000000..f0b920f8
--- /dev/null
+++ b/vendor/howett.net/plist/typeinfo.go
@@ -0,0 +1,170 @@
+package plist
+
+import (
+ "reflect"
+ "strings"
+ "sync"
+)
+
+// typeInfo holds details for the plist representation of a type.
+type typeInfo struct {
+ fields []fieldInfo
+}
+
+// fieldInfo holds details for the plist representation of a single field.
+type fieldInfo struct {
+ idx []int
+ name string
+ omitEmpty bool
+}
+
+var tinfoMap = make(map[reflect.Type]*typeInfo)
+var tinfoLock sync.RWMutex
+
+// getTypeInfo returns the typeInfo structure with details necessary
+// for marshalling and unmarshalling typ.
+func getTypeInfo(typ reflect.Type) (*typeInfo, error) {
+ tinfoLock.RLock()
+ tinfo, ok := tinfoMap[typ]
+ tinfoLock.RUnlock()
+ if ok {
+ return tinfo, nil
+ }
+ tinfo = &typeInfo{}
+ if typ.Kind() == reflect.Struct {
+ n := typ.NumField()
+ for i := 0; i < n; i++ {
+ f := typ.Field(i)
+ if f.PkgPath != "" || f.Tag.Get("plist") == "-" {
+ continue // Private field
+ }
+
+ // For embedded structs, embed its fields.
+ if f.Anonymous {
+ t := f.Type
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ if t.Kind() == reflect.Struct {
+ inner, err := getTypeInfo(t)
+ if err != nil {
+ return nil, err
+ }
+ for _, finfo := range inner.fields {
+ finfo.idx = append([]int{i}, finfo.idx...)
+ if err := addFieldInfo(typ, tinfo, &finfo); err != nil {
+ return nil, err
+ }
+ }
+ continue
+ }
+ }
+
+ finfo, err := structFieldInfo(typ, &f)
+ if err != nil {
+ return nil, err
+ }
+
+ // Add the field if it doesn't conflict with other fields.
+ if err := addFieldInfo(typ, tinfo, finfo); err != nil {
+ return nil, err
+ }
+ }
+ }
+ tinfoLock.Lock()
+ tinfoMap[typ] = tinfo
+ tinfoLock.Unlock()
+ return tinfo, nil
+}
+
+// structFieldInfo builds and returns a fieldInfo for f.
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) {
+ finfo := &fieldInfo{idx: f.Index}
+
+ // Grab the plist tag; unlike encoding/xml, there is no namespace to split off.
+ tag := f.Tag.Get("plist")
+
+ // Parse flags.
+ tokens := strings.Split(tag, ",")
+ tag = tokens[0]
+ if len(tokens) > 1 {
+ tag = tokens[0]
+ for _, flag := range tokens[1:] {
+ switch flag {
+ case "omitempty":
+ finfo.omitEmpty = true
+ }
+ }
+ }
+
+ if tag == "" {
+ // If the name part of the tag is completely empty,
+ // use the field name
+ finfo.name = f.Name
+ return finfo, nil
+ }
+
+ finfo.name = tag
+ return finfo, nil
+}
+
+// addFieldInfo adds finfo to tinfo.fields if there are no
+// conflicts, or if conflicts arise from previous fields that were
+// obtained from deeper embedded structures than finfo. In the latter
+// case, the conflicting entries are dropped.
+// A conflict occurs when the path (parent + name) to a field is
+// itself a prefix of another path, or when two paths match exactly.
+// It is okay for field paths to share a common, shorter prefix.
+func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error {
+ var conflicts []int
+ // First, figure all conflicts. Most working code will have none.
+ for i := range tinfo.fields {
+ oldf := &tinfo.fields[i]
+ if newf.name == oldf.name {
+ conflicts = append(conflicts, i)
+ }
+ }
+
+ // Without conflicts, add the new field and return.
+ if conflicts == nil {
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+ }
+
+ // If any conflict is shallower, ignore the new field.
+ // This matches the Go field resolution on embedding.
+ for _, i := range conflicts {
+ if len(tinfo.fields[i].idx) < len(newf.idx) {
+ return nil
+ }
+ }
+
+ // Otherwise, the new field is shallower, and thus takes precedence,
+ // so drop the conflicting fields from tinfo and append the new one.
+ for c := len(conflicts) - 1; c >= 0; c-- {
+ i := conflicts[c]
+ copy(tinfo.fields[i:], tinfo.fields[i+1:])
+ tinfo.fields = tinfo.fields[:len(tinfo.fields)-1]
+ }
+ tinfo.fields = append(tinfo.fields, *newf)
+ return nil
+}
+
+// value returns v's field value corresponding to finfo.
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes
+// and dereferences pointers as necessary.
+func (finfo *fieldInfo) value(v reflect.Value) reflect.Value {
+ for i, x := range finfo.idx {
+ if i > 0 {
+ t := v.Type()
+ if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct {
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+ v = v.Elem()
+ }
+ }
+ v = v.Field(x)
+ }
+ return v
+}
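
The conflict handling in addFieldInfo above means a field declared on the outer struct shadows a same-named field promoted from an embedded struct, matching Go's own field resolution. A hedged sketch; the Base and Node types are hypothetical, and the expected output assumes the GNUstep text generator with no indentation.

    package main

    import (
        "fmt"

        "howett.net/plist"
    )

    type Base struct {
        Name string `plist:"name"`
        Kind string `plist:"kind"`
    }

    // Node's own Name (shallower) wins; the promoted Base.Name is dropped.
    type Node struct {
        Base
        Name string `plist:"name"`
    }

    func main() {
        n := Node{Base: Base{Name: "inner", Kind: "disk"}, Name: "outer"}
        out, err := plist.Marshal(n, plist.GNUStepFormat)
        if err != nil {
            panic(err)
        }
        fmt.Println(string(out)) // {kind=disk;name=outer;}
    }
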
diff --git a/vendor/howett.net/plist/unmarshal.go b/vendor/howett.net/plist/unmarshal.go
new file mode 100644
index 00000000..63b4b1d5
--- /dev/null
+++ b/vendor/howett.net/plist/unmarshal.go
@@ -0,0 +1,331 @@
+package plist
+
+import (
+ "encoding"
+ "fmt"
+ "reflect"
+ "runtime"
+ "time"
+)
+
+type incompatibleDecodeTypeError struct {
+ dest reflect.Type
+ src string // type name (from cfValue)
+}
+
+func (u *incompatibleDecodeTypeError) Error() string {
+ return fmt.Sprintf("plist: type mismatch: tried to decode plist type `%v' into value of type `%v'", u.src, u.dest)
+}
+
+var (
+ plistUnmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+ textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
+ uidType = reflect.TypeOf(UID(0))
+)
+
+func isEmptyInterface(v reflect.Value) bool {
+ return v.Kind() == reflect.Interface && v.NumMethod() == 0
+}
+
+func (p *Decoder) unmarshalPlistInterface(pval cfValue, unmarshalable Unmarshaler) {
+ err := unmarshalable.UnmarshalPlist(func(i interface{}) (err error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ err = r.(error)
+ }
+ }()
+ p.unmarshal(pval, reflect.ValueOf(i))
+ return
+ })
+
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (p *Decoder) unmarshalTextInterface(pval cfString, unmarshalable encoding.TextUnmarshaler) {
+ err := unmarshalable.UnmarshalText([]byte(pval))
+ if err != nil {
+ panic(err)
+ }
+}
+
+func (p *Decoder) unmarshalTime(pval cfDate, val reflect.Value) {
+ val.Set(reflect.ValueOf(time.Time(pval)))
+}
+
+func (p *Decoder) unmarshalLaxString(s string, val reflect.Value) {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ i := mustParseInt(s, 10, 64)
+ val.SetInt(i)
+ return
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ i := mustParseUint(s, 10, 64)
+ val.SetUint(i)
+ return
+ case reflect.Float32, reflect.Float64:
+ f := mustParseFloat(s, 64)
+ val.SetFloat(f)
+ return
+ case reflect.Bool:
+ b := mustParseBool(s)
+ val.SetBool(b)
+ return
+ case reflect.Struct:
+ if val.Type() == timeType {
+ t, err := time.Parse(textPlistTimeLayout, s)
+ if err != nil {
+ panic(err)
+ }
+ val.Set(reflect.ValueOf(t.In(time.UTC)))
+ return
+ }
+ fallthrough
+ default:
+ panic(&incompatibleDecodeTypeError{val.Type(), "string"})
+ }
+}
+
+func (p *Decoder) unmarshal(pval cfValue, val reflect.Value) {
+ if pval == nil {
+ return
+ }
+
+ if val.Kind() == reflect.Ptr {
+ if val.IsNil() {
+ val.Set(reflect.New(val.Type().Elem()))
+ }
+ val = val.Elem()
+ }
+
+ if isEmptyInterface(val) {
+ v := p.valueInterface(pval)
+ val.Set(reflect.ValueOf(v))
+ return
+ }
+
+ incompatibleTypeError := &incompatibleDecodeTypeError{val.Type(), pval.typeName()}
+
+ // time.Time implements TextMarshaler, but we need to parse it as RFC3339
+ if date, ok := pval.(cfDate); ok {
+ if val.Type() == timeType {
+ p.unmarshalTime(date, val)
+ return
+ }
+ panic(incompatibleTypeError)
+ }
+
+ if receiver, can := implementsInterface(val, plistUnmarshalerType); can {
+ p.unmarshalPlistInterface(pval, receiver.(Unmarshaler))
+ return
+ }
+
+ if val.Type() != timeType {
+ if receiver, can := implementsInterface(val, textUnmarshalerType); can {
+ if str, ok := pval.(cfString); ok {
+ p.unmarshalTextInterface(str, receiver.(encoding.TextUnmarshaler))
+ } else {
+ panic(incompatibleTypeError)
+ }
+ return
+ }
+ }
+
+ typ := val.Type()
+
+ switch pval := pval.(type) {
+ case cfString:
+ if val.Kind() == reflect.String {
+ val.SetString(string(pval))
+ return
+ }
+ if p.lax {
+ p.unmarshalLaxString(string(pval), val)
+ return
+ }
+
+ panic(incompatibleTypeError)
+ case *cfNumber:
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val.SetInt(int64(pval.value))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val.SetUint(pval.value)
+ default:
+ panic(incompatibleTypeError)
+ }
+ case *cfReal:
+ if val.Kind() == reflect.Float32 || val.Kind() == reflect.Float64 {
+ // TODO: Consider warning on a downcast (storing a 64-bit value in a 32-bit reflect)
+ val.SetFloat(pval.value)
+ } else {
+ panic(incompatibleTypeError)
+ }
+ case cfBoolean:
+ if val.Kind() == reflect.Bool {
+ val.SetBool(bool(pval))
+ } else {
+ panic(incompatibleTypeError)
+ }
+ case cfData:
+ if val.Kind() != reflect.Slice && val.Kind() != reflect.Array {
+ panic(incompatibleTypeError)
+ }
+
+ if typ.Elem().Kind() != reflect.Uint8 {
+ panic(incompatibleTypeError)
+ }
+
+ b := []byte(pval)
+ switch val.Kind() {
+ case reflect.Slice:
+ val.SetBytes(b)
+ case reflect.Array:
+ if val.Len() < len(b) {
+ panic(fmt.Errorf("plist: attempted to unmarshal %d bytes into a byte array of size %d", len(b), val.Len()))
+ }
+ sval := reflect.ValueOf(b)
+ reflect.Copy(val, sval)
+ }
+ case cfUID:
+ if val.Type() == uidType {
+ val.SetUint(uint64(pval))
+ } else {
+ switch val.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ val.SetInt(int64(pval))
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ val.SetUint(uint64(pval))
+ default:
+ panic(incompatibleTypeError)
+ }
+ }
+ case *cfArray:
+ p.unmarshalArray(pval, val)
+ case *cfDictionary:
+ p.unmarshalDictionary(pval, val)
+ }
+}
+
+func (p *Decoder) unmarshalArray(a *cfArray, val reflect.Value) {
+ var n int
+ if val.Kind() == reflect.Slice {
+ // Slice of element values.
+ // Grow slice.
+ cnt := len(a.values) + val.Len()
+ if cnt >= val.Cap() {
+ ncap := 2 * cnt
+ if ncap < 4 {
+ ncap = 4
+ }
+ new := reflect.MakeSlice(val.Type(), val.Len(), ncap)
+ reflect.Copy(new, val)
+ val.Set(new)
+ }
+ n = val.Len()
+ val.SetLen(cnt)
+ } else if val.Kind() == reflect.Array {
+ if len(a.values) > val.Cap() {
+ panic(fmt.Errorf("plist: attempted to unmarshal %d values into an array of size %d", len(a.values), val.Cap()))
+ }
+ } else {
+ panic(&incompatibleDecodeTypeError{val.Type(), a.typeName()})
+ }
+
+ // Recur to read element into slice.
+ for _, sval := range a.values {
+ p.unmarshal(sval, val.Index(n))
+ n++
+ }
+ return
+}
+
+func (p *Decoder) unmarshalDictionary(dict *cfDictionary, val reflect.Value) {
+ typ := val.Type()
+ switch val.Kind() {
+ case reflect.Struct:
+ tinfo, err := getTypeInfo(typ)
+ if err != nil {
+ panic(err)
+ }
+
+ entries := make(map[string]cfValue, len(dict.keys))
+ for i, k := range dict.keys {
+ sval := dict.values[i]
+ entries[k] = sval
+ }
+
+ for _, finfo := range tinfo.fields {
+ p.unmarshal(entries[finfo.name], finfo.value(val))
+ }
+ case reflect.Map:
+ if val.IsNil() {
+ val.Set(reflect.MakeMap(typ))
+ }
+
+ for i, k := range dict.keys {
+ sval := dict.values[i]
+
+ keyv := reflect.ValueOf(k).Convert(typ.Key())
+ mapElem := reflect.New(typ.Elem()).Elem()
+
+ p.unmarshal(sval, mapElem)
+ val.SetMapIndex(keyv, mapElem)
+ }
+ default:
+ panic(&incompatibleDecodeTypeError{typ, dict.typeName()})
+ }
+}
+
+/* *Interface is modelled after encoding/json */
+func (p *Decoder) valueInterface(pval cfValue) interface{} {
+ switch pval := pval.(type) {
+ case cfString:
+ return string(pval)
+ case *cfNumber:
+ if pval.signed {
+ return int64(pval.value)
+ }
+ return pval.value
+ case *cfReal:
+ if pval.wide {
+ return pval.value
+ } else {
+ return float32(pval.value)
+ }
+ case cfBoolean:
+ return bool(pval)
+ case *cfArray:
+ return p.arrayInterface(pval)
+ case *cfDictionary:
+ return p.dictionaryInterface(pval)
+ case cfData:
+ return []byte(pval)
+ case cfDate:
+ return time.Time(pval)
+ case cfUID:
+ return UID(pval)
+ }
+ return nil
+}
+
+func (p *Decoder) arrayInterface(a *cfArray) []interface{} {
+ out := make([]interface{}, len(a.values))
+ for i, subv := range a.values {
+ out[i] = p.valueInterface(subv)
+ }
+ return out
+}
+
+func (p *Decoder) dictionaryInterface(dict *cfDictionary) map[string]interface{} {
+ out := make(map[string]interface{})
+ for i, k := range dict.keys {
+ subv := dict.values[i]
+ out[k] = p.valueInterface(subv)
+ }
+ return out
+}
diff --git a/vendor/howett.net/plist/util.go b/vendor/howett.net/plist/util.go
new file mode 100644
index 00000000..d4e437a4
--- /dev/null
+++ b/vendor/howett.net/plist/util.go
@@ -0,0 +1,25 @@
+package plist
+
+import "io"
+
+type countedWriter struct {
+ io.Writer
+ nbytes int
+}
+
+func (w *countedWriter) Write(p []byte) (int, error) {
+ n, err := w.Writer.Write(p)
+ w.nbytes += n
+ return n, err
+}
+
+func (w *countedWriter) BytesWritten() int {
+ return w.nbytes
+}
+
+func unsignedGetBase(s string) (string, int) {
+ if len(s) > 1 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X') {
+ return s[2:], 16
+ }
+ return s, 10
+}
diff --git a/vendor/howett.net/plist/xml_generator.go b/vendor/howett.net/plist/xml_generator.go
new file mode 100644
index 00000000..30597c16
--- /dev/null
+++ b/vendor/howett.net/plist/xml_generator.go
@@ -0,0 +1,178 @@
+package plist
+
+import (
+ "bufio"
+ "encoding/base64"
+ "encoding/xml"
+ "io"
+ "math"
+ "strconv"
+ "time"
+)
+
+const (
+ xmlHEADER string = `<?xml version="1.0" encoding="UTF-8"?>` + "\n"
+ xmlDOCTYPE = `<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">` + "\n"
+ xmlArrayTag = "array"
+ xmlDataTag = "data"
+ xmlDateTag = "date"
+ xmlDictTag = "dict"
+ xmlFalseTag = "false"
+ xmlIntegerTag = "integer"
+ xmlKeyTag = "key"
+ xmlPlistTag = "plist"
+ xmlRealTag = "real"
+ xmlStringTag = "string"
+ xmlTrueTag = "true"
+)
+
+func formatXMLFloat(f float64) string {
+ switch {
+ case math.IsInf(f, 1):
+ return "inf"
+ case math.IsInf(f, -1):
+ return "-inf"
+ case math.IsNaN(f):
+ return "nan"
+ }
+ return strconv.FormatFloat(f, 'g', -1, 64)
+}
+
+type xmlPlistGenerator struct {
+ *bufio.Writer
+
+ indent string
+ depth int
+ putNewline bool
+}
+
+func (p *xmlPlistGenerator) generateDocument(root cfValue) {
+ p.WriteString(xmlHEADER)
+ p.WriteString(xmlDOCTYPE)
+
+ p.openTag(`plist version="1.0"`)
+ p.writePlistValue(root)
+ p.closeTag(xmlPlistTag)
+ p.Flush()
+}
+
+func (p *xmlPlistGenerator) openTag(n string) {
+ p.writeIndent(1)
+ p.WriteByte('<')
+ p.WriteString(n)
+ p.WriteByte('>')
+}
+
+func (p *xmlPlistGenerator) closeTag(n string) {
+ p.writeIndent(-1)
+ p.WriteString("</")
+ p.WriteString(n)
+ p.WriteByte('>')
+}
+
+func (p *xmlPlistGenerator) element(n string, v string) {
+ p.writeIndent(0)
+ if len(v) == 0 {
+ p.WriteByte('<')
+ p.WriteString(n)
+ p.WriteString("/>")
+ } else {
+ p.WriteByte('<')
+ p.WriteString(n)
+ p.WriteByte('>')
+
+ err := xml.EscapeText(p.Writer, []byte(v))
+ if err != nil {
+ panic(err)
+ }
+
+ p.WriteString("")
+ p.WriteString(n)
+ p.WriteByte('>')
+ }
+}
+
+func (p *xmlPlistGenerator) writeDictionary(dict *cfDictionary) {
+ dict.sort()
+ p.openTag(xmlDictTag)
+ for i, k := range dict.keys {
+ p.element(xmlKeyTag, k)
+ p.writePlistValue(dict.values[i])
+ }
+ p.closeTag(xmlDictTag)
+}
+
+func (p *xmlPlistGenerator) writeArray(a *cfArray) {
+ p.openTag(xmlArrayTag)
+ for _, v := range a.values {
+ p.writePlistValue(v)
+ }
+ p.closeTag(xmlArrayTag)
+}
+
+func (p *xmlPlistGenerator) writePlistValue(pval cfValue) {
+ if pval == nil {
+ return
+ }
+
+ switch pval := pval.(type) {
+ case cfString:
+ p.element(xmlStringTag, string(pval))
+ case *cfNumber:
+ if pval.signed {
+ p.element(xmlIntegerTag, strconv.FormatInt(int64(pval.value), 10))
+ } else {
+ p.element(xmlIntegerTag, strconv.FormatUint(pval.value, 10))
+ }
+ case *cfReal:
+ p.element(xmlRealTag, formatXMLFloat(pval.value))
+ case cfBoolean:
+ if bool(pval) {
+ p.element(xmlTrueTag, "")
+ } else {
+ p.element(xmlFalseTag, "")
+ }
+ case cfData:
+ p.element(xmlDataTag, base64.StdEncoding.EncodeToString([]byte(pval)))
+ case cfDate:
+ p.element(xmlDateTag, time.Time(pval).In(time.UTC).Format(time.RFC3339))
+ case *cfDictionary:
+ p.writeDictionary(pval)
+ case *cfArray:
+ p.writeArray(pval)
+ case cfUID:
+ p.writePlistValue(pval.toDict())
+ }
+}
+
+func (p *xmlPlistGenerator) writeIndent(delta int) {
+ if len(p.indent) == 0 {
+ return
+ }
+
+ if delta < 0 {
+ p.depth--
+ }
+
+ if p.putNewline {
+ // from encoding/xml/marshal.go; it seems to be intended
+ // to suppress the first newline.
+ p.WriteByte('\n')
+ } else {
+ p.putNewline = true
+ }
+ for i := 0; i < p.depth; i++ {
+ p.WriteString(p.indent)
+ }
+ if delta > 0 {
+ p.depth++
+ }
+}
+
+func (p *xmlPlistGenerator) Indent(i string) {
+ p.indent = i
+}
+
+func newXMLPlistGenerator(w io.Writer) *xmlPlistGenerator {
+ return &xmlPlistGenerator{Writer: bufio.NewWriter(w)}
+}
diff --git a/vendor/howett.net/plist/xml_parser.go b/vendor/howett.net/plist/xml_parser.go
new file mode 100644
index 00000000..7415ef3e
--- /dev/null
+++ b/vendor/howett.net/plist/xml_parser.go
@@ -0,0 +1,211 @@
+package plist
+
+import (
+ "encoding/base64"
+ "encoding/xml"
+ "errors"
+ "fmt"
+ "io"
+ "runtime"
+ "strings"
+ "time"
+)
+
+type xmlPlistParser struct {
+ reader io.Reader
+ xmlDecoder *xml.Decoder
+ whitespaceReplacer *strings.Replacer
+ ntags int
+}
+
+func (p *xmlPlistParser) parseDocument() (pval cfValue, parseError error) {
+ defer func() {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+ if _, ok := r.(invalidPlistError); ok {
+ parseError = r.(error)
+ } else {
+ // Wrap all non-invalid-plist errors.
+ parseError = plistParseError{"XML", r.(error)}
+ }
+ }
+ }()
+ for {
+ if token, err := p.xmlDecoder.Token(); err == nil {
+ if element, ok := token.(xml.StartElement); ok {
+ pval = p.parseXMLElement(element)
+ if p.ntags == 0 {
+ panic(invalidPlistError{"XML", errors.New("no elements encountered")})
+ }
+ return
+ }
+ } else {
+ // The first XML parse turned out to be invalid:
+ // we do not have an XML property list.
+ panic(invalidPlistError{"XML", err})
+ }
+ }
+}
+
+func (p *xmlPlistParser) parseXMLElement(element xml.StartElement) cfValue {
+ var charData xml.CharData
+ switch element.Name.Local {
+ case "plist":
+ p.ntags++
+ for {
+ token, err := p.xmlDecoder.Token()
+ if err != nil {
+ panic(err)
+ }
+
+ if el, ok := token.(xml.EndElement); ok && el.Name.Local == "plist" {
+ break
+ }
+
+ if el, ok := token.(xml.StartElement); ok {
+ return p.parseXMLElement(el)
+ }
+ }
+ return nil
+ case "string":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ return cfString(charData)
+ case "integer":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ s := string(charData)
+ if len(s) == 0 {
+ panic(errors.New("invalid empty <integer/>"))
+ }
+
+ if s[0] == '-' {
+ s, base := unsignedGetBase(s[1:])
+ n := mustParseInt("-"+s, base, 64)
+ return &cfNumber{signed: true, value: uint64(n)}
+ } else {
+ s, base := unsignedGetBase(s)
+ n := mustParseUint(s, base, 64)
+ return &cfNumber{signed: false, value: n}
+ }
+ case "real":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ n := mustParseFloat(string(charData), 64)
+ return &cfReal{wide: true, value: n}
+ case "true", "false":
+ p.ntags++
+ p.xmlDecoder.Skip()
+
+ b := element.Name.Local == "true"
+ return cfBoolean(b)
+ case "date":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ t, err := time.ParseInLocation(time.RFC3339, string(charData), time.UTC)
+ if err != nil {
+ panic(err)
+ }
+
+ return cfDate(t)
+ case "data":
+ p.ntags++
+ err := p.xmlDecoder.DecodeElement(&charData, &element)
+ if err != nil {
+ panic(err)
+ }
+
+ str := p.whitespaceReplacer.Replace(string(charData))
+
+ l := base64.StdEncoding.DecodedLen(len(str))
+ bytes := make([]uint8, l)
+ l, err = base64.StdEncoding.Decode(bytes, []byte(str))
+ if err != nil {
+ panic(err)
+ }
+
+ return cfData(bytes[:l])
+ case "dict":
+ p.ntags++
+ var key *string
+ keys := make([]string, 0, 32)
+ values := make([]cfValue, 0, 32)
+ for {
+ token, err := p.xmlDecoder.Token()
+ if err != nil {
+ panic(err)
+ }
+
+ if el, ok := token.(xml.EndElement); ok && el.Name.Local == "dict" {
+ if key != nil {
+ panic(errors.New("missing value in dictionary"))
+ }
+ break
+ }
+
+ if el, ok := token.(xml.StartElement); ok {
+ if el.Name.Local == "key" {
+ var k string
+ p.xmlDecoder.DecodeElement(&k, &el)
+ key = &k
+ } else {
+ if key == nil {
+ panic(errors.New("missing key in dictionary"))
+ }
+ keys = append(keys, *key)
+ values = append(values, p.parseXMLElement(el))
+ key = nil
+ }
+ }
+ }
+
+ dict := &cfDictionary{keys: keys, values: values}
+ return dict.maybeUID(false)
+ case "array":
+ p.ntags++
+ values := make([]cfValue, 0, 10)
+ for {
+ token, err := p.xmlDecoder.Token()
+ if err != nil {
+ panic(err)
+ }
+
+ if el, ok := token.(xml.EndElement); ok && el.Name.Local == "array" {
+ break
+ }
+
+ if el, ok := token.(xml.StartElement); ok {
+ values = append(values, p.parseXMLElement(el))
+ }
+ }
+ return &cfArray{values}
+ }
+ err := fmt.Errorf("encountered unknown element %s", element.Name.Local)
+ if p.ntags == 0 {
+ // If our first XML tag is invalid, it might be an openstep data element, ala <abab> or <0101>
+ panic(invalidPlistError{"XML", err})
+ }
+ panic(err)
+}
+
+func newXMLPlistParser(r io.Reader) *xmlPlistParser {
+ return &xmlPlistParser{r, xml.NewDecoder(r), strings.NewReplacer("\t", "", "\n", "", " ", "", "\r", ""), 0}
+}
diff --git a/vendor/howett.net/plist/zerocopy.go b/vendor/howett.net/plist/zerocopy.go
new file mode 100644
index 00000000..999f401b
--- /dev/null
+++ b/vendor/howett.net/plist/zerocopy.go
@@ -0,0 +1,20 @@
+// +build !appengine
+
+package plist
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+func zeroCopy8BitString(buf []byte, off int, len int) string {
+ if len == 0 {
+ return ""
+ }
+
+ var s string
+ hdr := (*reflect.StringHeader)(unsafe.Pointer(&s))
+ hdr.Data = uintptr(unsafe.Pointer(&buf[off]))
+ hdr.Len = len
+ return s
+}
diff --git a/vendor/howett.net/plist/zerocopy_appengine.go b/vendor/howett.net/plist/zerocopy_appengine.go
new file mode 100644
index 00000000..dbd9a1ac
--- /dev/null
+++ b/vendor/howett.net/plist/zerocopy_appengine.go
@@ -0,0 +1,7 @@
+// +build appengine
+
+package plist
+
+func zeroCopy8BitString(buf []byte, off int, len int) string {
+ return string(buf[off : off+len])
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index f9bb6b68..c5dc4613 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,3 +1,6 @@
+# github.com/StackExchange/wmi v1.2.1
+## explicit; go 1.13
+github.com/StackExchange/wmi
# github.com/akutz/gofsutil v0.1.2
## explicit
github.com/akutz/gofsutil
@@ -19,9 +22,16 @@ github.com/davecgh/go-spew/spew
# github.com/fsnotify/fsnotify v1.6.0
## explicit; go 1.16
github.com/fsnotify/fsnotify
+# github.com/ghodss/yaml v1.0.0
+## explicit
+github.com/ghodss/yaml
# github.com/go-logr/logr v0.4.0
## explicit; go 1.14
github.com/go-logr/logr
+# github.com/go-ole/go-ole v1.2.6
+## explicit; go 1.12
+github.com/go-ole/go-ole
+github.com/go-ole/go-ole/oleutil
# github.com/go-openapi/errors v0.20.2
## explicit; go 1.14
github.com/go-openapi/errors
@@ -66,6 +76,32 @@ github.com/imdario/mergo
# github.com/inconshreveable/mousetrap v1.0.0
## explicit
github.com/inconshreveable/mousetrap
+# github.com/jaypipes/ghw v0.10.0
+## explicit; go 1.18
+github.com/jaypipes/ghw
+github.com/jaypipes/ghw/pkg/baseboard
+github.com/jaypipes/ghw/pkg/bios
+github.com/jaypipes/ghw/pkg/block
+github.com/jaypipes/ghw/pkg/chassis
+github.com/jaypipes/ghw/pkg/context
+github.com/jaypipes/ghw/pkg/cpu
+github.com/jaypipes/ghw/pkg/gpu
+github.com/jaypipes/ghw/pkg/linuxdmi
+github.com/jaypipes/ghw/pkg/linuxpath
+github.com/jaypipes/ghw/pkg/marshal
+github.com/jaypipes/ghw/pkg/memory
+github.com/jaypipes/ghw/pkg/net
+github.com/jaypipes/ghw/pkg/option
+github.com/jaypipes/ghw/pkg/pci
+github.com/jaypipes/ghw/pkg/pci/address
+github.com/jaypipes/ghw/pkg/product
+github.com/jaypipes/ghw/pkg/snapshot
+github.com/jaypipes/ghw/pkg/topology
+github.com/jaypipes/ghw/pkg/unitutil
+github.com/jaypipes/ghw/pkg/util
+# github.com/jaypipes/pcidb v1.0.0
+## explicit; go 1.17
+github.com/jaypipes/pcidb
# github.com/json-iterator/go v1.1.11
## explicit; go 1.12
github.com/json-iterator/go
@@ -75,6 +111,9 @@ github.com/kr/pretty
# github.com/kr/text v0.2.0
## explicit
github.com/kr/text
+# github.com/mitchellh/go-homedir v1.1.0
+## explicit
+github.com/mitchellh/go-homedir
# github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd
## explicit
github.com/modern-go/concurrent
@@ -125,6 +164,9 @@ github.com/onsi/gomega/types
# github.com/peterhellberg/link v1.1.0
## explicit
github.com/peterhellberg/link
+# github.com/pkg/errors v0.9.1
+## explicit
+github.com/pkg/errors
# github.com/pmezard/go-difflib v1.0.0
## explicit
github.com/pmezard/go-difflib/difflib
@@ -308,6 +350,9 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
## explicit
gopkg.in/yaml.v3
+# howett.net/plist v1.0.0
+## explicit; go 1.12
+howett.net/plist
# k8s.io/api v0.22.1
## explicit; go 1.16
k8s.io/api/admissionregistration/v1