From d156cfd5b55aebdac055743bd7c0e0a138fc012a Mon Sep 17 00:00:00 2001 From: Hamza El-Saawy Date: Wed, 8 Nov 2023 11:21:31 -0500 Subject: [PATCH] [test] Add hvsock connection tests Add tests for binding to and listening on hyper-v sockets from within a uVM (as well as a hyper-v isolated containerd). Tests verify default SDDL and wildcard bind settings, as well updating the settings for a particular service ID. In order to test HVSocket communication, an agent is needed to run from within the uVM (or container within that). To accomplish that, the ability to re-exec the (functional) testing binary is added, so that it can be shared into the uVM (or container) and then run a separate code path that is defined within the same test case that is running on the host. For example, while running the test case `TestHVSock_Container_GuestBind/default`, the functional testing binary that is being run (i.e. `functional.test.exe`) is shared within the running container and then run with the flag `-run=^TestHVSock_Container_GuestBind$/^default$`. This causes the guest to bind to the agreed-upon Service GUID, and then (after the host connects to the same Service GUID), the guest verifies the expected VM and service GUIDs, and then ensures communication is possible. 
Signed-off-by: Hamza El-Saawy --- internal/uvm/create.go | 8 + test/functional/hvsock_test.go | 1150 ++++++++++++++++++++++++++++++++ test/functional/main_test.go | 143 ++-- test/internal/util/reexec.go | 73 ++ test/pkg/uvm/uvm.go | 11 +- 5 files changed, 1316 insertions(+), 69 deletions(-) create mode 100644 test/functional/hvsock_test.go create mode 100644 test/internal/util/reexec.go diff --git a/internal/uvm/create.go b/internal/uvm/create.go index fa28857617..f91848ccd0 100644 --- a/internal/uvm/create.go +++ b/internal/uvm/create.go @@ -10,6 +10,7 @@ import ( "path/filepath" "runtime" + "github.com/Microsoft/go-winio/pkg/guid" "github.com/sirupsen/logrus" "go.opencensus.io/trace" "golang.org/x/sys/windows" @@ -174,6 +175,13 @@ func (uvm *UtilityVM) ID() string { return uvm.hcsSystem.ID() } +// RuntimeID returns Hyper-V VM GUID. +// +// Only valid after the utility VM has been created. +func (uvm *UtilityVM) RuntimeID() guid.GUID { + return uvm.runtimeID +} + // OS returns the operating system of the utility VM. 
func (uvm *UtilityVM) OS() string { return uvm.operatingSystem diff --git a/test/functional/hvsock_test.go b/test/functional/hvsock_test.go new file mode 100644 index 0000000000..ea834c973f --- /dev/null +++ b/test/functional/hvsock_test.go @@ -0,0 +1,1150 @@ +//go:build windows && functional + +package functional + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "os" + "path/filepath" + "sync" + "testing" + "time" + + "github.com/Microsoft/go-winio" + "github.com/Microsoft/go-winio/pkg/guid" + ctrdoci "github.com/containerd/containerd/oci" + "github.com/google/go-cmp/cmp" + "github.com/opencontainers/runtime-spec/specs-go" + "golang.org/x/sys/windows" + + hcsschema "github.com/Microsoft/hcsshim/internal/hcs/schema2" + "github.com/Microsoft/hcsshim/osversion" + + testcmd "github.com/Microsoft/hcsshim/test/internal/cmd" + testcontainer "github.com/Microsoft/hcsshim/test/internal/container" + "github.com/Microsoft/hcsshim/test/internal/layers" + testoci "github.com/Microsoft/hcsshim/test/internal/oci" + "github.com/Microsoft/hcsshim/test/internal/util" + "github.com/Microsoft/hcsshim/test/pkg/require" + testuvm "github.com/Microsoft/hcsshim/test/pkg/uvm" +) + +// Hyper-V socket tests based on pipe tests in golang source. Ie, they re-exec the testing binary +// from within the uVM/container to run the other portion of the tests. +// Otherwise, a dedicated binary would be needed for these tests to run. +// +// See: +// https://cs.opensource.google/go/go/+/master:src/os/pipe_test.go;l=266-273;drc=0dfb22ed70749a2cd6d95ec6eee63bb213a940d4 + +// Since these test run on Windows, the tests (which exec-ing the testing binary from inside the guest) +// only work for WCOW. +// This is fine since Linux guests can only dial out over vsock. + +const ( + // total timeout for an hvsock test. 
+ hvsockTestTimeout = 10 * time.Second + // how long to wait when dialing over hvsock. + hvsockDialTimeout = 3 * time.Second + // how long to wait when accepting new hvsock connections. + hvsockAcceptTimeout = 3 * time.Second +) + +// +// uVM tests +// + +func TestHVSock_UVM_HostBind(t *testing.T) { + require.Build(t, osversion.RS5) + requireFeatures(t, featureWCOW, featureUVM, featureHVSocket) + + ctx := util.Context(context.Background(), t) + + for _, tc := range hvsockHostBindTestCases { + t.Run(tc.name, func(t *testing.T) { + msg1 := "hello from " + t.Name() + msg2 := "echo from " + t.Name() + svcGUID := getHVSockServiceGUID(t) + + // + // guest code + // + + util.RunInReExec(ctx, t, hostBindReExecFunc(svcGUID, tc.guestDialErr, msg1, msg2)) + + // + // host code + // + + opts := defaultWCOWOptions(ctx, t) + if tc.hvsockConfig != nil { + t.Logf("adding HVSocket config setting to WCOW uVM options for service %v: %#+v", svcGUID, tc.hvsockConfig) + if opts.AdditionalHyperVConfig == nil { + opts.AdditionalHyperVConfig = map[string]hcsschema.HvSocketServiceConfig{} + } + opts.AdditionalHyperVConfig[svcGUID.String()] = *tc.hvsockConfig + } + + vm := testuvm.CreateAndStart(ctx, t, opts) + + // create deadline for rest of the test (excluding the uVM creation) + ctx, cancel := context.WithTimeout(ctx, hvsockTestTimeout) //nolint:govet // ctx is shadowed + t.Cleanup(cancel) + + // bind to guest (or wildcard) address + id := vm.RuntimeID() + t.Logf("guest uVM runtime ID: %v", id) + if tc.useWildcard { + id = tc.wildcard + } + addr := &winio.HvsockAddr{ + VMID: id, + ServiceID: svcGUID, + } + + t.Logf("listening to hvsock address: %v", addr) + l, err := winio.ListenHvsock(addr) + if !errors.Is(err, tc.hostListenErr) { + t.Fatalf("expected listen error %v; got: %v", tc.hostListenErr, err) + } + if err != nil { + // expected an error, cant do much else + return + } + + t.Cleanup(func() { + if err := l.Close(); err != nil { + t.Errorf("could not close listener on address %v: 
%v", addr, err) + } + }) + + var hostConn net.Conn + acceptErrCh := goBlockT(func() (err error) { + // don't want to call t.Error here, the error could be due to the hv socket lister + // being closed after timeing out + hostConn, err = l.Accept() + if err != nil { + t.Logf("accept failed: %v", err) + } else { + t.Cleanup(func() { hostConn.Close() }) + } + return err + }) + + guestPath := filepath.Join(`C:\`, filepath.Base(os.Args[0])) + testuvm.Share(ctx, t, vm, os.Args[0], guestPath, true) + + reexecCmd := fmt.Sprintf(`%s -test.run=%s`, guestPath, util.TestNameRegex(t)) + if testing.Verbose() { + reexecCmd += " -test.v" + } + + ps := testoci.CreateWindowsSpec(ctx, t, vm.ID(), + testoci.DefaultWindowsSpecOpts(vm.ID(), + ctrdoci.WithUsername(`NT AUTHORITY\SYSTEM`), + ctrdoci.WithEnv([]string{util.ReExecEnv + "=1"}), + ctrdoci.WithProcessCommandLine(reexecCmd), + )...).Process + + cmdIO := testcmd.NewBufferedIO() + c := testcmd.Create(ctx, t, vm, ps, cmdIO) + + testcmd.Start(ctx, t, c) + t.Cleanup(func() { + testcmd.WaitExitCode(ctx, t, c, 0) + + s, _ := cmdIO.Output() + t.Logf("guest exec:\n%s", s) + }) + + select { + case <-time.After(hvsockAcceptTimeout): + if tc.guestDialErr != nil { + // expected guest to error while dialing hv socket connection + t.Logf("timed out waiting for guest to connect") + return + } + t.Fatalf("timed out waiting for hvsock connection") + case err := <-acceptErrCh: + if err != nil { + t.Fatalf("accept failed: %v", err) + } + } + + t.Logf("accepted connection: %v->%v", hostConn.LocalAddr(), hostConn.RemoteAddr()) + verifyLocalHvSockConn(ctx, t, hostConn, winio.HvsockAddr{ + ServiceID: svcGUID, + VMID: vm.RuntimeID(), + }) + + got := readConn(ctx, t, hostConn) + if got != msg1 { + t.Fatalf("got %q, wanted %q", got, msg1) + } + + writeConn(ctx, t, hostConn, []byte(msg2)) + + if got2 := readConn(ctx, t, hostConn); got2 != "" { + t.Logf("read was not empty: %s", got2) + } + }) + } +} + +func TestHVSock_UVM_GuestBind(t *testing.T) { + 
require.Build(t, osversion.RS5) + requireFeatures(t, featureWCOW, featureUVM, featureHVSocket) + + ctx := util.Context(context.Background(), t) + + for _, tc := range hvsockGuestBindTestCases { + t.Run(tc.name, func(t *testing.T) { + msg1 := "hello from " + t.Name() + msg2 := "echo from " + t.Name() + svcGUID := getHVSockServiceGUID(t) + + // + // guest code + // + + util.RunInReExec(ctx, t, guestBindReExecFunc(svcGUID, false, tc.guestListenErr, tc.hostDialErr != nil, msg1, msg2)) + + // + // host code + // + + opts := defaultWCOWOptions(ctx, t) + if tc.hvsockConfig != nil { + t.Logf("adding HVSocket config setting to WCOW uVM options for service %v: %#+v", svcGUID, tc.hvsockConfig) + if opts.AdditionalHyperVConfig == nil { + opts.AdditionalHyperVConfig = map[string]hcsschema.HvSocketServiceConfig{} + } + opts.AdditionalHyperVConfig[svcGUID.String()] = *tc.hvsockConfig + } + + vm := testuvm.CreateAndStart(ctx, t, opts) + + // create deadline for rest of the test (excluding the uVM creation) + ctx, cancel := context.WithTimeout(ctx, hvsockTestTimeout) //nolint:govet // ctx is shadowed + t.Cleanup(cancel) + + guestPath := filepath.Join(`C:\`, filepath.Base(os.Args[0])) + testuvm.Share(ctx, t, vm, os.Args[0], guestPath, true) + + reexecCmd := fmt.Sprintf(`%s -test.run=%s`, guestPath, util.TestNameRegex(t)) + if testing.Verbose() { + reexecCmd += " -test.v" + } + + ps := testoci.CreateWindowsSpec(ctx, t, vm.ID(), + testoci.DefaultWindowsSpecOpts(vm.ID(), + ctrdoci.WithUsername(`NT AUTHORITY\SYSTEM`), + ctrdoci.WithEnv([]string{util.ReExecEnv + "=1"}), + ctrdoci.WithProcessCommandLine(reexecCmd), + )...).Process + + cmdIO := testcmd.NewBufferedIO() + c := testcmd.Create(ctx, t, vm, ps, cmdIO) + + testcmd.Start(ctx, t, c) + t.Cleanup(func() { + testcmd.WaitExitCode(ctx, t, c, 0) + + s, _ := cmdIO.Output() + t.Logf("guest exec:\n%s", s) + }) + + // bind to uVM (or wildcard) address + id := vm.RuntimeID() + t.Logf("guest uVM runtime ID: %v", id) + if tc.useWildcard { + id 
= tc.wildcard + } + addr := &winio.HvsockAddr{ + VMID: id, + ServiceID: svcGUID, + } + + // wait a bit just to make sure exec in guest has had long enough to start and listen on hvsock + time.Sleep(10 * time.Millisecond) + + dialCtx, dialCancel := context.WithTimeout(ctx, hvsockDialTimeout) + t.Cleanup(dialCancel) + + t.Logf("dialing guest on: %v", addr) + hostConn, err := winio.Dial(dialCtx, addr) + if !errors.Is(err, tc.hostDialErr) { + t.Fatalf("expected dial error %v; got: %v", tc.hostDialErr, err) + } + if err != nil { + // expected an error, cant do much else + t.Logf("dial failed: %v", err) + return + } + + t.Cleanup(func() { + if err := hostConn.Close(); err != nil { + t.Errorf("could not close connection on address %v: %v", hostConn.LocalAddr(), err) + } + }) + t.Logf("dialled connection: %v->%v", hostConn.LocalAddr(), hostConn.RemoteAddr()) + + verifyRemoteHvSockConn(ctx, t, hostConn, winio.HvsockAddr{ + ServiceID: svcGUID, + VMID: vm.RuntimeID(), + }) + + writeConn(ctx, t, hostConn, []byte(msg1)) + + got := readConn(ctx, t, hostConn) + if got != msg2 { + t.Fatalf("got %q, wanted %q", got, msg2) + } + }) + } +} + +// +// container tests +// + +// ! NOTE: +// (v2 Xenon) containers (currently) inherit uVM HyperV socket settings +// since the HCS document they are create with has an empty `HvSock` field. +// These tests will fail if that is no longer the case. 
+// +// See: +// - internal\hcsoci.createWindowsContainerDocument +// - internal\hcs\schema2.Container.HvSockets + +func TestHVSock_Container_HostBind(t *testing.T) { + require.Build(t, osversion.RS5) + requireFeatures(t, featureWCOW, featureUVM, featureContainer, featureHVSocket) + + ctx := util.Context(context.Background(), t) + + for _, tc := range hvsockHostBindTestCases { + t.Run(tc.name, func(t *testing.T) { + msg1 := "hello from " + t.Name() + msg2 := "echo from " + t.Name() + svcGUID := getHVSockServiceGUID(t) + + // + // guest code + // + + util.RunInReExec(ctx, t, hostBindReExecFunc(svcGUID, tc.guestDialErr, msg1, msg2)) + + // + // host code + // + + opts := defaultWCOWOptions(ctx, t) + if tc.hvsockConfig != nil { + t.Logf("adding HVSocket config setting to WCOW uVM options for service %v: %#+v", svcGUID, tc.hvsockConfig) + if opts.AdditionalHyperVConfig == nil { + opts.AdditionalHyperVConfig = map[string]hcsschema.HvSocketServiceConfig{} + } + opts.AdditionalHyperVConfig[svcGUID.String()] = *tc.hvsockConfig + } + + vm := testuvm.CreateAndStart(ctx, t, opts) + + guestPath := filepath.Join(`C:\`, filepath.Base(os.Args[0])) + reexecCmd := fmt.Sprintf(`%s -test.run=%s`, guestPath, util.TestNameRegex(t)) + if testing.Verbose() { + reexecCmd += " -test.v" + } + + cID := vm.ID() + "-container" + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + testoci.WithWindowsLayerFolders(append(windowsImageLayers(ctx, t), scratch)), + ctrdoci.WithUsername(`NT AUTHORITY\SYSTEM`), + ctrdoci.WithEnv([]string{util.ReExecEnv + "=1"}), + ctrdoci.WithProcessCommandLine(reexecCmd), + ctrdoci.WithMounts([]specs.Mount{{ + Source: os.Args[0], + Destination: guestPath, + Options: []string{"ro"}, + }}), + )...) 
+ + ctr, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + // create deadline for rest of the test (excluding the uVM and container creation) + ctx, cancel := context.WithTimeout(ctx, hvsockTestTimeout) //nolint:govet // ctx is shadowed + t.Cleanup(cancel) + + // bind to guest (or wildcard) address + id := vm.RuntimeID() + t.Logf("guest uVM runtime ID: %v", id) + if tc.useWildcard { + id = tc.wildcard + } + addr := &winio.HvsockAddr{ + VMID: id, + ServiceID: svcGUID, + } + + t.Logf("listening to hvsock address: %v", addr) + l, err := winio.ListenHvsock(addr) + if !errors.Is(err, tc.hostListenErr) { + t.Fatalf("expected listen error %v; got: %v", tc.hostListenErr, err) + } + if err != nil { + // expected an error, cant do much else + return + } + + t.Cleanup(func() { + if err := l.Close(); err != nil { + t.Errorf("could not close listener on address %v: %v", addr, err) + } + }) + + var hostConn net.Conn + acceptErrCh := goBlockT(func() (err error) { + // don't want to call t.Error here, the error could be due to the hv socket lister + // being closed after timeing out + hostConn, err = l.Accept() + if err != nil { + t.Logf("accept failed: %v", err) + } else { + t.Cleanup(func() { hostConn.Close() }) + } + return err + }) + + // start the container (and its init process) + cmdIO := testcmd.NewBufferedIO() + c := testcontainer.StartWithSpec(ctx, t, ctr, spec.Process, cmdIO) + t.Cleanup(func() { + testcmd.WaitExitCode(ctx, t, c, 0) + + s, _ := cmdIO.Output() + t.Logf("guest exec:\n%s", s) + + testcontainer.Kill(ctx, t, ctr) + testcontainer.Wait(ctx, t, ctr) + }) + + select { + case <-time.After(hvsockAcceptTimeout): + if tc.guestDialErr != nil { + // expected guest to error while dialing hv socket connection + t.Logf("timed out waiting for guest to connect") + return + } + t.Fatalf("timed out waiting for hvsock connection") + case err := <-acceptErrCh: + if err != nil { + t.Fatalf("accept failed: %v", err) + } + } + + 
t.Logf("accepted connection: %v->%v", hostConn.LocalAddr(), hostConn.RemoteAddr()) + verifyLocalHvSockConn(ctx, t, hostConn, winio.HvsockAddr{ + ServiceID: svcGUID, + VMID: vm.RuntimeID(), + }) + + got := readConn(ctx, t, hostConn) + if got != msg1 { + t.Fatalf("got %q, wanted %q", got, msg1) + } + + writeConn(ctx, t, hostConn, []byte(msg2)) + + if got2 := readConn(ctx, t, hostConn); got2 != "" { + t.Logf("read was not empty: %s", got2) + } + }) + } +} + +func TestHVSock_Container_GuestBind(t *testing.T) { + require.Build(t, osversion.RS5) + requireFeatures(t, featureWCOW, featureUVM, featureContainer, featureHVSocket) + + ctx := util.Context(context.Background(), t) + + for _, tc := range hvsockGuestBindTestCases { + t.Run(tc.name, func(t *testing.T) { + msg1 := "hello from " + t.Name() + msg2 := "echo from " + t.Name() + svcGUID := getHVSockServiceGUID(t) + + // + // guest code + // + + util.RunInReExec(ctx, t, guestBindReExecFunc(svcGUID, true, tc.guestListenErr, tc.hostDialErr != nil, msg1, msg2)) + + // + // host code + // + + opts := defaultWCOWOptions(ctx, t) + if tc.hvsockConfig != nil { + t.Logf("adding HVSocket config setting to WCOW uVM options for service %v: %#+v", svcGUID, tc.hvsockConfig) + if opts.AdditionalHyperVConfig == nil { + opts.AdditionalHyperVConfig = map[string]hcsschema.HvSocketServiceConfig{} + } + opts.AdditionalHyperVConfig[svcGUID.String()] = *tc.hvsockConfig + } + + vm := testuvm.CreateAndStart(ctx, t, opts) + + guestPath := filepath.Join(`C:\`, filepath.Base(os.Args[0])) + reexecCmd := fmt.Sprintf(`%s -test.run=%s`, guestPath, util.TestNameRegex(t)) + if testing.Verbose() { + reexecCmd += " -test.v" + } + + cID := vm.ID() + "-container" + scratch := layers.WCOWScratchDir(ctx, t, "") + spec := testoci.CreateWindowsSpec(ctx, t, cID, + testoci.DefaultWindowsSpecOpts(cID, + testoci.WithWindowsLayerFolders(append(windowsImageLayers(ctx, t), scratch)), + ctrdoci.WithUsername(`NT AUTHORITY\SYSTEM`), + 
ctrdoci.WithEnv([]string{util.ReExecEnv + "=1"}), + ctrdoci.WithProcessCommandLine(reexecCmd), + ctrdoci.WithMounts([]specs.Mount{{ + Source: os.Args[0], + Destination: guestPath, + Options: []string{"ro"}, + }}), + )...) + + ctr, _, cleanup := testcontainer.Create(ctx, t, vm, spec, cID, hcsOwner) + t.Cleanup(cleanup) + + // create deadline for rest of the test (excluding the uVM and container creation) + ctx, cancel := context.WithTimeout(ctx, hvsockTestTimeout) //nolint:govet // ctx is shadowed + t.Cleanup(cancel) + + // start the container (and its init process) + cmdIO := testcmd.NewBufferedIO() + c := testcontainer.StartWithSpec(ctx, t, ctr, spec.Process, cmdIO) + + t.Cleanup(func() { + testcmd.WaitExitCode(ctx, t, c, 0) + + s, _ := cmdIO.Output() + t.Logf("guest exec:\n%s", s) + + testcontainer.Kill(ctx, t, ctr) + testcontainer.Wait(ctx, t, ctr) + }) + + // bind to uVM (or wildcard) address + id := vm.RuntimeID() + t.Logf("guest uVM runtime ID: %v", id) + if tc.useWildcard { + id = tc.wildcard + } + addr := &winio.HvsockAddr{ + VMID: id, + ServiceID: svcGUID, + } + + // wait a bit just to make sure exec in guest has had long enough to start and listen on hvsock + time.Sleep(10 * time.Millisecond) + + dialCtx, dialCancel := context.WithTimeout(ctx, hvsockDialTimeout) + t.Cleanup(dialCancel) + + t.Logf("dialing guest on: %v", addr) + hostConn, err := winio.Dial(dialCtx, addr) + if !errors.Is(err, tc.hostDialErr) { + t.Fatalf("expected dial error %v; got: %v", tc.hostDialErr, err) + } + if err != nil { + // expected an error, cant do much else + t.Logf("dial failed: %v", err) + return + } + + t.Cleanup(func() { + if err := hostConn.Close(); err != nil { + t.Errorf("could not close connection on address %v: %v", hostConn.LocalAddr(), err) + } + }) + t.Logf("dialled connection: %v->%v", hostConn.LocalAddr(), hostConn.RemoteAddr()) + + verifyRemoteHvSockConn(ctx, t, hostConn, winio.HvsockAddr{ + ServiceID: svcGUID, + VMID: vm.RuntimeID(), + }) + + writeConn(ctx, t, 
hostConn, []byte(msg1)) + + got := readConn(ctx, t, hostConn) + if got != msg2 { + t.Fatalf("got %q, wanted %q", got, msg2) + } + }) + } +} + +// +// test cases +// + +// Common SDDL strings. +const ( + allowElevatedSDDL = "D:P(A;;FA;;;SY)(A;;FA;;;BA)" + denyAllSDDL = "D:P(D;;FA;;;WD)" +) + +var hvsockHostBindTestCases = []struct { + name string + + useWildcard bool + wildcard guid.GUID + + hvsockConfig *hcsschema.HvSocketServiceConfig + + hostListenErr error + guestDialErr error +}{ + // + // defaults + // + + { + name: "default", + }, + { + name: "wildcard default", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + guestDialErr: windows.WSAECONNREFUSED, + }, + { + name: "wildcard children default", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + guestDialErr: windows.WSAECONNREFUSED, + }, + + // + // connection allowed + // + + { + name: "vm id allowed", + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + }, + { + name: "wildcard allowed", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: true, + }, + }, + { + name: "wildcard children allowed", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: true, + }, + }, + + // + // connection denied + // + + { + name: "vm id denied", + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + hostListenErr: windows.WSAEACCES, + }, + { + name: "wildcard denied", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + hvsockConfig: 
&hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + guestDialErr: windows.WSAECONNREFUSED, + }, + { + name: "wildcard children denied", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + guestDialErr: windows.WSAECONNREFUSED, + }, + + // + // connection disabled + // + + { + name: "vm id disabled", + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + Disabled: true, + }, + + // windows.WSAEINVAL, returned from windows.Listen, implies that the socket was not bound prior. + // + // See: + // https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-listen + hostListenErr: windows.WSAEINVAL, + }, + { + name: "wildcard disabled", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: true, + Disabled: true, + }, + guestDialErr: windows.WSAECONNREFUSED, + }, + { + name: "wildcard children disabled", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: allowElevatedSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: true, + Disabled: true, + }, + guestDialErr: windows.WSAECONNREFUSED, + }, +} + +var hvsockGuestBindTestCases = []struct { + name string + + useWildcard bool + wildcard guid.GUID + + hvsockConfig *hcsschema.HvSocketServiceConfig + + hostDialErr error + guestListenErr error +}{ + // + // defaults + // + + { + name: "default", + }, + { + name: "wildcard default", + useWildcard: true, + wildcard: 
winio.HvsockGUIDWildcard(), + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + { + name: "wildcard children default", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + + // + // connection allowed + // + + { + name: "vm id allowed", + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: allowElevatedSDDL, + AllowWildcardBinds: false, + }, + }, + { + name: "wildcard allowed", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: allowElevatedSDDL, + AllowWildcardBinds: false, + }, + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + { + name: "wildcard children allowed", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: allowElevatedSDDL, + AllowWildcardBinds: false, + }, + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + + // + // connection denied + // + + { + name: "vm id denied", + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + hostDialErr: windows.WSAEACCES, + }, + { + name: "wildcard denied", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + { + name: "wildcard children denied", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: denyAllSDDL, + AllowWildcardBinds: false, + }, + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + + // + // 
connection disabled + // + + { + name: "vm id disabled", + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: allowElevatedSDDL, + AllowWildcardBinds: false, + Disabled: true, + }, + + hostDialErr: windows.WSAEINVAL, + }, + { + name: "wildcard disabled", + useWildcard: true, + wildcard: winio.HvsockGUIDWildcard(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: allowElevatedSDDL, + AllowWildcardBinds: false, + Disabled: true, + }, + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, + { + name: "wildcard children disabled", + useWildcard: true, + wildcard: winio.HvsockGUIDChildren(), + hvsockConfig: &hcsschema.HvSocketServiceConfig{ + BindSecurityDescriptor: denyAllSDDL, + ConnectSecurityDescriptor: allowElevatedSDDL, + AllowWildcardBinds: false, + Disabled: true, + }, + hostDialErr: windows.WSAEADDRNOTAVAIL, + }, +} + +// +// functions to be run from within guest +// + +// code to run from guest when host is binding to hyper-v socket address. 
+func hostBindReExecFunc(svcGUID guid.GUID, dialErr error, msg1, msg2 string) func(context.Context, testing.TB) { + return func(ctx context.Context, tb testing.TB) { //nolint:thelper + ctx, cancel := context.WithTimeout(ctx, hvsockTestTimeout) + tb.Cleanup(cancel) + + dialCtx, dialCancel := context.WithTimeout(ctx, hvsockDialTimeout) + tb.Cleanup(dialCancel) + + addr := &winio.HvsockAddr{ + VMID: winio.HvsockGUIDParent(), + ServiceID: svcGUID, + } + + tb.Logf("dialing host on: %v", addr) + conn, err := winio.Dial(dialCtx, addr) + if !errors.Is(err, dialErr) { + tb.Fatalf("expected dial error %v; got: %v", dialErr, err) + } + if err != nil { + // expected an error, cant do much else + tb.Logf("dial failed: %v", err) + return + } + + tb.Cleanup(func() { + if err := conn.Close(); err != nil { + tb.Errorf("could not close connection on address %v: %v", conn.LocalAddr(), err) + } + }) + + tb.Logf("dialled connection: %v->%v", conn.LocalAddr(), conn.RemoteAddr()) + verifyRemoteHvSockConn(ctx, tb, conn, winio.HvsockAddr{ + ServiceID: svcGUID, + VMID: winio.HvsockGUIDParent(), + }) + + writeConn(ctx, tb, conn, []byte(msg1)) + + got := readConn(ctx, tb, conn) + if got != msg2 { + tb.Fatalf("got %q, wanted %q", got, msg2) + } + } +} + +// code to run from guest when guest is binding to hyper-v socket address. 
+func guestBindReExecFunc( + svcGUID guid.GUID, + inContainer bool, + listenErr error, + hostDialErr bool, + msg1, msg2 string, +) func(context.Context, testing.TB) { + return func(ctx context.Context, tb testing.TB) { //nolint:thelper + ctx, cancel := context.WithTimeout(ctx, hvsockTestTimeout) + tb.Cleanup(cancel) + + // when listening from inside a container, the parent is the uVM + // so change VM ID to everyone (there is no grandparent wildcard 😢) + vmID := winio.HvsockGUIDParent() + if inContainer { + vmID = winio.HvsockGUIDWildcard() + } + addr := &winio.HvsockAddr{ + VMID: vmID, + ServiceID: svcGUID, + } + + tb.Logf("listening to hvsock address: %v", addr) + l, err := winio.ListenHvsock(addr) + if !errors.Is(err, listenErr) { + tb.Fatalf("expected listen error %v; got: %v", listenErr, err) + } + if err != nil { + // expected an error, cant do much else + return + } + tb.Cleanup(func() { + if err := l.Close(); err != nil { + tb.Errorf("could not close listener on address %v: %v", addr, err) + } + }) + + var conn net.Conn + acceptErrCh := goBlockT(func() (err error) { + // don't want to call t.Error here, the error could be due to the hv socket lister + // being closed after timeing out + conn, err = l.Accept() + if err != nil { + tb.Logf("accept failed: %v", err) + } else { + tb.Cleanup(func() { conn.Close() }) + } + return err + }) + + select { + case <-time.After(hvsockAcceptTimeout): + if hostDialErr { + // expected host to error while dialing hv socket connection + tb.Logf("timed out waiting for host to connect") + return + } + tb.Fatalf("timed out waiting for hvsock connection") + case err := <-acceptErrCh: + if err != nil { + tb.Fatalf("accept failed: %v", err) + } + } + + tb.Logf("accepted connection: %v->%v", conn.LocalAddr(), conn.RemoteAddr()) + verifyLocalHvSockConn(ctx, tb, conn, winio.HvsockAddr{ + ServiceID: svcGUID, + VMID: winio.HvsockGUIDParent(), + }) + + got := readConn(ctx, tb, conn) + if got != msg1 { + tb.Fatalf("got %q, wanted %q", 
got, msg1) + } + + writeConn(ctx, tb, conn, []byte(msg2)) + + if got2 := readConn(ctx, tb, conn); got2 != "" { + tb.Logf("read was not empty: %s", got2) + } + } +} + +// +// hvsock helper functions +// + +func readConn(ctx context.Context, tb testing.TB, conn net.Conn) string { + tb.Helper() + + b := make([]byte, 1024) // hopefully a KiB is enough for a full read + + var n int + var err error + waitGoContext(ctx, tb, func() { + n, err = conn.Read(b) + }) + + if err != nil && !errors.Is(err, io.EOF) { + tb.Fatalf("read on %v failed: %v", conn.LocalAddr(), err) + } + + s := string(b[:n]) + tb.Logf("read: %s", s) + return s +} + +func writeConn(ctx context.Context, tb testing.TB, conn net.Conn, b []byte) { + tb.Helper() + + n := len(b) // write shouldn't modify the len of b, but just in case ... + var nn int + var err error + waitGoContext(ctx, tb, func() { + nn, err = conn.Write(b) + }) + + if errors.Is(err, windows.WSAESHUTDOWN) { + tb.Fatalf("write on closed connection (%v)", conn.LocalAddr()) + } else if err != nil { + tb.Fatalf("write to %v failed: %v", conn.LocalAddr(), err) + } else if nn != n { + tb.Fatalf("expected to write %d byte; wrote %d", nn, n) + } + + tb.Logf("wrote: %s", string(b)) +} + +func verifyLocalHvSockConn(ctx context.Context, tb testing.TB, conn net.Conn, want winio.HvsockAddr) { + tb.Helper() + + hvConn, ok := conn.(*winio.HvsockConn) + if !ok { + tb.Fatalf("connection is not a of type (*winio.HvsockConn): %T", conn) + } + + verifyHvSockAddr(ctx, tb, hvConn.LocalAddr(), want) +} + +func verifyRemoteHvSockConn(ctx context.Context, tb testing.TB, conn net.Conn, want winio.HvsockAddr) { + tb.Helper() + + hvConn, ok := conn.(*winio.HvsockConn) + if !ok { + tb.Fatalf("connection is not a of type (*winio.HvsockConn): %T", conn) + } + verifyHvSockAddr(ctx, tb, hvConn.RemoteAddr(), want) +} + +func verifyHvSockAddr(_ context.Context, tb testing.TB, addr net.Addr, want winio.HvsockAddr) { + tb.Helper() + + got, ok := addr.(*winio.HvsockAddr) + if !ok 
{ + tb.Fatalf("address is not a of type (*winio.HvsockAddr): %T", addr) + } + tb.Logf("address: %v", got) + if diff := cmp.Diff(*got, want); diff != "" { + tb.Fatalf("address mismatch (-want +got):\n%s", diff) + } +} + +var hvsockServiceGUID = sync.OnceValues(func() (guid.GUID, error) { + return guid.NewV5(guid.GUID{}, []byte(hcsOwner)) +}) + +func getHVSockServiceGUID(tb testing.TB) guid.GUID { + tb.Helper() + + g, err := hvsockServiceGUID() + if err != nil { + tb.Fatalf("could not create Hyper-V socket service ID: %v", err) + } + return g +} + +// +// misc helpers +// + +func waitGoContext(ctx context.Context, tb testing.TB, f func()) { + tb.Helper() + + done := make(chan struct{}) + go func() { + defer close(done) + f() + }() + + select { + case <-done: + case <-ctx.Done(): + tb.Fatalf("context cancelled: %v", ctx.Err()) + } +} + +// goBlockT launches f in a go routine and returns a channel to wait on for f's completion. +func goBlockT[T any](f func() T) <-chan T { + ch := make(chan T) + go func() { + defer close(ch) + + ch <- f() + }() + + return ch +} diff --git a/test/functional/main_test.go b/test/functional/main_test.go index 2e46a3d98f..737476f5eb 100644 --- a/test/functional/main_test.go +++ b/test/functional/main_test.go @@ -98,11 +98,12 @@ const ( // resources and misc functionality. 
- featureScratch = "Scratch" // validate scratch layer mounting - featurePlan9 = "Plan9" // Plan9 file shares - featureSCSI = "SCSI" // SCSI disk (virtuall and physical) mounts - featureVSMB = "vSMB" // virtual SMB file shares - featureVPMEM = "vPMEM" // virtual PMEM mounts + featureScratch = "Scratch" // validate scratch layer mounting + featurePlan9 = "Plan9" // Plan9 file shares + featureSCSI = "SCSI" // SCSI disk (virtuall and physical) mounts + featureVSMB = "vSMB" // virtual SMB file shares + featureVPMEM = "vPMEM" // virtual PMEM mounts + featureHVSocket = "HVSocket" // Hyper-V socket functionality ) var allFeatures = []string{ @@ -117,10 +118,12 @@ var allFeatures = []string{ featureSCSI, featureVSMB, featureVPMEM, + featureHVSocket, } var ( - flagLogLevel = testflag.NewLogrusLevel("log-level", logrus.WarnLevel.String(), "logrus logging `level`") + flagLogLevel = testflag.NewLogrusLevel("log-level", logrus.WarnLevel.String(), "logrus logging `level`") + flagFeatures = testflag.NewFeatureFlag(allFeatures) flagContainerdNamespace = flag.String("ctr-namespace", hcsOwner, "containerd `namespace` to use when creating OCI specs") @@ -172,85 +175,89 @@ func runTests(m *testing.M) error { logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true}) logrus.SetLevel(flagLogLevel.Level) - logrus.Debugf("using features: %s", flagFeatures.Strings()) + if !util.IsTestReExec() { + // don't bother re-setting up testing infra for a re-exec, since we really shouldn't + // be doing testing/uVM/image related things outside the main test - if flagFeatures.IsSet(featureLCOWIntegrity) { - logrus.Info("appending verity information to LCOW images") - alpineImagePaths.AppendVerity = true - } + log.G(ctx).WithField("features", flagFeatures.String()).Debug("provided features") - imgs := []*testlayers.LazyImageLayers{} - if flagFeatures.IsSet(featureLCOWIntegrity) || flagFeatures.IsSet(featureLCOW) { - imgs = append(imgs, alpineImagePaths) - } - - if flagFeatures.IsSet(featureWCOW) 
{ - wcow, err := wcowImagePathsOnce() - if err != nil { - return err + if flagFeatures.IsSet(featureLCOWIntegrity) { + logrus.Info("appending verity information to LCOW images") + alpineImagePaths.AppendVerity = true } - logrus.WithField("image", wcow.nanoserver.Image).Info("using Nano Server image") - logrus.WithField("image", wcow.servercore.Image).Info("using Server Core image") + imgs := []*testlayers.LazyImageLayers{} - imgs = append(imgs, wcow.nanoserver, wcow.servercore) - } + if flagFeatures.IsSet(featureLCOWIntegrity) || flagFeatures.IsSet(featureLCOW) { + imgs = append(imgs, alpineImagePaths) + } - for _, l := range imgs { - l.TempPath = *flagLayerTempDir - } + if flagFeatures.IsSet(featureWCOW) { + wcow, err := wcowImagePathsOnce() + if err != nil { + return err + } - defer func(ctx context.Context) { - cleanupComputeSystems(ctx, hcsOwner) + logrus.WithField("image", wcow.nanoserver.Image).Info("using Nano Server image") + logrus.WithField("image", wcow.servercore.Image).Info("using Server Core image") + + imgs = append(imgs, wcow.nanoserver, wcow.servercore) + } for _, l := range imgs { - if l == nil { - continue - } - // just log errors: no other cleanup possible - if err := l.Close(ctx); err != nil { - log.G(ctx).WithFields(logrus.Fields{ - logrus.ErrorKey: err, - "image": l.Image, - "platform": l.Platform, - }).Warning("image cleanup failed") - } + l.TempPath = *flagLayerTempDir } - }(ctx) - // print additional configuration options when running benchmarks, so we can track performance. 
- // - // also, print to ETW instead of stdout to mirror actual deployments, and to prevent logs from - // interfering with benchmarking output - if util.RunningBenchmarks() { - util.PrintAdditionalBenchmarkConfig() - // also print out the features used as part of the benchmarking config - fmt.Printf("features: %s\n", flagFeatures.Strings()) - - provider, err := etw.NewProviderWithOptions("Microsoft.Virtualization.RunHCS") - if err != nil { - logrus.Error(err) - } else { - if hook, err := etwlogrus.NewHookFromProvider(provider); err == nil { - logrus.AddHook(hook) + defer func(ctx context.Context) { + cleanupComputeSystems(ctx, hcsOwner) + + for _, l := range imgs { + if l == nil { + continue + } + // just log errors: no other cleanup possible + if err := l.Close(ctx); err != nil { + log.G(ctx).WithFields(logrus.Fields{ + logrus.ErrorKey: err, + "image": l.Image, + "platform": l.Platform, + }).Warning("image cleanup failed") + } + } + }(ctx) + + // print additional configuration options when running benchmarks, so we can track performance. 
+ // + // also, print to ETW instead of stdout to mirror actual deployments, and to prevent logs from + // interfering with benchmarking output + if util.RunningBenchmarks() { + util.PrintAdditionalBenchmarkConfig() + // also print out the features used as part of the benchmarking config + fmt.Printf("features: %s\n", flagFeatures.Strings()) + + provider, err := etw.NewProviderWithOptions("Microsoft.Virtualization.RunHCS") + if err != nil { + logrus.Error(err) } else { - logrus.WithError(err).Error("could not create ETW logrus hook") + if hook, err := etwlogrus.NewHookFromProvider(provider); err == nil { + logrus.AddHook(hook) + } else { + logrus.WithError(err).Error("could not create ETW logrus hook") + } } - } - // regardless of ETW provider status, still discard logs - logrus.SetFormatter(log.NopFormatter{}) - logrus.SetOutput(io.Discard) + // regardless of ETW provider status, still discard logs + logrus.SetFormatter(log.NopFormatter{}) + logrus.SetOutput(io.Discard) - defer func() { - // un-discard logs during cleanup - logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true}) - logrus.SetOutput(os.Stdout) - }() + defer func() { + // un-discard logs during cleanup + logrus.SetFormatter(&logrus.TextFormatter{FullTimestamp: true}) + logrus.SetOutput(os.Stdout) + }() + } } - log.G(ctx).WithField("features", flagFeatures.String()).Info("provided features") - if e := m.Run(); e != 0 { return cli.Exit("", e) } diff --git a/test/internal/util/reexec.go b/test/internal/util/reexec.go new file mode 100644 index 0000000000..55a38d6077 --- /dev/null +++ b/test/internal/util/reexec.go @@ -0,0 +1,73 @@ +package util + +import ( + "context" + "os" + "strings" + "testing" +) + +/* +Tests may need to run code from a different process or scope (e.g., from within a container or uVM). +Rather than creating a dedicated binary per testcase/situtation, allow tests to re-exec +the current testing binary and only run a dedicated code block. 
+This leverages builtin `go test -run ` functionality, consolidates code
+so that the test and re-exec code can be updated in tandem, and removes the need to build
+and manage additional utility binaries during testing.
+
+Inspired by pipe tests in golang source:
+https://cs.opensource.google/go/go/+/master:src/os/pipe_test.go;l=266-273;drc=0dfb22ed70749a2cd6d95ec6eee63bb213a940d4
+
+Tests that re-exec themselves must set the [ReExecEnv] environment variable in the new process
+in order to properly skip testing setup and allow [RunInReExec] to function properly.
+
+Additionally, [TestNameRegex] should be passed to the testing binary's `-run` flag
+in order to only run the current test case in the re-exec.
+*/
+
+// ReExecEnv is used to indicate that the current testing binary has been re-execed.
+//
+// Tests should set this environment variable before re-execing themselves.
+const ReExecEnv = "HCSSHIM_TEST_RE_EXEC"
+
+// IsTestReExec checks if the current test execution is a re-exec of a testing binary.
+// I.e., it checks if the [ReExecEnv] environment variable is set.
+func IsTestReExec() bool {
+	if !testing.Testing() {
+		return false
+	}
+
+	_, ok := os.LookupEnv(ReExecEnv)
+	return ok
+}
+
+// RunInReExec checks if it is executing within a re-exec (via [IsTestReExec])
+// and, if so, calls f and then [testing.TB.Skip] to skip the remainder of the test.
+func RunInReExec(ctx context.Context, tb testing.TB, f func(context.Context, testing.TB)) {
+	tb.Helper()
+
+	if !IsTestReExec() {
+		return
+	}
+
+	f(ctx, tb)
+	tb.Skip("finished running code from re-exec")
+}
+
+// TestNameRegex returns a regex expression that matches the current test name exactly.
+//
+// `-test.run regex` matches the individual test name components by splitting on `/`.
+// So `A/B` will first match test names against `A`, and then, for all matched tests,
+// match sub-tests against `B`.
+// Therefore, for a test named `foo/bar`, return `^foo$/^bar$`.
+//
+// See: `go help test`.
+func TestNameRegex(tb testing.TB) string { + tb.Helper() + + ss := make([]string, 0) + for _, s := range strings.Split(tb.Name(), `/`) { + ss = append(ss, `^`+s+`$`) + } + return strings.Join(ss, `/`) +} diff --git a/test/pkg/uvm/uvm.go b/test/pkg/uvm/uvm.go index 3ae1eb61d7..84c6fbf030 100644 --- a/test/pkg/uvm/uvm.go +++ b/test/pkg/uvm/uvm.go @@ -84,6 +84,15 @@ func Kill(ctx context.Context, tb testing.TB, vm *uvm.UtilityVM) { func Close(ctx context.Context, tb testing.TB, vm *uvm.UtilityVM) { tb.Helper() if err := vm.CloseCtx(ctx); err != nil { - tb.Fatalf("could not close uvm %q: %s", vm.ID(), err) + tb.Fatalf("could not close uvm %q: %v", vm.ID(), err) + } +} + +func Share(ctx context.Context, tb testing.TB, vm *uvm.UtilityVM, hostPath, guestPath string, readOnly bool) { + tb.Helper() + tb.Logf("sharing %q to %q inside uvm %s", hostPath, guestPath, vm.ID()) + + if err := vm.Share(ctx, hostPath, guestPath, readOnly); err != nil { + tb.Fatalf("could not share %q into uvm %s as %q: %v", hostPath, vm.ID(), guestPath, err) } }