From 5b4841f308d8eda0205292bd31d060e623e65743 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 23 Jan 2019 13:05:59 -0800 Subject: [PATCH 01/25] llb: initial fileop implementation Signed-off-by: Tonis Tiigi --- client/llb/fileop.go | 647 ++++ client/llb/fileop_test.go | 586 +++ client/llb/state.go | 6 + solver/pb/attr.go | 2 + solver/pb/ops.pb.go | 7507 ++++++++++++++++++++++++------------- solver/pb/ops.proto | 74 +- worker/base/worker.go | 2 + 7 files changed, 6161 insertions(+), 2663 deletions(-) create mode 100644 client/llb/fileop.go create mode 100644 client/llb/fileop_test.go diff --git a/client/llb/fileop.go b/client/llb/fileop.go new file mode 100644 index 000000000000..82fc87c655da --- /dev/null +++ b/client/llb/fileop.go @@ -0,0 +1,647 @@ +package llb + +import ( + _ "crypto/sha256" + "os" + "path" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// Examples: +// local := llb.Local(...) 
+// llb.Image().Dir("/abc").File(Mkdir("./foo").Mkfile("/abc/foo/bar", []byte("data"))) +// llb.Image().File(Mkdir("/foo").Mkfile("/foo/bar", []byte("data"))) +// llb.Image().File(Copy(local, "/foo", "/bar")).File(Copy(local, "/foo2", "/bar2")) +// +// a := Mkdir("./foo") // *FileAction /ced/foo +// b := Mkdir("./bar") // /abc/bar +// c := b.Copy(a.WithState(llb.Scratch().Dir("/ced")), "./foo", "./baz") // /abc/baz +// llb.Image().Dir("/abc").File(c) +// +// In future this can be extended to multiple outputs with: +// a := Mkdir("./foo") +// b, id := a.GetSelector() +// c := b.Mkdir("./bar") +// filestate = state.File(c) +// filestate.GetOutput(id).Exec() + +func NewFileOp(s State, action *FileAction) *FileOp { + action = action.bind(s) + + f := &FileOp{ + action: action, + } + + f.output = &output{vertex: f, getIndex: func() (pb.OutputIndex, error) { + return pb.OutputIndex(0), nil + }} + + return f +} + +// CopyInput is either llb.State or *FileActionWithState +type CopyInput interface { + isFileOpCopyInput() +} + +type subAction interface { + toProtoAction(string, pb.InputIndex) pb.IsFileAction +} + +type FileAction struct { + state *State + prev *FileAction + action subAction + err error +} + +func (fa *FileAction) Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { + a := Mkdir(p, m, opt...) + a.prev = fa + return a +} + +func (fa *FileAction) Mkfile(p string, m os.FileMode, dt []byte, opt ...MkfileOption) *FileAction { + a := Mkfile(p, m, dt, opt...) + a.prev = fa + return a +} + +func (fa *FileAction) Rm(p string, opt ...RmOption) *FileAction { + a := Rm(p, opt...) + a.prev = fa + return a +} + +func (fa *FileAction) Copy(input CopyInput, src, dest string, opt ...CopyOption) *FileAction { + a := Copy(input, src, dest, opt...) 
+ a.prev = fa + return a +} + +func (fa *FileAction) allOutputs(m map[Output]struct{}) { + if fa == nil { + return + } + if fa.state != nil && fa.state.Output() != nil { + m[fa.state.Output()] = struct{}{} + } + + if a, ok := fa.action.(*fileActionCopy); ok { + if a.state != nil { + if out := a.state.Output(); out != nil { + m[out] = struct{}{} + } + } else if a.fas != nil { + a.fas.allOutputs(m) + } + } + fa.prev.allOutputs(m) +} + +func (fa *FileAction) bind(s State) *FileAction { + if fa == nil { + return nil + } + fa2 := *fa + fa2.prev = fa.prev.bind(s) + fa2.state = &s + return &fa2 +} + +func (fa *FileAction) WithState(s State) CopyInput { + return &fileActionWithState{FileAction: fa.bind(s)} +} + +type fileActionWithState struct { + *FileAction +} + +func (fas *fileActionWithState) isFileOpCopyInput() {} + +func Mkdir(p string, m os.FileMode, opt ...MkdirOption) *FileAction { + var mi MkdirInfo + for _, o := range opt { + o.SetMkdirOption(&mi) + } + return &FileAction{ + action: &fileActionMkdir{ + file: p, + mode: m, + info: mi, + }, + } +} + +type fileActionMkdir struct { + file string + mode os.FileMode + info MkdirInfo +} + +func (a *fileActionMkdir) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + return &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: normalizePath(parent, a.file), + Mode: int32(a.mode & 0777), + MakeParents: a.info.MakeParents, + Owner: a.info.ChownOpt.marshal(base), + }, + } +} + +type MkdirOption interface { + SetMkdirOption(*MkdirInfo) +} + +type ChownOption interface { + MkdirOption + MkfileOption + CopyOption +} + +type mkdirOptionFunc func(*MkdirInfo) + +func (fn mkdirOptionFunc) SetMkdirOption(mi *MkdirInfo) { + fn(mi) +} + +var _ MkdirOption = &MkdirInfo{} + +func WithParents(b bool) MkdirOption { + return mkdirOptionFunc(func(mi *MkdirInfo) { + mi.MakeParents = b + }) +} + +type MkdirInfo struct { + MakeParents bool + ChownOpt *ChownOpt +} + +func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) { 
+ *mi2 = *mi +} + +func WithUser(name string) ChownOption { + return ChownOpt{ + User: &UserOpt{Name: name}, + } +} + +func WithUIDGID(uid, gid int) ChownOption { + return ChownOpt{ + User: &UserOpt{UID: uid}, + Group: &UserOpt{UID: gid}, + } +} + +type ChownOpt struct { + User *UserOpt + Group *UserOpt +} + +func (co ChownOpt) SetMkdirOption(mi *MkdirInfo) { + mi.ChownOpt = &co +} +func (co ChownOpt) SetMkfileOption(mi *MkfileInfo) { + mi.ChownOpt = &co +} +func (co ChownOpt) SetCopyOption(mi *CopyInfo) { + mi.ChownOpt = &co +} + +func (cp *ChownOpt) marshal(base pb.InputIndex) *pb.ChownOpt { + if cp == nil { + return nil + } + return &pb.ChownOpt{ + User: cp.User.marshal(base), + Group: cp.Group.marshal(base), + } +} + +type UserOpt struct { + UID int + Name string +} + +func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { + if up == nil { + return nil + } + if up.Name != "" { + return &pb.UserOpt{Name: up.Name, Input: base} + } + return &pb.UserOpt{Id: int32(up.UID), Input: -1} +} + +func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { + var mi MkfileInfo + for _, o := range opts { + o.SetMkfileOption(&mi) + } + + return &FileAction{ + action: &fileActionMkfile{ + file: p, + mode: m, + dt: dt, + info: mi, + }, + } +} + +type MkfileOption interface { + SetMkfileOption(*MkfileInfo) +} + +type MkfileInfo struct { + ChownOpt *ChownOpt +} + +func (mi *MkfileInfo) SetMkfileOption(mi2 *MkfileInfo) { + *mi2 = *mi +} + +var _ MkfileOption = &MkfileInfo{} + +type fileActionMkfile struct { + file string + mode os.FileMode + dt []byte + info MkfileInfo +} + +func (a *fileActionMkfile) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + return &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: normalizePath(parent, a.file), + Mode: int32(a.mode & 0777), + Data: a.dt, + Owner: a.info.ChownOpt.marshal(base), + }, + } +} + +func Rm(p string, opts ...RmOption) *FileAction { + var mi RmInfo + for _, o := range 
opts { + o.SetRmOption(&mi) + } + + return &FileAction{ + action: &fileActionRm{ + file: p, + info: mi, + }, + } +} + +type RmOption interface { + SetRmOption(*RmInfo) +} + +type rmOptionFunc func(*RmInfo) + +func (fn rmOptionFunc) SetRmOption(mi *RmInfo) { + fn(mi) +} + +type RmInfo struct { + AllowNotFound bool + AllowWildcard bool +} + +func (mi *RmInfo) SetRmOption(mi2 *RmInfo) { + *mi2 = *mi +} + +var _ RmOption = &RmInfo{} + +func WithAllowNotFound(b bool) RmOption { + return rmOptionFunc(func(mi *RmInfo) { + mi.AllowNotFound = b + }) +} + +func WithAllowWildcard(b bool) RmOption { + return rmOptionFunc(func(mi *RmInfo) { + mi.AllowWildcard = b + }) +} + +type fileActionRm struct { + file string + info RmInfo +} + +func (a *fileActionRm) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + return &pb.FileAction_Rm{ + Rm: &pb.FileActionRm{ + Path: normalizePath(parent, a.file), + AllowNotFound: a.info.AllowNotFound, + AllowWildcard: a.info.AllowWildcard, + }, + } +} + +func Copy(input CopyInput, src, dest string, opts ...CopyOption) *FileAction { + var state *State + var fas *fileActionWithState + var err error + if st, ok := input.(State); ok { + state = &st + } else if v, ok := input.(*fileActionWithState); ok { + fas = v + } else { + err = errors.Errorf("invalid input type %T for copy", input) + } + + var mi CopyInfo + for _, o := range opts { + o.SetCopyOption(&mi) + } + + return &FileAction{ + action: &fileActionCopy{ + state: state, + fas: fas, + src: src, + dest: dest, + info: mi, + }, + err: err, + } +} + +type CopyOption interface { + SetCopyOption(*CopyInfo) +} + +type CopyInfo struct { + Mode *os.FileMode + FollowSymlinks bool + CopyDirContentsOnly bool + AttemptUnpack bool + CreateDestPath bool + AllowWildcard bool + AllowEmptyWildcard bool + ChownOpt *ChownOpt +} + +func (mi *CopyInfo) SetCopyOption(mi2 *CopyInfo) { + *mi2 = *mi +} + +var _ CopyOption = &CopyInfo{} + +type fileActionCopy struct { + state *State + fas 
*fileActionWithState + src string + dest string + info CopyInfo +} + +func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { + c := &pb.FileActionCopy{ + Src: a.sourcePath(), + Dest: normalizePath(parent, a.dest), + Owner: a.info.ChownOpt.marshal(base), + AllowWildcard: a.info.AllowWildcard, + AllowEmptyWildcard: a.info.AllowEmptyWildcard, + FollowSymlink: a.info.FollowSymlinks, + DirCopyContents: a.info.CopyDirContentsOnly, + AttemptUnpack: a.info.AttemptUnpack, + CreateDestPath: a.info.CreateDestPath, + } + if a.info.Mode != nil { + c.Mode = int32(*a.info.Mode) + } else { + c.Mode = -1 + } + return &pb.FileAction_Copy{ + Copy: c, + } +} + +func (c *fileActionCopy) sourcePath() string { + p := path.Clean(c.src) + if !path.IsAbs(p) { + if c.state != nil { + p = path.Join("/", c.state.GetDir(), p) + } else if c.fas != nil { + p = path.Join("/", c.fas.state.GetDir(), p) + } + } + return p +} + +type FileOp struct { + MarshalCache + action *FileAction + output Output + + // constraints Constraints + isValidated bool +} + +func (f *FileOp) Validate() error { + if f.isValidated { + return nil + } + if f.action == nil { + return errors.Errorf("action is required") + } + f.isValidated = true + return nil +} + +type marshalState struct { + visited map[*FileAction]*fileActionState + inputs []*pb.Input + actions []*fileActionState +} + +func newMarshalState() *marshalState { + return &marshalState{ + visited: map[*FileAction]*fileActionState{}, + } +} + +type fileActionState struct { + base pb.InputIndex + input pb.InputIndex + inputRelative *int + input2 pb.InputIndex + input2Relative *int + target int + action subAction + fa *FileAction +} + +func (ms *marshalState) addInput(st *fileActionState, c *Constraints, o Output) (pb.InputIndex, error) { + inp, err := o.ToInput(c) + if err != nil { + return 0, err + } + for i, inp2 := range ms.inputs { + if *inp == *inp2 { + return pb.InputIndex(i), nil + } + } + i := 
pb.InputIndex(len(ms.inputs)) + ms.inputs = append(ms.inputs, inp) + return i, nil +} + +func (ms *marshalState) add(fa *FileAction, c *Constraints) (*fileActionState, error) { + if st, ok := ms.visited[fa]; ok { + return st, nil + } + + if fa.err != nil { + return nil, fa.err + } + + var prevState *fileActionState + if parent := fa.prev; parent != nil { + var err error + prevState, err = ms.add(parent, c) + if err != nil { + return nil, err + } + } + + st := &fileActionState{ + action: fa.action, + input: -1, + input2: -1, + base: -1, + fa: fa, + } + + if source := fa.state.Output(); source != nil { + inp, err := ms.addInput(st, c, source) + if err != nil { + return nil, err + } + st.base = inp + } + + if fa.prev == nil { + st.input = st.base + } else { + st.inputRelative = &prevState.target + } + + if a, ok := fa.action.(*fileActionCopy); ok { + if a.state != nil { + if out := a.state.Output(); out != nil { + inp, err := ms.addInput(st, c, out) + if err != nil { + return nil, err + } + st.input2 = inp + } + } else if a.fas != nil { + src, err := ms.add(a.fas.FileAction, c) + if err != nil { + return nil, err + } + st.input2Relative = &src.target + } else { + return nil, errors.Errorf("invalid empty source for copy") + } + } + + st.target = len(ms.actions) + + ms.visited[fa] = st + ms.actions = append(ms.actions, st) + + return st, nil +} + +func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, error) { + if f.Cached(c) { + return f.Load() + } + if err := f.Validate(); err != nil { + return "", nil, nil, err + } + + pfo := &pb.FileOp{} + + pop, md := MarshalConstraints(c, &Constraints{}) + pop.Op = &pb.Op_File{ + File: pfo, + } + + state := newMarshalState() + _, err := state.add(f.action, c) + if err != nil { + return "", nil, nil, err + } + pop.Inputs = state.inputs + + for i, st := range state.actions { + output := pb.OutputIndex(-1) + if i+1 == len(state.actions) { + output = 0 + } + + var parent string + if st.fa.state != nil { + 
parent = st.fa.state.GetDir() + } + + pfo.Actions = append(pfo.Actions, &pb.FileAction{ + Input: getIndex(st.input, len(state.actions), st.inputRelative), + SecondaryInput: getIndex(st.input2, len(state.actions), st.input2Relative), + Output: output, + Action: st.action.toProtoAction(parent, st.base), + }) + } + + dt, err := pop.Marshal() + if err != nil { + return "", nil, nil, err + } + f.Store(dt, md, c) + return f.Load() +} + +func normalizePath(parent, p string) string { + p = path.Clean(p) + if !path.IsAbs(p) { + p = path.Join("/", parent, p) + } + return p +} + +func (f *FileOp) Output() Output { + return f.output +} + +func (f *FileOp) Inputs() (inputs []Output) { + mm := map[Output]struct{}{} + + f.action.allOutputs(mm) + + for o := range mm { + inputs = append(inputs, o) + } + return inputs +} + +func getIndex(input pb.InputIndex, len int, relative *int) pb.InputIndex { + if relative != nil { + return pb.InputIndex(len + *relative) + } + return input +} diff --git a/client/llb/fileop_test.go b/client/llb/fileop_test.go new file mode 100644 index 000000000000..f0a433e945d9 --- /dev/null +++ b/client/llb/fileop_test.go @@ -0,0 +1,586 @@ +package llb + +import ( + "testing" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/stretchr/testify/require" +) + +func TestFileMkdir(t *testing.T) { + t.Parallel() + + st := Image("foo").File(Mkdir("/foo", 0700)) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 1, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, 
int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + + require.Equal(t, "/foo", mkdir.Path) + require.Equal(t, 0700, int(mkdir.Mode)) +} + +func TestFileMkdirChain(t *testing.T) { + t.Parallel() + + st := Image("foo").Dir("/etc").File(Mkdir("/foo", 0700).Mkdir("bar", 0600, WithParents(true)).Mkdir("bar/baz", 0701, WithParents(false))) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 3, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, "/foo", mkdir.Path) + require.Equal(t, 0700, int(mkdir.Mode)) + require.Equal(t, false, mkdir.MakeParents) + require.Nil(t, mkdir.Owner) + + action = f.Actions[1] + require.Equal(t, 3, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, "/etc/bar", mkdir.Path) + require.Equal(t, 0600, int(mkdir.Mode)) + require.Equal(t, true, mkdir.MakeParents) + require.Nil(t, mkdir.Owner) + + action = f.Actions[2] + require.Equal(t, 4, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, "/etc/bar/baz", mkdir.Path) + require.Equal(t, 0701, int(mkdir.Mode)) + require.Equal(t, false, mkdir.MakeParents) + require.Nil(t, mkdir.Owner) +} + +func 
TestFileMkfile(t *testing.T) { + t.Parallel() + + st := Image("foo").File(Mkfile("/foo", 0700, []byte("data"))) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 1, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + mkdir := action.Action.(*pb.FileAction_Mkfile).Mkfile + + require.Equal(t, "/foo", mkdir.Path) + require.Equal(t, 0700, int(mkdir.Mode)) + require.Equal(t, "data", string(mkdir.Data)) +} + +func TestFileRm(t *testing.T) { + t.Parallel() + + st := Image("foo").File(Rm("/foo")) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 1, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + rm := action.Action.(*pb.FileAction_Rm).Rm + require.Equal(t, "/foo", rm.Path) +} + +func TestFileSimpleChains(t *testing.T) { + t.Parallel() + + st := Image("foo").Dir("/tmp"). + File( + Mkdir("foo/bar/", 0700). + Rm("abc"). + Mkfile("foo/bar/baz", 0777, []byte("d0")), + ). + Dir("sub"). + File( + Rm("foo"). 
+ Mkfile("/abc", 0701, []byte("d1")), + ) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 4, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[2]) + + f := arr[2].Op.(*pb.Op_File).File + require.Equal(t, len(arr[2].Inputs), 1) + require.Equal(t, m[arr[2].Inputs[0].Digest], arr[1]) + require.Equal(t, 0, int(arr[2].Inputs[0].Index)) + require.Equal(t, 2, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + rm := action.Action.(*pb.FileAction_Rm).Rm + require.Equal(t, "/tmp/sub/foo", rm.Path) + + action = f.Actions[1] + require.Equal(t, 2, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile + require.Equal(t, "/abc", mkfile.Path) + + f = arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + require.Equal(t, 3, len(f.Actions)) + + action = f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, "/tmp/foo/bar", mkdir.Path) + + action = f.Actions[1] + require.Equal(t, 3, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + rm = action.Action.(*pb.FileAction_Rm).Rm + require.Equal(t, "/tmp/abc", rm.Path) + + action = f.Actions[2] + require.Equal(t, 4, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + mkfile = action.Action.(*pb.FileAction_Mkfile).Mkfile + require.Equal(t, "/tmp/foo/bar/baz", mkfile.Path) +} 
+ +func TestFileCopy(t *testing.T) { + t.Parallel() + + st := Image("foo").Dir("/tmp").File(Copy(Image("bar").Dir("/etc"), "foo", "bar")) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 4, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[2]) + + f := arr[2].Op.(*pb.Op_File).File + require.Equal(t, 2, len(arr[2].Inputs)) + require.Equal(t, "docker-image://docker.io/library/foo:latest", m[arr[2].Inputs[0].Digest].Op.(*pb.Op_Source).Source.Identifier) + require.Equal(t, 0, int(arr[2].Inputs[0].Index)) + require.Equal(t, "docker-image://docker.io/library/bar:latest", m[arr[2].Inputs[1].Digest].Op.(*pb.Op_Source).Source.Identifier) + require.Equal(t, 0, int(arr[2].Inputs[1].Index)) + + require.Equal(t, 1, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, 1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + copy := action.Action.(*pb.FileAction_Copy).Copy + + require.Equal(t, "/etc/foo", copy.Src) + require.Equal(t, "/tmp/bar", copy.Dest) +} + +func TestFileCopyFromAction(t *testing.T) { + t.Parallel() + + st := Image("foo").Dir("/out").File( + Copy( + Mkdir("foo", 0700). + Mkfile("foo/bar", 0600, []byte("dt")). 
+ WithState(Scratch().Dir("/tmp")), + "foo/bar", "baz")) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, 1, len(arr[1].Inputs)) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 3, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, -1, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + + require.Equal(t, "/tmp/foo", mkdir.Path) + require.Equal(t, 0700, int(mkdir.Mode)) + + action = f.Actions[1] + require.Equal(t, 3, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile + + require.Equal(t, "/tmp/foo/bar", mkfile.Path) + require.Equal(t, 0600, int(mkfile.Mode)) + require.Equal(t, "dt", string(mkfile.Data)) + + action = f.Actions[2] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, 4, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + copy := action.Action.(*pb.FileAction_Copy).Copy + + require.Equal(t, "/tmp/foo/bar", copy.Src) + require.Equal(t, "/out/baz", copy.Dest) +} + +func TestFilePipeline(t *testing.T) { + t.Parallel() + + st := Image("foo").Dir("/out"). + File( + Copy( + Mkdir("foo", 0700). + Mkfile("foo/bar", 0600, []byte("dt")). + WithState(Image("bar").Dir("/tmp")), + "foo/bar", "baz"). + Rm("foo/bax"), + ). + File( + Mkdir("/bar", 0701). + Copy(Image("foo"), "in", "out"). 
+ Copy(Image("baz").Dir("/base"), "in2", "out2"), + ) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + + require.Equal(t, 6, len(arr)) // 3 img + 2 file + pointer + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[4]) + + f := arr[4].Op.(*pb.Op_File).File + require.Equal(t, 3, len(arr[4].Inputs)) + + require.Equal(t, "docker-image://docker.io/library/foo:latest", m[arr[4].Inputs[1].Digest].Op.(*pb.Op_Source).Source.Identifier) + require.Equal(t, 0, int(arr[4].Inputs[1].Index)) + require.Equal(t, "docker-image://docker.io/library/baz:latest", m[arr[4].Inputs[2].Digest].Op.(*pb.Op_Source).Source.Identifier) + require.Equal(t, 0, int(arr[4].Inputs[2].Index)) + + require.Equal(t, 3, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + + require.Equal(t, "/bar", mkdir.Path) + require.Equal(t, 0701, int(mkdir.Mode)) + + action = f.Actions[1] + require.Equal(t, 3, int(action.Input)) + require.Equal(t, 1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + copy := action.Action.(*pb.FileAction_Copy).Copy + + require.Equal(t, "/in", copy.Src) + require.Equal(t, "/out/out", copy.Dest) + + action = f.Actions[2] + require.Equal(t, 4, int(action.Input)) + require.Equal(t, 2, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + copy = action.Action.(*pb.FileAction_Copy).Copy + + require.Equal(t, "/base/in2", copy.Src) + require.Equal(t, "/out/out2", copy.Dest) + + f = m[arr[4].Inputs[0].Digest].Op.(*pb.Op_File).File + op := m[arr[4].Inputs[0].Digest] + require.Equal(t, 2, len(op.Inputs)) + require.Equal(t, 4, len(f.Actions)) + + action = f.Actions[0] + require.Equal(t, 1, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, 
int(action.Output)) + require.Equal(t, "docker-image://docker.io/library/bar:latest", m[op.Inputs[1].Digest].Op.(*pb.Op_Source).Source.Identifier) + mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir + + require.Equal(t, "/tmp/foo", mkdir.Path) + require.Equal(t, 0700, int(mkdir.Mode)) + + action = f.Actions[1] + require.Equal(t, 4, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile + + require.Equal(t, "/tmp/foo/bar", mkfile.Path) + require.Equal(t, 0600, int(mkfile.Mode)) + require.Equal(t, "dt", string(mkfile.Data)) + + action = f.Actions[2] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, 5, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + require.Equal(t, arr[4].Inputs[1].Digest, op.Inputs[0].Digest) + + copy = action.Action.(*pb.FileAction_Copy).Copy + + require.Equal(t, "/tmp/foo/bar", copy.Src) + require.Equal(t, "/out/baz", copy.Dest) + + action = f.Actions[3] + require.Equal(t, 6, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + rm := action.Action.(*pb.FileAction_Rm).Rm + require.Equal(t, "/out/foo/bax", rm.Path) +} + +func TestFileOwner(t *testing.T) { + t.Parallel() + + st := Image("foo").File(Mkdir("/foo", 0700).Mkdir("bar", 0600, WithUIDGID(123, 456)).Mkdir("bar/baz", 0701, WithUser("foouser"))) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 3, len(f.Actions)) + + action := f.Actions[0] + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Nil(t, 
mkdir.Owner) + + action = f.Actions[1] + mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, 123, int(mkdir.Owner.User.Id)) + require.Equal(t, "", mkdir.Owner.User.Name) + require.Equal(t, -1, int(mkdir.Owner.User.Input)) + require.Equal(t, 456, int(mkdir.Owner.Group.Id)) + require.Equal(t, "", mkdir.Owner.Group.Name) + require.Equal(t, -1, int(mkdir.Owner.Group.Input)) + + action = f.Actions[2] + mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, 0, int(mkdir.Owner.User.Id)) + require.Equal(t, "foouser", mkdir.Owner.User.Name) + require.Equal(t, 0, int(mkdir.Owner.User.Input)) + require.Nil(t, mkdir.Owner.Group) +} + +func TestFileCopyOwner(t *testing.T) { + t.Parallel() + + st := Scratch(). + File(Mkdir("/foo", 0700, WithUser("user1")). + Copy(Image("foo"), "src1", "dst", WithUser("user2")). + Copy( + Copy(Scratch(), "src0", "src2", WithUser("user3")).WithState(Image("foo")), + "src2", "dst", WithUser("user4")). + Copy(Image("foo"), "src3", "dst", WithUIDGID(1, 2)), + ) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 5, len(f.Actions)) + + action := f.Actions[0] + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, 0, int(mkdir.Owner.User.Id)) + require.Equal(t, "user1", mkdir.Owner.User.Name) + require.Equal(t, -1, int(mkdir.Owner.User.Input)) + require.Nil(t, mkdir.Owner.Group) + + action = f.Actions[1] + copy := action.Action.(*pb.FileAction_Copy).Copy + require.Equal(t, "/src1", copy.Src) + require.Equal(t, 0, int(copy.Owner.User.Id)) + require.Equal(t, "user2", copy.Owner.User.Name) + require.Equal(t, -1, int(copy.Owner.User.Input)) + 
require.Nil(t, copy.Owner.Group) + + action = f.Actions[2] + copy = action.Action.(*pb.FileAction_Copy).Copy + require.Equal(t, "/src0", copy.Src) + require.Equal(t, 0, int(copy.Owner.User.Id)) + require.Equal(t, "user3", copy.Owner.User.Name) + require.Equal(t, 0, int(copy.Owner.User.Input)) + require.Nil(t, copy.Owner.Group) + + action = f.Actions[3] + copy = action.Action.(*pb.FileAction_Copy).Copy + require.Equal(t, "/src2", copy.Src) + require.Equal(t, 0, int(copy.Owner.User.Id)) + require.Equal(t, "user4", copy.Owner.User.Name) + require.Equal(t, -1, int(copy.Owner.User.Input)) + require.Nil(t, copy.Owner.Group) + + action = f.Actions[4] + copy = action.Action.(*pb.FileAction_Copy).Copy + require.Equal(t, "/src3", copy.Src) + require.Equal(t, 1, int(copy.Owner.User.Id)) + require.Equal(t, "", copy.Owner.User.Name) + require.Equal(t, -1, int(copy.Owner.User.Input)) + require.Equal(t, 2, int(copy.Owner.Group.Id)) + require.Equal(t, "", copy.Owner.Group.Name) + require.Equal(t, -1, int(copy.Owner.Group.Input)) +} + +func parseDef(t *testing.T, def [][]byte) (map[digest.Digest]pb.Op, []pb.Op) { + m := map[digest.Digest]pb.Op{} + arr := make([]pb.Op, 0, len(def)) + + for _, dt := range def { + var op pb.Op + err := (&op).Unmarshal(dt) + require.NoError(t, err) + dgst := digest.FromBytes(dt) + m[dgst] = op + arr = append(arr, op) + // fmt.Printf(":: %T %+v\n", op.Op, op) + } + + return m, arr +} + +func last(t *testing.T, arr []pb.Op) (digest.Digest, int) { + require.True(t, len(arr) > 1) + + op := arr[len(arr)-1] + require.Equal(t, 1, len(op.Inputs)) + return op.Inputs[0].Digest, int(op.Inputs[0].Index) +} diff --git a/client/llb/state.go b/client/llb/state.go index a07f5171ebe1..5929e19a506e 100644 --- a/client/llb/state.go +++ b/client/llb/state.go @@ -229,6 +229,10 @@ func (s State) Run(ro ...RunOption) ExecState { } } +func (s State) File(a *FileAction) State { + return s.WithOutput(NewFileOp(s, a).Output()) +} + func (s State) AddEnv(key, value string) State 
{ return s.AddEnvf(key, value) } @@ -295,6 +299,8 @@ func (s State) AddExtraHost(host string, ip net.IP) State { return extraHost(host, ip)(s) } +func (s State) isFileOpCopyInput() {} + type output struct { vertex Vertex getIndex func() (pb.OutputIndex, error) diff --git a/solver/pb/attr.go b/solver/pb/attr.go index f44c4b477101..97d2971cbb00 100644 --- a/solver/pb/attr.go +++ b/solver/pb/attr.go @@ -21,3 +21,5 @@ const AttrImageResolveModeDefault = "default" const AttrImageResolveModeForcePull = "pull" const AttrImageResolveModePreferLocal = "local" const AttrImageRecordType = "image.recordtype" + +type IsFileAction = isFileAction_Action diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index 7ec6596b024b..8ec78919836d 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -54,7 +54,7 @@ func (x NetMode) String() string { return proto.EnumName(NetMode_name, int32(x)) } func (NetMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{0} + return fileDescriptor_ops_68b9efdb358e5df5, []int{0} } // MountType defines a type of a mount from a supported set @@ -87,7 +87,7 @@ func (x MountType) String() string { return proto.EnumName(MountType_name, int32(x)) } func (MountType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{1} + return fileDescriptor_ops_68b9efdb358e5df5, []int{1} } // CacheSharingOpt defines different sharing modes for cache mount @@ -117,7 +117,7 @@ func (x CacheSharingOpt) String() string { return proto.EnumName(CacheSharingOpt_name, int32(x)) } func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{2} + return fileDescriptor_ops_68b9efdb358e5df5, []int{2} } // Op represents a vertex of the LLB DAG. 
@@ -127,7 +127,7 @@ type Op struct { // Types that are valid to be assigned to Op: // *Op_Exec // *Op_Source - // *Op_Copy + // *Op_File // *Op_Build Op isOp_Op `protobuf_oneof:"op"` Platform *Platform `protobuf:"bytes,10,opt,name=platform,proto3" json:"platform,omitempty"` @@ -138,7 +138,7 @@ func (m *Op) Reset() { *m = Op{} } func (m *Op) String() string { return proto.CompactTextString(m) } func (*Op) ProtoMessage() {} func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{0} + return fileDescriptor_ops_68b9efdb358e5df5, []int{0} } func (m *Op) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -175,8 +175,8 @@ type Op_Exec struct { type Op_Source struct { Source *SourceOp `protobuf:"bytes,3,opt,name=source,proto3,oneof"` } -type Op_Copy struct { - Copy *CopyOp `protobuf:"bytes,4,opt,name=copy,proto3,oneof"` +type Op_File struct { + File *FileOp `protobuf:"bytes,4,opt,name=file,proto3,oneof"` } type Op_Build struct { Build *BuildOp `protobuf:"bytes,5,opt,name=build,proto3,oneof"` @@ -184,7 +184,7 @@ type Op_Build struct { func (*Op_Exec) isOp_Op() {} func (*Op_Source) isOp_Op() {} -func (*Op_Copy) isOp_Op() {} +func (*Op_File) isOp_Op() {} func (*Op_Build) isOp_Op() {} func (m *Op) GetOp() isOp_Op { @@ -215,9 +215,9 @@ func (m *Op) GetSource() *SourceOp { return nil } -func (m *Op) GetCopy() *CopyOp { - if x, ok := m.GetOp().(*Op_Copy); ok { - return x.Copy +func (m *Op) GetFile() *FileOp { + if x, ok := m.GetOp().(*Op_File); ok { + return x.File } return nil } @@ -248,7 +248,7 @@ func (*Op) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, fun return _Op_OneofMarshaler, _Op_OneofUnmarshaler, _Op_OneofSizer, []interface{}{ (*Op_Exec)(nil), (*Op_Source)(nil), - (*Op_Copy)(nil), + (*Op_File)(nil), (*Op_Build)(nil), } } @@ -267,9 +267,9 @@ func _Op_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { if err := b.EncodeMessage(x.Source); err != nil { return err } - case *Op_Copy: + case *Op_File: _ = 
b.EncodeVarint(4<<3 | proto.WireBytes) - if err := b.EncodeMessage(x.Copy); err != nil { + if err := b.EncodeMessage(x.File); err != nil { return err } case *Op_Build: @@ -303,13 +303,13 @@ func _Op_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bo err := b.DecodeMessage(msg) m.Op = &Op_Source{msg} return true, err - case 4: // op.copy + case 4: // op.file if wire != proto.WireBytes { return true, proto.ErrInternalBadWireType } - msg := new(CopyOp) + msg := new(FileOp) err := b.DecodeMessage(msg) - m.Op = &Op_Copy{msg} + m.Op = &Op_File{msg} return true, err case 5: // op.build if wire != proto.WireBytes { @@ -338,8 +338,8 @@ func _Op_OneofSizer(msg proto.Message) (n int) { n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s - case *Op_Copy: - s := proto.Size(x.Copy) + case *Op_File: + s := proto.Size(x.File) n += 1 // tag and wire n += proto.SizeVarint(uint64(s)) n += s @@ -368,7 +368,7 @@ func (m *Platform) Reset() { *m = Platform{} } func (m *Platform) String() string { return proto.CompactTextString(m) } func (*Platform) ProtoMessage() {} func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{1} + return fileDescriptor_ops_68b9efdb358e5df5, []int{1} } func (m *Platform) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +440,7 @@ func (m *Input) Reset() { *m = Input{} } func (m *Input) String() string { return proto.CompactTextString(m) } func (*Input) ProtoMessage() {} func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{2} + return fileDescriptor_ops_68b9efdb358e5df5, []int{2} } func (m *Input) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -476,7 +476,7 @@ func (m *ExecOp) Reset() { *m = ExecOp{} } func (m *ExecOp) String() string { return proto.CompactTextString(m) } func (*ExecOp) ProtoMessage() {} func (*ExecOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{3} + return 
fileDescriptor_ops_68b9efdb358e5df5, []int{3} } func (m *ExecOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -538,7 +538,7 @@ func (m *Meta) Reset() { *m = Meta{} } func (m *Meta) String() string { return proto.CompactTextString(m) } func (*Meta) ProtoMessage() {} func (*Meta) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{4} + return fileDescriptor_ops_68b9efdb358e5df5, []int{4} } func (m *Meta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -622,7 +622,7 @@ func (m *Mount) Reset() { *m = Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{5} + return fileDescriptor_ops_68b9efdb358e5df5, []int{5} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -708,7 +708,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{6} + return fileDescriptor_ops_68b9efdb358e5df5, []int{6} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +766,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{7} + return fileDescriptor_ops_68b9efdb358e5df5, []int{7} } func (m *SecretOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -845,7 +845,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func (*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{8} + return 
fileDescriptor_ops_68b9efdb358e5df5, []int{8} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -905,97 +905,6 @@ func (m *SSHOpt) GetOptional() bool { return false } -// CopyOp copies files across Ops. -type CopyOp struct { - Src []*CopySource `protobuf:"bytes,1,rep,name=src,proto3" json:"src,omitempty"` - Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` -} - -func (m *CopyOp) Reset() { *m = CopyOp{} } -func (m *CopyOp) String() string { return proto.CompactTextString(m) } -func (*CopyOp) ProtoMessage() {} -func (*CopyOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{9} -} -func (m *CopyOp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CopyOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (dst *CopyOp) XXX_Merge(src proto.Message) { - xxx_messageInfo_CopyOp.Merge(dst, src) -} -func (m *CopyOp) XXX_Size() int { - return m.Size() -} -func (m *CopyOp) XXX_DiscardUnknown() { - xxx_messageInfo_CopyOp.DiscardUnknown(m) -} - -var xxx_messageInfo_CopyOp proto.InternalMessageInfo - -func (m *CopyOp) GetSrc() []*CopySource { - if m != nil { - return m.Src - } - return nil -} - -func (m *CopyOp) GetDest() string { - if m != nil { - return m.Dest - } - return "" -} - -// CopySource specifies a source for CopyOp. 
-type CopySource struct { - Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` - Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"` -} - -func (m *CopySource) Reset() { *m = CopySource{} } -func (m *CopySource) String() string { return proto.CompactTextString(m) } -func (*CopySource) ProtoMessage() {} -func (*CopySource) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{10} -} -func (m *CopySource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CopySource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalTo(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (dst *CopySource) XXX_Merge(src proto.Message) { - xxx_messageInfo_CopySource.Merge(dst, src) -} -func (m *CopySource) XXX_Size() int { - return m.Size() -} -func (m *CopySource) XXX_DiscardUnknown() { - xxx_messageInfo_CopySource.DiscardUnknown(m) -} - -var xxx_messageInfo_CopySource proto.InternalMessageInfo - -func (m *CopySource) GetSelector() string { - if m != nil { - return m.Selector - } - return "" -} - // SourceOp specifies a source such as build contexts and images. type SourceOp struct { // TODO: use source type or any type instead of URL protocol. 
@@ -1009,7 +918,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{11} + return fileDescriptor_ops_68b9efdb358e5df5, []int{9} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1061,7 +970,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{12} + return fileDescriptor_ops_68b9efdb358e5df5, []int{10} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1116,7 +1025,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{13} + return fileDescriptor_ops_68b9efdb358e5df5, []int{11} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1157,7 +1066,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{14} + return fileDescriptor_ops_68b9efdb358e5df5, []int{12} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1218,7 +1127,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m *ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{15} + return 
fileDescriptor_ops_68b9efdb358e5df5, []int{13} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1261,7 +1170,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{16} + return fileDescriptor_ops_68b9efdb358e5df5, []int{14} } func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1323,7 +1232,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{17} + return fileDescriptor_ops_68b9efdb358e5df5, []int{15} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1368,7 +1277,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{18} + return fileDescriptor_ops_68b9efdb358e5df5, []int{16} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1416,7 +1325,7 @@ func (m *HostIP) Reset() { *m = HostIP{} } func (m *HostIP) String() string { return proto.CompactTextString(m) } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_821a7942fdf920e6, []int{19} + return fileDescriptor_ops_68b9efdb358e5df5, []int{17} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1455,598 +1364,705 @@ func (m *HostIP) GetIP() string { return "" } -func init() { - proto.RegisterType((*Op)(nil), "pb.Op") - proto.RegisterType((*Platform)(nil), 
"pb.Platform") - proto.RegisterType((*Input)(nil), "pb.Input") - proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") - proto.RegisterType((*Meta)(nil), "pb.Meta") - proto.RegisterType((*Mount)(nil), "pb.Mount") - proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") - proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") - proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") - proto.RegisterType((*CopyOp)(nil), "pb.CopyOp") - proto.RegisterType((*CopySource)(nil), "pb.CopySource") - proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") - proto.RegisterMapType((map[string]string)(nil), "pb.SourceOp.AttrsEntry") - proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") - proto.RegisterMapType((map[string]string)(nil), "pb.BuildOp.AttrsEntry") - proto.RegisterMapType((map[string]*BuildInput)(nil), "pb.BuildOp.InputsEntry") - proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") - proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") - proto.RegisterMapType((map[github_com_moby_buildkit_util_apicaps.CapID]bool)(nil), "pb.OpMetadata.CapsEntry") - proto.RegisterMapType((map[string]string)(nil), "pb.OpMetadata.DescriptionEntry") - proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") - proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") - proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") - proto.RegisterType((*Definition)(nil), "pb.Definition") - proto.RegisterMapType((map[github_com_opencontainers_go_digest.Digest]OpMetadata)(nil), "pb.Definition.MetadataEntry") - proto.RegisterType((*HostIP)(nil), "pb.HostIP") - proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) - proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) - proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value) +type FileOp struct { + Actions []*FileAction `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` } -func (m *Op) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) + +func (m *FileOp) Reset() { *m = FileOp{} } +func (m *FileOp) String() string { return proto.CompactTextString(m) } +func (*FileOp) ProtoMessage() {} +func (*FileOp) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{18} +} +func (m *FileOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (dst *FileOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileOp.Merge(dst, src) +} +func (m *FileOp) XXX_Size() int { + return m.Size() +} +func (m *FileOp) XXX_DiscardUnknown() { + xxx_messageInfo_FileOp.DiscardUnknown(m) } -func (m *Op) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Inputs) > 0 { - for _, msg := range m.Inputs { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } +var xxx_messageInfo_FileOp proto.InternalMessageInfo + +func (m *FileOp) GetActions() []*FileAction { + if m != nil { + return m.Actions } - if m.Op != nil { - nn1, err := m.Op.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += nn1 + return nil +} + +type FileAction struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` + SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"` + Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"` + // Types that are valid to be assigned to Action: + // *FileAction_Copy + // *FileAction_Mkfile + // *FileAction_Mkdir + // *FileAction_Rm + Action isFileAction_Action `protobuf_oneof:"action"` +} + +func (m *FileAction) Reset() { *m = FileAction{} } +func (m *FileAction) 
String() string { return proto.CompactTextString(m) } +func (*FileAction) ProtoMessage() {} +func (*FileAction) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{19} +} +func (m *FileAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err } - if m.Platform != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Platform.Size())) - n2, err := m.Platform.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 + return b[:n], nil +} +func (dst *FileAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileAction.Merge(dst, src) +} +func (m *FileAction) XXX_Size() int { + return m.Size() +} +func (m *FileAction) XXX_DiscardUnknown() { + xxx_messageInfo_FileAction.DiscardUnknown(m) +} + +var xxx_messageInfo_FileAction proto.InternalMessageInfo + +type isFileAction_Action interface { + isFileAction_Action() + MarshalTo([]byte) (int, error) + Size() int +} + +type FileAction_Copy struct { + Copy *FileActionCopy `protobuf:"bytes,4,opt,name=copy,proto3,oneof"` +} +type FileAction_Mkfile struct { + Mkfile *FileActionMkFile `protobuf:"bytes,5,opt,name=mkfile,proto3,oneof"` +} +type FileAction_Mkdir struct { + Mkdir *FileActionMkDir `protobuf:"bytes,6,opt,name=mkdir,proto3,oneof"` +} +type FileAction_Rm struct { + Rm *FileActionRm `protobuf:"bytes,7,opt,name=rm,proto3,oneof"` +} + +func (*FileAction_Copy) isFileAction_Action() {} +func (*FileAction_Mkfile) isFileAction_Action() {} +func (*FileAction_Mkdir) isFileAction_Action() {} +func (*FileAction_Rm) isFileAction_Action() {} + +func (m *FileAction) GetAction() isFileAction_Action { + if m != nil { + return m.Action } - if m.Constraints != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Constraints.Size())) - n3, err := m.Constraints.MarshalTo(dAtA[i:]) - if err != nil { 
- return 0, err - } - i += n3 + return nil +} + +func (m *FileAction) GetCopy() *FileActionCopy { + if x, ok := m.GetAction().(*FileAction_Copy); ok { + return x.Copy } - return i, nil + return nil } -func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Exec != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Exec.Size())) - n4, err := m.Exec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 +func (m *FileAction) GetMkfile() *FileActionMkFile { + if x, ok := m.GetAction().(*FileAction_Mkfile); ok { + return x.Mkfile } - return i, nil + return nil } -func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Source != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Source.Size())) - n5, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 + +func (m *FileAction) GetMkdir() *FileActionMkDir { + if x, ok := m.GetAction().(*FileAction_Mkdir); ok { + return x.Mkdir } - return i, nil + return nil } -func (m *Op_Copy) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Copy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Copy.Size())) - n6, err := m.Copy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 + +func (m *FileAction) GetRm() *FileActionRm { + if x, ok := m.GetAction().(*FileAction_Rm); ok { + return x.Rm } - return i, nil + return nil } -func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { - i := 0 - if m.Build != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Build.Size())) - n7, err := m.Build.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*FileAction) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _FileAction_OneofMarshaler, _FileAction_OneofUnmarshaler, _FileAction_OneofSizer, []interface{}{ + (*FileAction_Copy)(nil), + (*FileAction_Mkfile)(nil), + (*FileAction_Mkdir)(nil), + (*FileAction_Rm)(nil), } - return i, nil } -func (m *Platform) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err + +func _FileAction_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*FileAction) + // action + switch x := m.Action.(type) { + case *FileAction_Copy: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Copy); err != nil { + return err + } + case *FileAction_Mkfile: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mkfile); err != nil { + return err + } + case *FileAction_Mkdir: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Mkdir); err != nil { + return err + } + case *FileAction_Rm: + _ = b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Rm); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("FileAction.Action has unexpected type %T", x) } - return dAtA[:n], nil + return nil } -func (m *Platform) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Architecture) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Architecture))) - i += copy(dAtA[i:], m.Architecture) - } - if len(m.OS) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.OS))) - i += copy(dAtA[i:], m.OS) - } - if len(m.Variant) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Variant))) - i += copy(dAtA[i:], m.Variant) - } - if len(m.OSVersion) > 0 { - dAtA[i] = 
0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.OSVersion))) - i += copy(dAtA[i:], m.OSVersion) - } - if len(m.OSFeatures) > 0 { - for _, s := range m.OSFeatures { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) +func _FileAction_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*FileAction) + switch tag { + case 4: // action.copy + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileActionCopy) + err := b.DecodeMessage(msg) + m.Action = &FileAction_Copy{msg} + return true, err + case 5: // action.mkfile + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileActionMkFile) + err := b.DecodeMessage(msg) + m.Action = &FileAction_Mkfile{msg} + return true, err + case 6: // action.mkdir + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileActionMkDir) + err := b.DecodeMessage(msg) + m.Action = &FileAction_Mkdir{msg} + return true, err + case 7: // action.rm + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType } + msg := new(FileActionRm) + err := b.DecodeMessage(msg) + m.Action = &FileAction_Rm{msg} + return true, err + default: + return false, nil } - return i, nil } -func (m *Input) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func _FileAction_OneofSizer(msg proto.Message) (n int) { + m := msg.(*FileAction) + // action + switch x := m.Action.(type) { + case *FileAction_Copy: + s := proto.Size(x.Copy) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *FileAction_Mkfile: + s := proto.Size(x.Mkfile) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *FileAction_Mkdir: + s := proto.Size(x.Mkdir) + n += 1 
// tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *FileAction_Rm: + s := proto.Size(x.Rm) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) } - return dAtA[:n], nil + return n } -func (m *Input) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Digest) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) - i += copy(dAtA[i:], m.Digest) - } - if m.Index != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Index)) - } - return i, nil +type FileActionCopy struct { + Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` + Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + Mode int32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + FollowSymlink bool `protobuf:"varint,6,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` + DirCopyContents bool `protobuf:"varint,7,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` + AttemptUnpack bool `protobuf:"varint,8,opt,name=attemptUnpack,proto3" json:"attemptUnpack,omitempty"` + CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` + AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` } -func (m *ExecOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } +func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } +func (*FileActionCopy) ProtoMessage() {} +func (*FileActionCopy) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{20} +} +func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionCopy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (dst *FileActionCopy) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionCopy.Merge(dst, src) +} +func (m *FileActionCopy) XXX_Size() int { + return m.Size() +} +func (m *FileActionCopy) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionCopy.DiscardUnknown(m) } -func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Meta != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Meta.Size())) - n8, err := m.Meta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if len(m.Mounts) > 0 { - for _, msg := range m.Mounts { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Network != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Network)) +var xxx_messageInfo_FileActionCopy proto.InternalMessageInfo + +func (m *FileActionCopy) GetSrc() string { + if m != nil { + return m.Src } - return i, nil + return "" } -func (m *Meta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *FileActionCopy) GetDest() string { + if m != nil { + return m.Dest } - return dAtA[:n], nil + return "" } -func (m *Meta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += 
copy(dAtA[i:], s) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Cwd) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) - i += copy(dAtA[i:], m.Cwd) +func (m *FileActionCopy) GetOwner() *ChownOpt { + if m != nil { + return m.Owner } - if len(m.User) > 0 { - dAtA[i] = 0x22 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) + return nil +} + +func (m *FileActionCopy) GetMode() int32 { + if m != nil { + return m.Mode } - if m.ProxyEnv != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintOps(dAtA, i, uint64(m.ProxyEnv.Size())) - n9, err := m.ProxyEnv.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 + return 0 +} + +func (m *FileActionCopy) GetFollowSymlink() bool { + if m != nil { + return m.FollowSymlink } - if len(m.ExtraHosts) > 0 { - for _, msg := range m.ExtraHosts { - dAtA[i] = 0x32 - i++ - i = encodeVarintOps(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } + return false +} + +func (m *FileActionCopy) GetDirCopyContents() bool { + if m != nil { + return m.DirCopyContents } - return i, nil + return false } -func (m *Mount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *FileActionCopy) GetAttemptUnpack() bool { + if m != nil { + return m.AttemptUnpack } - return dAtA[:n], nil + return false } -func (m *Mount) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Input != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Input)) - } - if len(m.Selector) > 0 { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], 
m.Selector) - } - if len(m.Dest) > 0 { - dAtA[i] = 0x1a - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) - i += copy(dAtA[i:], m.Dest) - } - if m.Output != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Output)) - } - if m.Readonly { - dAtA[i] = 0x28 - i++ - if m.Readonly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.MountType != 0 { - dAtA[i] = 0x30 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.MountType)) - } - if m.CacheOpt != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.CacheOpt.Size())) - n10, err := m.CacheOpt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - if m.SecretOpt != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.SecretOpt.Size())) - n11, err := m.SecretOpt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - if m.SSHOpt != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.SSHOpt.Size())) - n12, err := m.SSHOpt.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 +func (m *FileActionCopy) GetCreateDestPath() bool { + if m != nil { + return m.CreateDestPath } - return i, nil + return false } -func (m *CacheOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err +func (m *FileActionCopy) GetAllowWildcard() bool { + if m != nil { + return m.AllowWildcard } - return dAtA[:n], nil + return false } -func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Sharing != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Sharing)) +func (m *FileActionCopy) GetAllowEmptyWildcard() bool { + if m != nil { + return m.AllowEmptyWildcard } - return 
i, nil + return false } -func (m *SecretOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +type FileActionMkFile struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } +func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } +func (*FileActionMkFile) ProtoMessage() {} +func (*FileActionMkFile) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{21} +} +func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionMkFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (dst *FileActionMkFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionMkFile.Merge(dst, src) +} +func (m *FileActionMkFile) XXX_Size() int { + return m.Size() +} +func (m *FileActionMkFile) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionMkFile.DiscardUnknown(m) } -func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Uid != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Uid)) +var xxx_messageInfo_FileActionMkFile proto.InternalMessageInfo + +func (m *FileActionMkFile) GetPath() string { + if m != nil { + return m.Path } - if m.Gid != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Gid)) + return "" +} + +func (m *FileActionMkFile) 
GetMode() int32 { + if m != nil { + return m.Mode } - if m.Mode != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + return 0 +} + +func (m *FileActionMkFile) GetData() []byte { + if m != nil { + return m.Data } - if m.Optional { - dAtA[i] = 0x28 - i++ - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return nil +} + +func (m *FileActionMkFile) GetOwner() *ChownOpt { + if m != nil { + return m.Owner } - return i, nil + return nil } -func (m *SSHOpt) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +type FileActionMkDir struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + MakeParents bool `protobuf:"varint,3,opt,name=makeParents,proto3" json:"makeParents,omitempty"` + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } +func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } +func (*FileActionMkDir) ProtoMessage() {} +func (*FileActionMkDir) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{22} +} +func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionMkDir) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (dst *FileActionMkDir) XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionMkDir.Merge(dst, src) +} +func (m *FileActionMkDir) XXX_Size() int { + return m.Size() +} +func (m *FileActionMkDir) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionMkDir.DiscardUnknown(m) } -func (m *SSHOpt) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ID) > 0 { - dAtA[i] = 0xa 
- i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) - i += copy(dAtA[i:], m.ID) - } - if m.Uid != 0 { - dAtA[i] = 0x10 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Uid)) +var xxx_messageInfo_FileActionMkDir proto.InternalMessageInfo + +func (m *FileActionMkDir) GetPath() string { + if m != nil { + return m.Path } - if m.Gid != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Gid)) + return "" +} + +func (m *FileActionMkDir) GetMode() int32 { + if m != nil { + return m.Mode } - if m.Mode != 0 { - dAtA[i] = 0x20 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + return 0 +} + +func (m *FileActionMkDir) GetMakeParents() bool { + if m != nil { + return m.MakeParents } - if m.Optional { - dAtA[i] = 0x28 - i++ - if m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ + return false +} + +func (m *FileActionMkDir) GetOwner() *ChownOpt { + if m != nil { + return m.Owner } - return i, nil + return nil } -func (m *CopyOp) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) +type FileActionRm struct { + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` + AllowWildcard bool `protobuf:"varint,3,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` +} + +func (m *FileActionRm) Reset() { *m = FileActionRm{} } +func (m *FileActionRm) String() string { return proto.CompactTextString(m) } +func (*FileActionRm) ProtoMessage() {} +func (*FileActionRm) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{23} +} +func (m *FileActionRm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FileActionRm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) if err != nil { return nil, err } - return dAtA[:n], nil + return b[:n], nil +} +func (dst *FileActionRm) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_FileActionRm.Merge(dst, src) +} +func (m *FileActionRm) XXX_Size() int { + return m.Size() +} +func (m *FileActionRm) XXX_DiscardUnknown() { + xxx_messageInfo_FileActionRm.DiscardUnknown(m) } -func (m *CopyOp) MarshalTo(dAtA []byte) (int, error) { - var i int +var xxx_messageInfo_FileActionRm proto.InternalMessageInfo + +func (m *FileActionRm) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *FileActionRm) GetAllowNotFound() bool { + if m != nil { + return m.AllowNotFound + } + return false +} + +func (m *FileActionRm) GetAllowWildcard() bool { + if m != nil { + return m.AllowWildcard + } + return false +} + +type ChownOpt struct { + User *UserOpt `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"` + Group *UserOpt `protobuf:"bytes,2,opt,name=group,proto3" json:"group,omitempty"` +} + +func (m *ChownOpt) Reset() { *m = ChownOpt{} } +func (m *ChownOpt) String() string { return proto.CompactTextString(m) } +func (*ChownOpt) ProtoMessage() {} +func (*ChownOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{24} +} +func (m *ChownOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ChownOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *ChownOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_ChownOpt.Merge(dst, src) +} +func (m *ChownOpt) XXX_Size() int { + return m.Size() +} +func (m *ChownOpt) XXX_DiscardUnknown() { + xxx_messageInfo_ChownOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_ChownOpt proto.InternalMessageInfo + +func (m *ChownOpt) GetUser() *UserOpt { + if m != nil { + return m.User + } + return nil +} + +func (m *ChownOpt) GetGroup() *UserOpt { + if m != nil { + return m.Group + } + return nil +} + +type UserOpt struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" 
json:"name,omitempty"` + Input InputIndex `protobuf:"varint,2,opt,name=input,proto3,customtype=InputIndex" json:"input"` + Id int32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` +} + +func (m *UserOpt) Reset() { *m = UserOpt{} } +func (m *UserOpt) String() string { return proto.CompactTextString(m) } +func (*UserOpt) ProtoMessage() {} +func (*UserOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_68b9efdb358e5df5, []int{25} +} +func (m *UserOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *UserOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserOpt.Merge(dst, src) +} +func (m *UserOpt) XXX_Size() int { + return m.Size() +} +func (m *UserOpt) XXX_DiscardUnknown() { + xxx_messageInfo_UserOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_UserOpt proto.InternalMessageInfo + +func (m *UserOpt) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UserOpt) GetId() int32 { + if m != nil { + return m.Id + } + return 0 +} + +func init() { + proto.RegisterType((*Op)(nil), "pb.Op") + proto.RegisterType((*Platform)(nil), "pb.Platform") + proto.RegisterType((*Input)(nil), "pb.Input") + proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") + proto.RegisterType((*Meta)(nil), "pb.Meta") + proto.RegisterType((*Mount)(nil), "pb.Mount") + proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") + proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") + proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") + proto.RegisterType((*SourceOp)(nil), "pb.SourceOp") + proto.RegisterMapType((map[string]string)(nil), "pb.SourceOp.AttrsEntry") + proto.RegisterType((*BuildOp)(nil), "pb.BuildOp") + proto.RegisterMapType((map[string]string)(nil), "pb.BuildOp.AttrsEntry") + proto.RegisterMapType((map[string]*BuildInput)(nil), 
"pb.BuildOp.InputsEntry") + proto.RegisterType((*BuildInput)(nil), "pb.BuildInput") + proto.RegisterType((*OpMetadata)(nil), "pb.OpMetadata") + proto.RegisterMapType((map[github_com_moby_buildkit_util_apicaps.CapID]bool)(nil), "pb.OpMetadata.CapsEntry") + proto.RegisterMapType((map[string]string)(nil), "pb.OpMetadata.DescriptionEntry") + proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") + proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") + proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") + proto.RegisterType((*Definition)(nil), "pb.Definition") + proto.RegisterMapType((map[github_com_opencontainers_go_digest.Digest]OpMetadata)(nil), "pb.Definition.MetadataEntry") + proto.RegisterType((*HostIP)(nil), "pb.HostIP") + proto.RegisterType((*FileOp)(nil), "pb.FileOp") + proto.RegisterType((*FileAction)(nil), "pb.FileAction") + proto.RegisterType((*FileActionCopy)(nil), "pb.FileActionCopy") + proto.RegisterType((*FileActionMkFile)(nil), "pb.FileActionMkFile") + proto.RegisterType((*FileActionMkDir)(nil), "pb.FileActionMkDir") + proto.RegisterType((*FileActionRm)(nil), "pb.FileActionRm") + proto.RegisterType((*ChownOpt)(nil), "pb.ChownOpt") + proto.RegisterType((*UserOpt)(nil), "pb.UserOpt") + proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) + proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) + proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value) +} +func (m *Op) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Op) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Src) > 0 { - for _, msg := range m.Src { + if len(m.Inputs) > 0 { + for _, msg := range m.Inputs { dAtA[i] = 0xa i++ i = encodeVarintOps(dAtA, i, uint64(msg.Size())) @@ -2057,16 +2073,93 @@ func (m *CopyOp) MarshalTo(dAtA []byte) (int, error) { i 
+= n } } - if len(m.Dest) > 0 { - dAtA[i] = 0x12 + if m.Op != nil { + nn1, err := m.Op.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + if m.Platform != nil { + dAtA[i] = 0x52 i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) - i += copy(dAtA[i:], m.Dest) + i = encodeVarintOps(dAtA, i, uint64(m.Platform.Size())) + n2, err := m.Platform.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Constraints != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Constraints.Size())) + n3, err := m.Constraints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 } return i, nil } -func (m *CopySource) Marshal() (dAtA []byte, err error) { +func (m *Op_Exec) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Exec != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Exec.Size())) + n4, err := m.Exec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *Op_Source) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Source != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Source.Size())) + n5, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *Op_File) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.File != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.File.Size())) + n6, err := m.File.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *Op_Build) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Build != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Build.Size())) + n7, err := m.Build.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *Platform) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2076,26 +2169,54 @@ func (m 
*CopySource) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *CopySource) MarshalTo(dAtA []byte) (int, error) { +func (m *Platform) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Input != 0 { - dAtA[i] = 0x8 + if len(m.Architecture) > 0 { + dAtA[i] = 0xa i++ - i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i = encodeVarintOps(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) } - if len(m.Selector) > 0 { + if len(m.OS) > 0 { dAtA[i] = 0x12 i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) + i = encodeVarintOps(dAtA, i, uint64(len(m.OS))) + i += copy(dAtA[i:], m.OS) + } + if len(m.Variant) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Variant))) + i += copy(dAtA[i:], m.Variant) + } + if len(m.OSVersion) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.OSVersion))) + i += copy(dAtA[i:], m.OSVersion) + } + if len(m.OSFeatures) > 0 { + for _, s := range m.OSFeatures { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } return i, nil } -func (m *SourceOp) Marshal() (dAtA []byte, err error) { +func (m *Input) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2105,43 +2226,26 @@ func (m *SourceOp) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { +func (m *Input) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Identifier) > 0 { + if len(m.Digest) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) - i += copy(dAtA[i:], m.Identifier) + i = encodeVarintOps(dAtA, i, uint64(len(m.Digest))) + i += copy(dAtA[i:], m.Digest) } - if len(m.Attrs) > 0 { - keysForAttrs := make([]string, 0, len(m.Attrs)) - 
for k, _ := range m.Attrs { - keysForAttrs = append(keysForAttrs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) - for _, k := range keysForAttrs { - dAtA[i] = 0x12 - i++ - v := m.Attrs[string(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } + if m.Index != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Index)) } return i, nil } -func (m *BuildOp) Marshal() (dAtA []byte, err error) { +func (m *ExecOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2151,108 +2255,42 @@ func (m *BuildOp) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { +func (m *ExecOp) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Builder != 0 { - dAtA[i] = 0x8 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Builder)) - } - if len(m.Inputs) > 0 { - keysForInputs := make([]string, 0, len(m.Inputs)) - for k, _ := range m.Inputs { - keysForInputs = append(keysForInputs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForInputs) - for _, k := range keysForInputs { - dAtA[i] = 0x12 - i++ - v := m.Inputs[string(k)] - msgSize := 0 - if v != nil { - msgSize = v.Size() - msgSize += 1 + sovOps(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintOps(dAtA, i, uint64(v.Size())) - n13, err := v.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - } - } - if m.Def != nil { 
- dAtA[i] = 0x1a + if m.Meta != nil { + dAtA[i] = 0xa i++ - i = encodeVarintOps(dAtA, i, uint64(m.Def.Size())) - n14, err := m.Def.MarshalTo(dAtA[i:]) + i = encodeVarintOps(dAtA, i, uint64(m.Meta.Size())) + n8, err := m.Meta.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n14 + i += n8 } - if len(m.Attrs) > 0 { - keysForAttrs := make([]string, 0, len(m.Attrs)) - for k, _ := range m.Attrs { - keysForAttrs = append(keysForAttrs, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) - for _, k := range keysForAttrs { - dAtA[i] = 0x22 - i++ - v := m.Attrs[string(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) + if len(m.Mounts) > 0 { + for _, msg := range m.Mounts { dAtA[i] = 0x12 i++ - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return i, nil -} - -func (m *BuildInput) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Input != 0 { - dAtA[i] = 0x8 + if m.Network != 0 { + dAtA[i] = 0x18 i++ - i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i = encodeVarintOps(dAtA, i, uint64(m.Network)) } return i, nil } -func (m *OpMetadata) Marshal() (dAtA []byte, err error) { +func (m *Meta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2262,83 +2300,79 @@ func (m *OpMetadata) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { +func (m *Meta) 
MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.IgnoreCache { - dAtA[i] = 0x8 - i++ - if m.IgnoreCache { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Args) > 0 { + for _, s := range m.Args { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i++ } - if len(m.Description) > 0 { - keysForDescription := make([]string, 0, len(m.Description)) - for k, _ := range m.Description { - keysForDescription = append(keysForDescription, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) - for _, k := range keysForDescription { + if len(m.Env) > 0 { + for _, s := range m.Env { dAtA[i] = 0x12 i++ - v := m.Description[string(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) i++ - i = encodeVarintOps(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) + i += copy(dAtA[i:], s) } } - if m.ExportCache != nil { + if len(m.Cwd) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Cwd))) + i += copy(dAtA[i:], m.Cwd) + } + if len(m.User) > 0 { dAtA[i] = 0x22 i++ - i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size())) - n15, err := m.ExportCache.MarshalTo(dAtA[i:]) + i = encodeVarintOps(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + } + if m.ProxyEnv != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ProxyEnv.Size())) + n9, err := m.ProxyEnv.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n15 + i += n9 } - if len(m.Caps) > 0 { - keysForCaps := make([]string, 0, len(m.Caps)) - for k, _ := range m.Caps { - keysForCaps = append(keysForCaps, 
string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCaps) - for _, k := range keysForCaps { - dAtA[i] = 0x2a - i++ - v := m.Caps[github_com_moby_buildkit_util_apicaps.CapID(k)] - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 - i = encodeVarintOps(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x10 + if len(m.ExtraHosts) > 0 { + for _, msg := range m.ExtraHosts { + dAtA[i] = 0x32 i++ - if v { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i++ + i += n } } return i, nil } -func (m *ExportCache) Marshal() (dAtA []byte, err error) { +func (m *Mount) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2348,25 +2382,88 @@ func (m *ExportCache) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { +func (m *Mount) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if m.Value { + if m.Input != 0 { dAtA[i] = 0x8 i++ - if m.Value { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + if len(m.Selector) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + } + if len(m.Dest) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i += copy(dAtA[i:], m.Dest) + } + if m.Output != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Output)) + } + if m.Readonly { + dAtA[i] = 0x28 + i++ + if m.Readonly { dAtA[i] = 1 } else { dAtA[i] = 0 } i++ } - return i, nil -} - -func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { + if m.MountType != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.MountType)) + } + if m.CacheOpt != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = 
encodeVarintOps(dAtA, i, uint64(m.CacheOpt.Size())) + n10, err := m.CacheOpt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + if m.SecretOpt != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.SecretOpt.Size())) + n11, err := m.SecretOpt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + } + if m.SSHOpt != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.SSHOpt.Size())) + n12, err := m.SSHOpt.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 + } + return i, nil +} + +func (m *CacheOpt) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2376,39 +2473,75 @@ func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { +func (m *CacheOpt) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.HttpProxy) > 0 { + if len(m.ID) > 0 { dAtA[i] = 0xa i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) - i += copy(dAtA[i:], m.HttpProxy) + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - if len(m.HttpsProxy) > 0 { - dAtA[i] = 0x12 + if m.Sharing != 0 { + dAtA[i] = 0x10 i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) - i += copy(dAtA[i:], m.HttpsProxy) + i = encodeVarintOps(dAtA, i, uint64(m.Sharing)) } - if len(m.FtpProxy) > 0 { - dAtA[i] = 0x1a + return i, nil +} + +func (m *SecretOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.ID) > 0 { + dAtA[i] = 0xa i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) - i += copy(dAtA[i:], m.FtpProxy) + i = encodeVarintOps(dAtA, 
i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) } - if len(m.NoProxy) > 0 { - dAtA[i] = 0x22 + if m.Uid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Uid)) + } + if m.Gid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Gid)) + } + if m.Mode != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + } + if m.Optional { + dAtA[i] = 0x28 + i++ + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) - i += copy(dAtA[i:], m.NoProxy) } return i, nil } -func (m *WorkerConstraints) Marshal() (dAtA []byte, err error) { +func (m *SSHOpt) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2418,30 +2551,46 @@ func (m *WorkerConstraints) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *WorkerConstraints) MarshalTo(dAtA []byte) (int, error) { +func (m *SSHOpt) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) + if len(m.ID) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i += copy(dAtA[i:], m.ID) + } + if m.Uid != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Uid)) + } + if m.Gid != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Gid)) + } + if m.Mode != 0 { + dAtA[i] = 0x20 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + } + if m.Optional { + dAtA[i] = 0x28 + i++ + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } return i, nil } -func (m *Definition) Marshal() (dAtA []byte, err error) { +func (m *SourceOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) @@ -2451,35 +2600,28 @@ 
func (m *Definition) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *Definition) MarshalTo(dAtA []byte) (int, error) { +func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Def) > 0 { - for _, b := range m.Def { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(b))) - i += copy(dAtA[i:], b) - } + if len(m.Identifier) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Identifier))) + i += copy(dAtA[i:], m.Identifier) } - if len(m.Metadata) > 0 { - keysForMetadata := make([]string, 0, len(m.Metadata)) - for k, _ := range m.Metadata { - keysForMetadata = append(keysForMetadata, string(k)) + if len(m.Attrs) > 0 { + keysForAttrs := make([]string, 0, len(m.Attrs)) + for k, _ := range m.Attrs { + keysForAttrs = append(keysForAttrs, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) - for _, k := range keysForMetadata { + github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) + for _, k := range keysForAttrs { dAtA[i] = 0x12 i++ - v := m.Metadata[github_com_opencontainers_go_digest.Digest(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovOps(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + v := m.Attrs[string(k)] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) i = encodeVarintOps(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa i++ @@ -2487,18 +2629,14 @@ func (m *Definition) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], k) dAtA[i] = 0x12 i++ - i = encodeVarintOps(dAtA, i, uint64((&v).Size())) - n16, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } return i, nil } -func (m *HostIP) Marshal() (dAtA []byte, err error) { +func (m *BuildOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalTo(dAtA) @@ -2508,568 +2646,2260 @@ func (m *HostIP) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { +func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - if len(m.Host) > 0 { - dAtA[i] = 0xa - i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - } - if len(m.IP) > 0 { - dAtA[i] = 0x12 + if m.Builder != 0 { + dAtA[i] = 0x8 i++ - i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - } - return i, nil -} - -func encodeVarintOps(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Op) Size() (n int) { - if m == nil { - return 0 + i = encodeVarintOps(dAtA, i, uint64(m.Builder)) } - var l int - _ = l if len(m.Inputs) > 0 { - for _, e := range m.Inputs { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) + keysForInputs := make([]string, 0, len(m.Inputs)) + for k, _ := range m.Inputs { + keysForInputs = append(keysForInputs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForInputs) + for _, k := range keysForInputs { + dAtA[i] = 0x12 + i++ + v := m.Inputs[string(k)] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovOps(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(v.Size())) + n13, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + } } } - if m.Op != nil { - n += m.Op.Size() - } - if m.Platform != nil { - l = m.Platform.Size() - n += 1 + l + sovOps(uint64(l)) - } - if m.Constraints != nil { - l = m.Constraints.Size() - n += 1 + l + sovOps(uint64(l)) - } - return n 
-} - -func (m *Op_Exec) Size() (n int) { - if m == nil { - return 0 + if m.Def != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Def.Size())) + n14, err := m.Def.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 } - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovOps(uint64(l)) + if len(m.Attrs) > 0 { + keysForAttrs := make([]string, 0, len(m.Attrs)) + for k, _ := range m.Attrs { + keysForAttrs = append(keysForAttrs, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttrs) + for _, k := range keysForAttrs { + dAtA[i] = 0x22 + i++ + v := m.Attrs[string(k)] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } } - return n + return i, nil } -func (m *Op_Source) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Source != nil { - l = m.Source.Size() - n += 1 + l + sovOps(uint64(l)) + +func (m *BuildInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Op_Copy) Size() (n int) { - if m == nil { - return 0 - } + +func (m *BuildInput) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Copy != nil { - l = m.Copy.Size() - n += 1 + l + sovOps(uint64(l)) + if m.Input != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) } - return n + return i, nil } -func (m *Op_Build) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Build != nil { - l = m.Build.Size() - n += 1 + l + sovOps(uint64(l)) + +func (m *OpMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *Platform) Size() (n int) { - if m == nil { - return 0 - } + +func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.Architecture) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.OS) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if m.IgnoreCache { + dAtA[i] = 0x8 + i++ + if m.IgnoreCache { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - l = len(m.Variant) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if len(m.Description) > 0 { + keysForDescription := make([]string, 0, len(m.Description)) + for k, _ := range m.Description { + keysForDescription = append(keysForDescription, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDescription) + for _, k := range keysForDescription { + dAtA[i] = 0x12 + i++ + v := m.Description[string(k)] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } } - l = len(m.OSVersion) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if m.ExportCache != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size())) + n15, err := m.ExportCache.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 } - if len(m.OSFeatures) > 0 { - for _, s := range m.OSFeatures { - l = len(s) - n += 1 + l + sovOps(uint64(l)) + if len(m.Caps) > 0 { + keysForCaps := make([]string, 0, len(m.Caps)) + for k, _ := range m.Caps { + keysForCaps = append(keysForCaps, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCaps) + for _, k := range keysForCaps { + dAtA[i] = 0x2a + i++ + v := 
m.Caps[github_com_moby_buildkit_util_apicaps.CapID(k)] + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x10 + i++ + if v { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } } - return n + return i, nil } -func (m *Input) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Digest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Index != 0 { - n += 1 + sovOps(uint64(m.Index)) +func (m *ExportCache) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *ExecOp) Size() (n int) { - if m == nil { - return 0 - } +func (m *ExportCache) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Meta != nil { - l = m.Meta.Size() - n += 1 + l + sovOps(uint64(l)) - } - if len(m.Mounts) > 0 { - for _, e := range m.Mounts { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) + if m.Value { + dAtA[i] = 0x8 + i++ + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - if m.Network != 0 { - n += 1 + sovOps(uint64(m.Network)) - } - return n + return i, nil } -func (m *Meta) Size() (n int) { - if m == nil { - return 0 +func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *ProxyEnv) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - if len(m.Env) > 0 { - for _, s := range m.Env { - l = len(s) - n += 1 + l + sovOps(uint64(l)) - } - } - l = len(m.Cwd) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if len(m.HttpProxy) > 0 { + dAtA[i] = 0xa + 
i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpProxy))) + i += copy(dAtA[i:], m.HttpProxy) } - l = len(m.User) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if len(m.HttpsProxy) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.HttpsProxy))) + i += copy(dAtA[i:], m.HttpsProxy) } - if m.ProxyEnv != nil { - l = m.ProxyEnv.Size() - n += 1 + l + sovOps(uint64(l)) + if len(m.FtpProxy) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.FtpProxy))) + i += copy(dAtA[i:], m.FtpProxy) } - if len(m.ExtraHosts) > 0 { - for _, e := range m.ExtraHosts { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } + if len(m.NoProxy) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.NoProxy))) + i += copy(dAtA[i:], m.NoProxy) } - return n + return i, nil } -func (m *Mount) Size() (n int) { - if m == nil { - return 0 +func (m *WorkerConstraints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *WorkerConstraints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) - } - l = len(m.Selector) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.Dest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Output != 0 { - n += 1 + sovOps(uint64(m.Output)) - } - if m.Readonly { - n += 2 - } - if m.MountType != 0 { - n += 1 + sovOps(uint64(m.MountType)) - } - if m.CacheOpt != nil { - l = m.CacheOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - if m.SecretOpt != nil { - l = m.SecretOpt.Size() - n += 2 + l + sovOps(uint64(l)) - } - if m.SSHOpt != nil { - l = m.SSHOpt.Size() - n += 2 + l + sovOps(uint64(l)) + if len(m.Filter) > 0 { + for _, s := range m.Filter { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + 
i += copy(dAtA[i:], s) + } } - return n + return i, nil } -func (m *CacheOpt) Size() (n int) { - if m == nil { - return 0 +func (m *Definition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *Definition) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if len(m.Def) > 0 { + for _, b := range m.Def { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(b))) + i += copy(dAtA[i:], b) + } } - if m.Sharing != 0 { - n += 1 + sovOps(uint64(m.Sharing)) + if len(m.Metadata) > 0 { + keysForMetadata := make([]string, 0, len(m.Metadata)) + for k, _ := range m.Metadata { + keysForMetadata = append(keysForMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + for _, k := range keysForMetadata { + dAtA[i] = 0x12 + i++ + v := m.Metadata[github_com_opencontainers_go_digest.Digest(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovOps(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovOps(uint64(len(k))) + msgSize + i = encodeVarintOps(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64((&v).Size())) + n16, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + } } - return n + return i, nil } -func (m *SecretOpt) Size() (n int) { - if m == nil { - return 0 +func (m *HostIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - if m.Uid != 0 { - n 
+= 1 + sovOps(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovOps(uint64(m.Gid)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) + if len(m.Host) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) } - if m.Optional { - n += 2 + if len(m.IP) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) } - return n + return i, nil } -func (m *SSHOpt) Size() (n int) { - if m == nil { - return 0 +func (m *FileOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *FileOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.ID) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if len(m.Actions) > 0 { + for _, msg := range m.Actions { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - if m.Uid != 0 { - n += 1 + sovOps(uint64(m.Uid)) - } - if m.Gid != 0 { - n += 1 + sovOps(uint64(m.Gid)) - } - if m.Mode != 0 { - n += 1 + sovOps(uint64(m.Mode)) - } - if m.Optional { - n += 2 - } - return n + return i, nil } -func (m *CopyOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Src) > 0 { - for _, e := range m.Src { - l = e.Size() - n += 1 + l + sovOps(uint64(l)) - } - } - l = len(m.Dest) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) +func (m *FileAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *CopySource) Size() (n int) { - if m == nil { - return 0 - } +func (m *FileAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) 
- } - l = len(m.Selector) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + dAtA[i] = 0x8 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) } - return n -} - -func (m *SourceOp) Size() (n int) { - if m == nil { - return 0 + if m.SecondaryInput != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.SecondaryInput)) } - var l int - _ = l - l = len(m.Identifier) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if m.Output != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Output)) } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + if m.Action != nil { + nn17, err := m.Action.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += nn17 } - return n + return i, nil } -func (m *BuildOp) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Builder != 0 { - n += 1 + sovOps(uint64(m.Builder)) - } - if len(m.Inputs) > 0 { - for k, v := range m.Inputs { - _ = k - _ = v - l = 0 - if v != nil { - l = v.Size() - l += 1 + sovOps(uint64(l)) - } - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) +func (m *FileAction_Copy) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Copy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Copy.Size())) + n18, err := m.Copy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n18 } - if m.Def != nil { - l = m.Def.Size() - n += 1 + l + sovOps(uint64(l)) + return i, nil +} +func (m *FileAction_Mkfile) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Mkfile != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mkfile.Size())) + n19, err := m.Mkfile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n19 } - if len(m.Attrs) > 0 { - for k, v := range m.Attrs { - _ = k - _ = v - mapEntrySize 
:= 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + return i, nil +} +func (m *FileAction_Mkdir) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Mkdir != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mkdir.Size())) + n20, err := m.Mkdir.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n20 } - return n + return i, nil } - -func (m *BuildInput) Size() (n int) { - if m == nil { - return 0 +func (m *FileAction_Rm) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Rm != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Rm.Size())) + n21, err := m.Rm.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 } - var l int - _ = l - if m.Input != 0 { - n += 1 + sovOps(uint64(m.Input)) + return i, nil +} +func (m *FileActionCopy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - return n + return dAtA[:n], nil } -func (m *OpMetadata) Size() (n int) { - if m == nil { - return 0 - } +func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - if m.IgnoreCache { - n += 2 + if len(m.Src) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Src))) + i += copy(dAtA[i:], m.Src) } - if len(m.Description) > 0 { - for k, v := range m.Description { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + if len(m.Dest) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Dest))) + i += copy(dAtA[i:], m.Dest) + } + if m.Owner != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Owner.Size())) + n22, err := m.Owner.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n22 } - if m.ExportCache != nil { - l = 
m.ExportCache.Size() - n += 1 + l + sovOps(uint64(l)) + if m.Mode != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) } - if len(m.Caps) > 0 { - for k, v := range m.Caps { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + if m.FollowSymlink { + dAtA[i] = 0x30 + i++ + if m.FollowSymlink { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - return n -} - -func (m *ExportCache) Size() (n int) { - if m == nil { - return 0 + if m.DirCopyContents { + dAtA[i] = 0x38 + i++ + if m.DirCopyContents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - var l int - _ = l - if m.Value { - n += 2 + if m.AttemptUnpack { + dAtA[i] = 0x40 + i++ + if m.AttemptUnpack { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ } - return n + if m.CreateDestPath { + dAtA[i] = 0x48 + i++ + if m.CreateDestPath { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowWildcard { + dAtA[i] = 0x50 + i++ + if m.AllowWildcard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowEmptyWildcard { + dAtA[i] = 0x58 + i++ + if m.AllowEmptyWildcard { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil } -func (m *ProxyEnv) Size() (n int) { - if m == nil { - return 0 +func (m *FileActionMkFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } + return dAtA[:n], nil +} + +func (m *FileActionMkFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i var l int _ = l - l = len(m.HttpProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if len(m.Path) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) } - l = len(m.HttpsProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if m.Mode != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) } - l = len(m.FtpProxy) - if l > 0 { - n 
+= 1 + l + sovOps(uint64(l)) + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - l = len(m.NoProxy) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) + if m.Owner != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Owner.Size())) + n23, err := m.Owner.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } - return n + return i, nil } -func (m *WorkerConstraints) Size() (n int) { +func (m *FileActionMkDir) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileActionMkDir) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.Mode != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Mode)) + } + if m.MakeParents { + dAtA[i] = 0x18 + i++ + if m.MakeParents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Owner != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Owner.Size())) + n24, err := m.Owner.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + } + return i, nil +} + +func (m *FileActionRm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FileActionRm) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Path) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + } + if m.AllowNotFound { + dAtA[i] = 0x10 + i++ + if m.AllowNotFound { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowWildcard { + dAtA[i] = 0x18 + i++ + if m.AllowWildcard { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *ChownOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ChownOpt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.User != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(m.User.Size())) + n25, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + } + if m.Group != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Group.Size())) + n26, err := m.Group.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + } + return i, nil +} + +func (m *UserOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserOpt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Input != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + } + if m.Id != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Id)) + } + return i, nil +} + +func encodeVarintOps(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Op) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Filter) > 0 { - for _, s := range m.Filter { - l = len(s) + if len(m.Inputs) > 0 { + for _, e := range m.Inputs { + l = e.Size() n += 1 + l + sovOps(uint64(l)) } } + if m.Op != nil { + n += m.Op.Size() + } + if m.Platform != nil { + l = m.Platform.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Constraints 
!= nil { + l = m.Constraints.Size() + n += 1 + l + sovOps(uint64(l)) + } return n } -func (m *Definition) Size() (n int) { +func (m *Op_Exec) Size() (n int) { if m == nil { return 0 } var l int _ = l - if len(m.Def) > 0 { - for _, b := range m.Def { - l = len(b) - n += 1 + l + sovOps(uint64(l)) - } + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + sovOps(uint64(l)) } - if len(m.Metadata) > 0 { - for k, v := range m.Metadata { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) - n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) - } + return n +} +func (m *Op_Source) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Source != nil { + l = m.Source.Size() + n += 1 + l + sovOps(uint64(l)) } return n } - -func (m *HostIP) Size() (n int) { +func (m *Op_File) Size() (n int) { if m == nil { return 0 } var l int _ = l - l = len(m.Host) + if m.File != nil { + l = m.File.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Build) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Build != nil { + l = m.Build.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Platform) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Architecture) if l > 0 { n += 1 + l + sovOps(uint64(l)) } - l = len(m.IP) + l = len(m.OS) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Variant) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.OSVersion) if l > 0 { n += 1 + l + sovOps(uint64(l)) } - return n -} + if len(m.OSFeatures) > 0 { + for _, s := range m.OSFeatures { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Input) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Index != 0 { + n += 1 + sovOps(uint64(m.Index)) + } + return n +} + +func (m *ExecOp) Size() (n int) { 
+ if m == nil { + return 0 + } + var l int + _ = l + if m.Meta != nil { + l = m.Meta.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Mounts) > 0 { + for _, e := range m.Mounts { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + if m.Network != 0 { + n += 1 + sovOps(uint64(m.Network)) + } + return n +} + +func (m *Meta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.Cwd) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.User) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.ProxyEnv != nil { + l = m.ProxyEnv.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.ExtraHosts) > 0 { + for _, e := range m.ExtraHosts { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Mount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + l = len(m.Selector) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Output != 0 { + n += 1 + sovOps(uint64(m.Output)) + } + if m.Readonly { + n += 2 + } + if m.MountType != 0 { + n += 1 + sovOps(uint64(m.MountType)) + } + if m.CacheOpt != nil { + l = m.CacheOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + if m.SecretOpt != nil { + l = m.SecretOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + if m.SSHOpt != nil { + l = m.SSHOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } + return n +} + +func (m *CacheOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Sharing != 0 { + n += 1 + sovOps(uint64(m.Sharing)) + } + return n +} + +func (m *SecretOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int 
+ _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sovOps(uint64(m.Uid)) + } + if m.Gid != 0 { + n += 1 + sovOps(uint64(m.Gid)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.Optional { + n += 2 + } + return n +} + +func (m *SSHOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Uid != 0 { + n += 1 + sovOps(uint64(m.Uid)) + } + if m.Gid != 0 { + n += 1 + sovOps(uint64(m.Gid)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.Optional { + n += 2 + } + return n +} + +func (m *SourceOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Identifier) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Builder != 0 { + n += 1 + sovOps(uint64(m.Builder)) + } + if len(m.Inputs) > 0 { + for k, v := range m.Inputs { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovOps(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + l + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.Def != nil { + l = m.Def.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Attrs) > 0 { + for k, v := range m.Attrs { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *BuildInput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *OpMetadata) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + if m.IgnoreCache { + n += 2 + } + if len(m.Description) > 0 { + for k, v := range m.Description { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v))) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + if m.ExportCache != nil { + l = m.ExportCache.Size() + n += 1 + l + sovOps(uint64(l)) + } + if len(m.Caps) > 0 { + for k, v := range m.Caps { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1 + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ExportCache) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *ProxyEnv) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.HttpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.HttpsProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.FtpProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.NoProxy) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *WorkerConstraints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Filter) > 0 { + for _, s := range m.Filter { + l = len(s) + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *Definition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Def) > 0 { + for _, b := range m.Def { + l = len(b) + n += 1 + l + sovOps(uint64(l)) + } + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + l + sovOps(uint64(l)) + n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) + } + } + return n +} + +func (m *HostIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.IP) + if l > 0 { + 
n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *FileOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Actions) > 0 { + for _, e := range m.Actions { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *FileAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + if m.SecondaryInput != 0 { + n += 1 + sovOps(uint64(m.SecondaryInput)) + } + if m.Output != 0 { + n += 1 + sovOps(uint64(m.Output)) + } + if m.Action != nil { + n += m.Action.Size() + } + return n +} + +func (m *FileAction_Copy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Copy != nil { + l = m.Copy.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileAction_Mkfile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mkfile != nil { + l = m.Mkfile.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileAction_Mkdir) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Mkdir != nil { + l = m.Mkdir.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileAction_Rm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Rm != nil { + l = m.Rm.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *FileActionCopy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Src) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Dest) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.FollowSymlink { + n += 2 + } + if m.DirCopyContents { + n += 2 + } + if m.AttemptUnpack { + n += 2 + } + if m.CreateDestPath { + n += 2 + } + if m.AllowWildcard { + n += 2 + } + if m.AllowEmptyWildcard { + n += 2 + } + return n +} + +func (m 
*FileActionMkFile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *FileActionMkDir) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Mode != 0 { + n += 1 + sovOps(uint64(m.Mode)) + } + if m.MakeParents { + n += 2 + } + if m.Owner != nil { + l = m.Owner.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *FileActionRm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.AllowNotFound { + n += 2 + } + if m.AllowWildcard { + n += 2 + } + return n +} + +func (m *ChownOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.User != nil { + l = m.User.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Group != nil { + l = m.Group.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *UserOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + if m.Id != 0 { + n += 1 + sovOps(uint64(m.Id)) + } + return n +} + +func sovOps(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozOps(x uint64) (n int) { + return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Op) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Op: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, &Input{}) + if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ExecOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Exec{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SourceOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Source{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field File", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_File{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &BuildOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Build{v} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Platform == nil { + m.Platform = &Platform{} + } + if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Constraints == nil { + m.Constraints = &WorkerConstraints{} + } + if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Platform) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Platform: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OS = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Variant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSFeatures", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSFeatures = append(m.OSFeatures, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Input) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Input: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Input: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= (OutputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecOp: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Meta == nil { + m.Meta = &Meta{} + } + if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mounts = append(m.Mounts, &Mount{}) + if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + } + m.Network = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Network |= (NetMode(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } 
-func sovOps(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } + if iNdEx > l { + return io.ErrUnexpectedEOF } - return n -} -func sozOps(x uint64) (n int) { - return sovOps(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return nil } -func (m *Op) Unmarshal(dAtA []byte) error { +func (m *Meta) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3092,17 +4922,17 @@ func (m *Op) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Op: wiretype end group for non-group") + return fmt.Errorf("proto: Meta: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Op: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3112,28 +4942,26 @@ func (m *Op) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Inputs = append(m.Inputs, &Input{}) - if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) } - var msglen int 
+ var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3143,29 +4971,26 @@ func (m *Op) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &ExecOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Exec{v} + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Cwd", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3175,29 +5000,26 @@ func (m *Op) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &SourceOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Source{v} + m.Cwd = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3207,59 +5029,24 @@ func (m *Op) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - v := &CopyOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Copy{v} + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Build", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - v := &BuildOp{} - if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - m.Op = &Op_Build{v} - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ProxyEnv", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3283,16 +5070,16 @@ func (m *Op) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Platform == nil { - m.Platform = &Platform{} + if m.ProxyEnv == nil { + m.ProxyEnv = &ProxyEnv{} } - if err := m.Platform.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Constraints", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) } var msglen int for shift := 
uint(0); ; shift += 7 { @@ -3316,10 +5103,8 @@ func (m *Op) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Constraints == nil { - m.Constraints = &WorkerConstraints{} - } - if err := m.Constraints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.ExtraHosts = append(m.ExtraHosts, &HostIP{}) + if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3344,7 +5129,7 @@ func (m *Op) Unmarshal(dAtA []byte) error { } return nil } -func (m *Platform) Unmarshal(dAtA []byte) error { +func (m *Mount) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3367,15 +5152,34 @@ func (m *Platform) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Platform: wiretype end group for non-group") + return fmt.Errorf("proto: Mount: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Platform: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= (InputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3400,11 +5204,11 @@ func (m *Platform) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Architecture = string(dAtA[iNdEx:postIndex]) + m.Selector = 
string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OS", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3429,13 +5233,71 @@ func (m *Platform) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.OS = string(dAtA[iNdEx:postIndex]) + m.Dest = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) + } + m.Output = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Output |= (OutputIndex(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Readonly = bool(v != 0) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MountType", wireType) + } + m.MountType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MountType |= (MountType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Variant", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3445,26 +5307,30 
@@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Variant = string(dAtA[iNdEx:postIndex]) + if m.CacheOpt == nil { + m.CacheOpt = &CacheOpt{} + } + if err := m.CacheOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretOpt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3474,26 +5340,30 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.OSVersion = string(dAtA[iNdEx:postIndex]) + if m.SecretOpt == nil { + m.SecretOpt = &SecretOpt{} + } + if err := m.SecretOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 5: + case 22: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OSFeatures", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SSHOpt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3503,20 +5373,24 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.OSFeatures = append(m.OSFeatures, string(dAtA[iNdEx:postIndex])) + if m.SSHOpt == nil { + m.SSHOpt = &SSHOpt{} + } + if err := m.SSHOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3539,7 +5413,7 @@ func (m *Platform) Unmarshal(dAtA []byte) error { } return nil } -func (m *Input) Unmarshal(dAtA []byte) error { +func (m *CacheOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3562,15 +5436,15 @@ func (m *Input) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Input: wiretype end group for non-group") + return fmt.Errorf("proto: CacheOpt: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Input: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CacheOpt: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3595,13 +5469,13 @@ func (m *Input) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Sharing", wireType) } - m.Index = 0 + m.Sharing = 0 for shift 
:= uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3611,7 +5485,7 @@ func (m *Input) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Index |= (OutputIndex(b) & 0x7F) << shift + m.Sharing |= (CacheSharingOpt(b) & 0x7F) << shift if b < 0x80 { break } @@ -3637,7 +5511,7 @@ func (m *Input) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExecOp) Unmarshal(dAtA []byte) error { +func (m *SecretOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3660,17 +5534,17 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExecOp: wiretype end group for non-group") + return fmt.Errorf("proto: SecretOpt: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExecOp: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SecretOpt: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Meta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3680,30 +5554,26 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.Meta == nil { - m.Meta = &Meta{} - } - if err := m.Meta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Mounts", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) } - var msglen int + m.Uid = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3713,28 +5583,54 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.Uid |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthOps + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Gid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Gid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.Mounts = append(m.Mounts, &Mount{}) - if err := m.Mounts[len(m.Mounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - iNdEx = postIndex - case 3: + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 5: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) } - m.Network = 0 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3744,11 +5640,12 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Network |= (NetMode(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + m.Optional = bool(v != 0) default: iNdEx = preIndex skippy, 
err := skipOps(dAtA[iNdEx:]) @@ -3770,7 +5667,7 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error { } return nil } -func (m *Meta) Unmarshal(dAtA []byte) error { +func (m *SSHOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3793,15 +5690,15 @@ func (m *Meta) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Meta: wiretype end group for non-group") + return fmt.Errorf("proto: SSHOpt: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Meta: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: SSHOpt: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3826,13 +5723,13 @@ func (m *Meta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + m.ID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) } - var stringLen uint64 + m.Uid = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3842,26 +5739,35 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Uid |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF + m.Gid = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Gid |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cwd", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - var stringLen uint64 + m.Mode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -3871,24 +5777,84 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Mode |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen - if postIndex > l { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SourceOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Cwd = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - case 4: + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3913,11 +5879,11 @@ func (m *Meta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.User = string(dAtA[iNdEx:postIndex]) + m.Identifier = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProxyEnv", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3941,43 +5907,97 @@ func (m *Meta) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.ProxyEnv == nil { - m.ProxyEnv = &ProxyEnv{} - } - if err := m.ProxyEnv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) + if m.Attrs == nil { + m.Attrs = make(map[string]string) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExtraHosts = append(m.ExtraHosts, &HostIP{}) - if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - 
return err - } + m.Attrs[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -4000,7 +6020,7 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } return nil } -func (m *Mount) Unmarshal(dAtA []byte) error { +func (m *BuildOp) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4023,17 +6043,17 @@ func (m *Mount) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Mount: wiretype end group for non-group") + return fmt.Errorf("proto: BuildOp: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Mount: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: BuildOp: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Builder", wireType) } - m.Input = 0 + m.Builder = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4043,16 +6063,16 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Input |= (InputIndex(b) & 0x7F) << shift + m.Builder |= (InputIndex(b) & 0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4062,26 +6082,120 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if 
postIndex > l { return io.ErrUnexpectedEOF } - m.Selector = string(dAtA[iNdEx:postIndex]) + if m.Inputs == nil { + m.Inputs = make(map[string]*BuildInput) + } + var mapkey string + var mapvalue *BuildInput + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &BuildInput{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Inputs[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4091,82 +6205,28 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Dest = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) - } - m.Output = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Output |= (OutputIndex(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Readonly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Readonly = bool(v != 0) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MountType", wireType) + if m.Def == nil { + m.Def = &Definition{} } - m.MountType = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MountType |= 
(MountType(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - case 20: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4190,51 +6250,153 @@ func (m *Mount) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.CacheOpt == nil { - m.CacheOpt = &CacheOpt{} - } - if err := m.CacheOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretOpt", wireType) + if m.Attrs == nil { + m.Attrs = make(map[string]string) } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return 
io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - if msglen < 0 { + m.Attrs[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - if m.SecretOpt == nil { - m.SecretOpt = &SecretOpt{} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BuildInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps } - if err := m.SecretOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + if iNdEx >= l { + return io.ErrUnexpectedEOF } - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SSHOpt", wireType) + b := dAtA[iNdEx] + iNdEx++ + 
wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - var msglen int + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BuildInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BuildInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4244,25 +6406,11 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.Input |= (InputIndex(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SSHOpt == nil { - m.SSHOpt = &SSHOpt{} - } - if err := m.SSHOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -4284,7 +6432,7 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } return nil } -func (m *CacheOpt) Unmarshal(dAtA []byte) error { +func (m *OpMetadata) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4307,17 +6455,37 @@ func (m *CacheOpt) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CacheOpt: wiretype end group for non-group") + return fmt.Errorf("proto: OpMetadata: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CacheOpt: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OpMetadata: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IgnoreCache", wireType) + } + 
var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IgnoreCache = bool(v != 0) + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4327,26 +6495,115 @@ func (m *CacheOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + if m.Description == nil { + m.Description = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + 
return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Description[mapkey] = mapvalue iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Sharing", wireType) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExportCache", wireType) } - m.Sharing = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4356,11 +6613,134 @@ func (m *CacheOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Sharing |= (CacheSharingOpt(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ExportCache == nil { + m.ExportCache = &ExportCache{} + } + if err := m.ExportCache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Caps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Caps == nil { + m.Caps = make(map[github_com_moby_buildkit_util_apicaps.CapID]bool) + } + var mapkey github_com_moby_buildkit_util_apicaps.CapID + var mapvalue bool + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthOps + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = github_com_moby_buildkit_util_apicaps.CapID(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapvaluetemp int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapvaluetemp |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + mapvalue = bool(mapvaluetemp != 0) + } else { + iNdEx = entryPreIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Caps[github_com_moby_buildkit_util_apicaps.CapID(mapkey)] = mapvalue + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -4382,7 +6762,7 @@ func (m *CacheOpt) Unmarshal(dAtA []byte) error { } return nil } -func (m *SecretOpt) Unmarshal(dAtA []byte) error { +func (m *ExportCache) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4405,101 +6785,15 @@ func (m *SecretOpt) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SecretOpt: wiretype end group for non-group") + return fmt.Errorf("proto: ExportCache: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SecretOpt: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ExportCache: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Uid |= 
(uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) - } - m.Gid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Gid |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -4516,7 +6810,7 @@ func (m *SecretOpt) Unmarshal(dAtA []byte) error { break } } - m.Optional = bool(v != 0) + m.Value = bool(v != 0) default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -4538,7 +6832,7 @@ func (m *SecretOpt) Unmarshal(dAtA []byte) error { } return nil } -func (m *SSHOpt) Unmarshal(dAtA []byte) error { +func (m *ProxyEnv) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4561,15 +6855,15 @@ func (m *SSHOpt) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SSHOpt: wiretype end group for non-group") + return fmt.Errorf("proto: ProxyEnv: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SSHOpt: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ProxyEnv: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field HttpProxy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4594,70 +6888,13 @@ func (m *SSHOpt) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.ID = string(dAtA[iNdEx:postIndex]) + m.HttpProxy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - m.Uid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Uid |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType) - } - m.Gid = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Gid |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - m.Mode = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Mode |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HttpsProxy", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4667,67 +6904,26 @@ func (m *SSHOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 
0x7F) << shift if b < 0x80 { break } } - m.Optional = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CopyOp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CopyOp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CopyOp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.HttpsProxy = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Src", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FtpProxy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4737,26 +6933,24 @@ func (m *CopyOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + msglen + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Src = append(m.Src, &CopySource{}) - if err := 
m.Src[len(m.Src)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.FtpProxy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -4781,7 +6975,7 @@ func (m *CopyOp) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Dest = string(dAtA[iNdEx:postIndex]) + m.NoProxy = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -4804,7 +6998,7 @@ func (m *CopyOp) Unmarshal(dAtA []byte) error { } return nil } -func (m *CopySource) Unmarshal(dAtA []byte) error { +func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4827,34 +7021,15 @@ func (m *CopySource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CopySource: wiretype end group for non-group") + return fmt.Errorf("proto: WorkerConstraints: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CopySource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: WorkerConstraints: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) - } - m.Input = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Input |= (InputIndex(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) } var stringLen uint64 for 
shift := uint(0); ; shift += 7 { @@ -4879,7 +7054,7 @@ func (m *CopySource) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Selector = string(dAtA[iNdEx:postIndex]) + m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -4902,7 +7077,7 @@ func (m *CopySource) Unmarshal(dAtA []byte) error { } return nil } -func (m *SourceOp) Unmarshal(dAtA []byte) error { +func (m *Definition) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4925,17 +7100,17 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SourceOp: wiretype end group for non-group") + return fmt.Errorf("proto: Definition: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SourceOp: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Definition: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Identifier", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -4945,24 +7120,24 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Identifier = string(dAtA[iNdEx:postIndex]) + m.Def = append(m.Def, make([]byte, postIndex-iNdEx)) + copy(m.Def[len(m.Def)-1], dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4986,11 +7161,11 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Attrs == nil { - m.Attrs = make(map[string]string) + if m.Metadata == nil { + m.Metadata = make(map[github_com_opencontainers_go_digest.Digest]OpMetadata) } - var mapkey string - var mapvalue string + var mapkey github_com_opencontainers_go_digest.Digest + mapvalue := &OpMetadata{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -5033,10 +7208,10 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + mapkey = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postStringIndexmapkey]) iNdEx = postStringIndexmapkey } else if fieldNum == 2 { - var stringLenmapvalue uint64 + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5046,21 +7221,26 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift + mapmsglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { + if mapmsglen < 0 { return ErrInvalidLengthOps } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthOps + } + if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue + mapvalue = &OpMetadata{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex } else { iNdEx = entryPreIndex skippy, err := 
skipOps(dAtA[iNdEx:]) @@ -5076,7 +7256,196 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { iNdEx += skippy } } - m.Attrs[mapkey] = mapvalue + m.Metadata[github_com_opencontainers_go_digest.Digest(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FileOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FileOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FileOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Actions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Actions = append(m.Actions, &FileAction{}) + if err := 
m.Actions[len(m.Actions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5099,7 +7468,7 @@ func (m *SourceOp) Unmarshal(dAtA []byte) error { } return nil } -func (m *BuildOp) Unmarshal(dAtA []byte) error { +func (m *FileAction) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5122,17 +7491,17 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: BuildOp: wiretype end group for non-group") + return fmt.Errorf("proto: FileAction: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: BuildOp: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileAction: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Builder", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) } - m.Builder = 0 + m.Input = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5142,16 +7511,16 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Builder |= (InputIndex(b) & 0x7F) << shift + m.Input |= (InputIndex(b) & 0x7F) << shift if b < 0x80 { break } } case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SecondaryInput", wireType) } - var msglen int + m.SecondaryInput = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5161,120 +7530,16 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.SecondaryInput |= (InputIndex(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex 
:= iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Inputs == nil { - m.Inputs = make(map[string]*BuildInput) - } - var mapkey string - var mapvalue *BuildInput - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &BuildInput{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - 
iNdEx += skippy - } - } - m.Inputs[mapkey] = mapvalue - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Output", wireType) } - var msglen int + m.Output = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5284,28 +7549,14 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + m.Output |= (OutputIndex(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Def == nil { - m.Def = &Definition{} - } - if err := m.Def.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attrs", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Copy", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5329,153 +7580,81 @@ func (m *BuildOp) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Attrs == nil { - m.Attrs = make(map[string]string) + v := &FileActionCopy{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + m.Action = &FileAction_Copy{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mkfile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.Attrs[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - if (iNdEx + skippy) > l { + postIndex := iNdEx + msglen + if postIndex > l { 
return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *BuildInput) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps + v := &FileActionMkFile{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - if iNdEx >= l { + m.Action = &FileAction_Mkfile{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mkdir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + v := &FileActionMkDir{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BuildInput: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BuildInput: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + m.Action = &FileAction_Mkdir{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rm", wireType) } - m.Input = 0 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5485,11 +7664,24 @@ func (m *BuildInput) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
m.Input |= (InputIndex(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &FileActionRm{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Action = &FileAction_Rm{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -5511,7 +7703,7 @@ func (m *BuildInput) Unmarshal(dAtA []byte) error { } return nil } -func (m *OpMetadata) Unmarshal(dAtA []byte) error { +func (m *FileActionCopy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5534,17 +7726,17 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: OpMetadata: wiretype end group for non-group") + return fmt.Errorf("proto: FileActionCopy: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: OpMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileActionCopy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IgnoreCache", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Src", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5554,15 +7746,53 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - m.IgnoreCache = bool(v != 0) + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Src = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Dest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Dest = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -5586,103 +7816,37 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Description == nil { - m.Description = make(map[string]string) + if m.Owner == nil { + m.Owner = &ChownOpt{} } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + m.Mode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } 
- b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Mode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.Description[mapkey] = mapvalue - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExportCache", wireType) + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FollowSymlink", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5692,30 +7856,57 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= 
(int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthOps + m.FollowSymlink = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DirCopyContents", wireType) } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if m.ExportCache == nil { - m.ExportCache = &ExportCache{} + m.DirCopyContents = bool(v != 0) + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttemptUnpack", wireType) } - if err := m.ExportCache.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Caps", wireType) + m.AttemptUnpack = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreateDestPath", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5725,101 +7916,52 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= (int(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthOps + m.CreateDestPath = bool(v != 0) + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF + var v int + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if m.Caps == nil { - m.Caps = make(map[github_com_moby_buildkit_util_apicaps.CapID]bool) + m.AllowWildcard = bool(v != 0) + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyWildcard", wireType) } - var mapkey github_com_moby_buildkit_util_apicaps.CapID - var mapvalue bool - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = github_com_moby_buildkit_util_apicaps.CapID(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapvaluetemp int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapvaluetemp |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - mapvalue = bool(mapvaluetemp != 0) - } else { - iNdEx = entryPreIndex - 
skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.Caps[github_com_moby_buildkit_util_apicaps.CapID(mapkey)] = mapvalue - iNdEx = postIndex + m.AllowEmptyWildcard = bool(v != 0) default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -5841,7 +7983,7 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { } return nil } -func (m *ExportCache) Unmarshal(dAtA []byte) error { +func (m *FileActionMkFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5864,17 +8006,46 @@ func (m *ExportCache) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExportCache: wiretype end group for non-group") + return fmt.Errorf("proto: FileActionMkFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExportCache: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileActionMkFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - var v int + m.Mode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5884,12 +8055,75 @@ func (m *ExportCache) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= (int(b) & 0x7F) << shift + m.Mode |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Value = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Owner == nil { + m.Owner = &ChownOpt{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -5911,7 +8145,7 @@ func (m *ExportCache) Unmarshal(dAtA []byte) error { } return nil } -func (m *ProxyEnv) Unmarshal(dAtA []byte) error { +func (m *FileActionMkDir) Unmarshal(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5934,15 +8168,15 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ProxyEnv: wiretype end group for non-group") + return fmt.Errorf("proto: FileActionMkDir: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ProxyEnv: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileActionMkDir: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpProxy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5967,13 +8201,13 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.HttpProxy = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpsProxy", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } - var stringLen uint64 + m.Mode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -5983,26 +8217,16 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Mode |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HttpsProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FtpProxy", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
MakeParents", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -6012,26 +8236,17 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FtpProxy = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.MakeParents = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NoProxy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -6041,20 +8256,24 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.NoProxy = string(dAtA[iNdEx:postIndex]) + if m.Owner == nil { + m.Owner = &ChownOpt{} + } + if err := m.Owner.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6077,7 +8296,7 @@ func (m *ProxyEnv) Unmarshal(dAtA []byte) error { } return nil } -func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { +func (m *FileActionRm) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6100,15 +8319,15 @@ func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: 
WorkerConstraints: wiretype end group for non-group") + return fmt.Errorf("proto: FileActionRm: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: WorkerConstraints: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FileActionRm: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Filter", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6133,8 +8352,48 @@ func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Filter = append(m.Filter, string(dAtA[iNdEx:postIndex])) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowNotFound", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowNotFound = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWildcard = bool(v != 0) default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -6156,7 +8415,7 @@ func (m *WorkerConstraints) Unmarshal(dAtA []byte) error { } return nil } -func (m *Definition) Unmarshal(dAtA []byte) error { +func (m *ChownOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6179,17 +8438,17 @@ func (m *Definition) 
Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Definition: wiretype end group for non-group") + return fmt.Errorf("proto: ChownOpt: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Definition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ChownOpt: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Def", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -6199,24 +8458,28 @@ func (m *Definition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Def = append(m.Def, make([]byte, postIndex-iNdEx)) - copy(m.Def[len(m.Def)-1], dAtA[iNdEx:postIndex]) + if m.User == nil { + m.User = &UserOpt{} + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -6240,102 +8503,12 @@ func (m *Definition) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - if m.Metadata == nil { - m.Metadata = make(map[github_com_opencontainers_go_digest.Digest]OpMetadata) + if m.Group == nil { + m.Group = &UserOpt{} } - var mapkey github_com_opencontainers_go_digest.Digest - mapvalue := &OpMetadata{} - for 
iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthOps - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthOps - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &OpMetadata{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } - 
m.Metadata[github_com_opencontainers_go_digest.Digest(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -6358,7 +8531,7 @@ func (m *Definition) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostIP) Unmarshal(dAtA []byte) error { +func (m *UserOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6381,15 +8554,15 @@ func (m *HostIP) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HostIP: wiretype end group for non-group") + return fmt.Errorf("proto: UserOpt: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: UserOpt: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6414,13 +8587,13 @@ func (m *HostIP) Unmarshal(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Host = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) } - var stringLen uint64 + m.Input = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -6430,21 +8603,30 @@ func (m *HostIP) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + m.Input |= (InputIndex(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Id", wireType) } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF + m.Id = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Id |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -6571,99 +8753,124 @@ var ( ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_821a7942fdf920e6) } - -var fileDescriptor_ops_821a7942fdf920e6 = []byte{ - // 1452 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0x1b, 0xc9, - 0x11, 0x16, 0x87, 0xcf, 0x29, 0x4a, 0x32, 0xd3, 0x7e, 0x84, 0x51, 0x14, 0x4a, 0x19, 0x27, 0x81, - 0x2c, 0x59, 0x14, 0x40, 0x03, 0xb6, 0x91, 0x83, 0x11, 0xf1, 0x61, 0x88, 0x71, 0x24, 0x0a, 0x4d, - 0x45, 0x39, 0x1a, 0xa3, 0x61, 0x93, 0x1a, 0x88, 0x9a, 0x1e, 0xcc, 0x34, 0x6d, 0xf1, 0x92, 0x83, - 0x7f, 0x41, 0x80, 0x00, 0xb9, 0xe7, 0x98, 0x1f, 0x91, 0xbb, 0x8f, 0x46, 0x4e, 0x4e, 0x0e, 0xce, - 0x42, 0xfe, 0x23, 0x8b, 0xaa, 0xee, 0xe1, 0x8c, 0x1f, 0x8b, 0xb5, 0xb1, 0x8b, 0x3d, 0xb1, 0xba, - 0xea, 0xeb, 0xaf, 0xeb, 0xd5, 0x5d, 0x43, 0xb0, 0x65, 0x18, 0x37, 0xc3, 0x48, 0x2a, 0xc9, 0xac, - 0xf0, 0x6c, 0x6d, 0x77, 0xe2, 0xab, 0xf3, 0xd9, 0x59, 0xd3, 0x93, 0x97, 0x7b, 0x13, 0x39, 0x91, - 0x7b, 0x64, 0x3a, 0x9b, 0x8d, 0x69, 0x45, 0x0b, 0x92, 0xf4, 0x16, 0xe7, 0x9f, 0x16, 0x58, 0x83, - 0x90, 0xfd, 0x1a, 0x4a, 0x7e, 0x10, 0xce, 0x54, 0x5c, 0xcf, 0x6d, 0xe6, 0xb7, 0xaa, 0x2d, 0xbb, - 0x19, 0x9e, 0x35, 0xfb, 0xa8, 0xe1, 0xc6, 0xc0, 0x36, 0xa1, 0x20, 0xae, 0x84, 0x57, 0xb7, 0x36, - 0x73, 0x5b, 0xd5, 0x16, 0x20, 0xa0, 0x77, 0x25, 0xbc, 0x41, 0x78, 0xb0, 0xc4, 0xc9, 0xc2, 0x7e, - 0x07, 0xa5, 0x58, 0xce, 0x22, 0x4f, 0xd4, 0xf3, 0x84, 0x59, 
0x46, 0xcc, 0x90, 0x34, 0x84, 0x32, - 0x56, 0x64, 0xf2, 0x64, 0x38, 0xaf, 0x17, 0x52, 0xa6, 0x8e, 0x0c, 0xe7, 0x9a, 0x09, 0x2d, 0xec, - 0x2e, 0x14, 0xcf, 0x66, 0xfe, 0x74, 0x54, 0x2f, 0x12, 0xa4, 0x8a, 0x90, 0x36, 0x2a, 0x08, 0xa3, - 0x6d, 0x6c, 0x0b, 0x2a, 0xe1, 0xd4, 0x55, 0x63, 0x19, 0x5d, 0xd6, 0x21, 0x3d, 0xf0, 0xd8, 0xe8, - 0xf8, 0xc2, 0xca, 0x1e, 0x41, 0xd5, 0x93, 0x41, 0xac, 0x22, 0xd7, 0x0f, 0x54, 0x5c, 0xaf, 0x12, - 0xf8, 0x36, 0x82, 0xff, 0x22, 0xa3, 0x0b, 0x11, 0x75, 0x52, 0x23, 0xcf, 0x22, 0xdb, 0x05, 0xb0, - 0x64, 0xe8, 0xfc, 0x23, 0x07, 0x95, 0x84, 0x95, 0x39, 0xb0, 0xbc, 0x1f, 0x79, 0xe7, 0xbe, 0x12, - 0x9e, 0x9a, 0x45, 0xa2, 0x9e, 0xdb, 0xcc, 0x6d, 0xd9, 0xfc, 0x03, 0x1d, 0x5b, 0x05, 0x6b, 0x30, - 0xa4, 0x44, 0xd9, 0xdc, 0x1a, 0x0c, 0x59, 0x1d, 0xca, 0xa7, 0x6e, 0xe4, 0xbb, 0x81, 0xa2, 0xcc, - 0xd8, 0x3c, 0x59, 0xb2, 0x75, 0xb0, 0x07, 0xc3, 0x53, 0x11, 0xc5, 0xbe, 0x0c, 0x28, 0x1f, 0x36, - 0x4f, 0x15, 0xac, 0x01, 0x30, 0x18, 0x3e, 0x15, 0x2e, 0x92, 0xc6, 0xf5, 0xe2, 0x66, 0x7e, 0xcb, - 0xe6, 0x19, 0x8d, 0xf3, 0x57, 0x28, 0x52, 0x8d, 0xd8, 0x1f, 0xa1, 0x34, 0xf2, 0x27, 0x22, 0x56, - 0xda, 0x9d, 0x76, 0xeb, 0xf5, 0xbb, 0x8d, 0xa5, 0xff, 0xbd, 0xdb, 0xd8, 0xce, 0x34, 0x83, 0x0c, - 0x45, 0xe0, 0xc9, 0x40, 0xb9, 0x7e, 0x20, 0xa2, 0x78, 0x6f, 0x22, 0x77, 0xf5, 0x96, 0x66, 0x97, - 0x7e, 0xb8, 0x61, 0x60, 0xf7, 0xa0, 0xe8, 0x07, 0x23, 0x71, 0x45, 0xfe, 0xe7, 0xdb, 0x37, 0x0d, - 0x55, 0x75, 0x30, 0x53, 0xe1, 0x4c, 0xf5, 0xd1, 0xc4, 0x35, 0xc2, 0x09, 0xa1, 0xa4, 0x5b, 0x80, - 0xad, 0x43, 0xe1, 0x52, 0x28, 0x97, 0x8e, 0xaf, 0xb6, 0x2a, 0x98, 0xda, 0x43, 0xa1, 0x5c, 0x4e, - 0x5a, 0xec, 0xae, 0x4b, 0x39, 0xc3, 0xd4, 0x5b, 0x69, 0x77, 0x1d, 0xa2, 0x86, 0x1b, 0x03, 0xfb, - 0x2d, 0x94, 0x03, 0xa1, 0x5e, 0xca, 0xe8, 0x82, 0x52, 0xb4, 0xaa, 0x6b, 0x7e, 0x24, 0xd4, 0xa1, - 0x1c, 0x09, 0x9e, 0xd8, 0x9c, 0x7f, 0xe5, 0xa0, 0x80, 0xc4, 0x8c, 0x41, 0xc1, 0x8d, 0x26, 0xba, - 0x5d, 0x6d, 0x4e, 0x32, 0xab, 0x41, 0x5e, 0x04, 0x2f, 0xe8, 0x0c, 0x9b, 0xa3, 0x88, 0x1a, 0xef, - 
0xe5, 0xc8, 0x24, 0x1d, 0x45, 0xdc, 0x37, 0x8b, 0x45, 0x64, 0x72, 0x4d, 0x32, 0xbb, 0x07, 0x76, - 0x18, 0xc9, 0xab, 0xf9, 0x73, 0xdc, 0x5d, 0xcc, 0x74, 0x12, 0x2a, 0x7b, 0xc1, 0x0b, 0x5e, 0x09, - 0x8d, 0xc4, 0xb6, 0x01, 0xc4, 0x95, 0x8a, 0xdc, 0x03, 0x19, 0xab, 0xb8, 0x5e, 0xa2, 0x68, 0xa8, - 0x81, 0x51, 0xd1, 0x3f, 0xe6, 0x19, 0xab, 0xf3, 0x1f, 0x0b, 0x8a, 0x14, 0x24, 0xdb, 0xc2, 0x94, - 0x86, 0x33, 0x5d, 0x9d, 0x7c, 0x9b, 0x99, 0x94, 0x02, 0x15, 0x6f, 0x91, 0x51, 0x2c, 0xe4, 0x1a, - 0x54, 0x62, 0x31, 0x15, 0x9e, 0x92, 0x91, 0xe9, 0x9f, 0xc5, 0x1a, 0x5d, 0x1f, 0x61, 0x89, 0x75, - 0x34, 0x24, 0xb3, 0x1d, 0x28, 0x49, 0xaa, 0x0b, 0x05, 0xf4, 0x1d, 0xd5, 0x32, 0x10, 0x24, 0x8f, - 0x84, 0x3b, 0x92, 0xc1, 0x74, 0x4e, 0x61, 0x56, 0xf8, 0x62, 0xcd, 0x76, 0xc0, 0xa6, 0x4a, 0x9c, - 0xcc, 0x43, 0x51, 0x2f, 0x51, 0x05, 0x56, 0x16, 0x55, 0x42, 0x25, 0x4f, 0xed, 0x78, 0xf3, 0x3c, - 0xd7, 0x3b, 0x17, 0x83, 0x50, 0xd5, 0x6f, 0xa5, 0xf9, 0xea, 0x18, 0x1d, 0x5f, 0x58, 0x91, 0x36, - 0x16, 0x5e, 0x24, 0x14, 0x42, 0x6f, 0x13, 0x94, 0x68, 0x87, 0x89, 0x92, 0xa7, 0x76, 0xe6, 0x40, - 0x69, 0x38, 0x3c, 0x40, 0xe4, 0x9d, 0xf4, 0x65, 0xd0, 0x1a, 0x6e, 0x2c, 0x4e, 0x1f, 0x2a, 0xc9, - 0x31, 0x78, 0xcd, 0xfa, 0x5d, 0x73, 0x01, 0xad, 0x7e, 0x97, 0xed, 0x42, 0x39, 0x3e, 0x77, 0x23, - 0x3f, 0x98, 0x50, 0xee, 0x56, 0x5b, 0x37, 0x17, 0x5e, 0x0d, 0xb5, 0x1e, 0x99, 0x12, 0x8c, 0x23, - 0xc1, 0x5e, 0xb8, 0xf1, 0x09, 0x57, 0x0d, 0xf2, 0x33, 0x7f, 0x44, 0x3c, 0x2b, 0x1c, 0x45, 0xd4, - 0x4c, 0x7c, 0xdd, 0x4b, 0x2b, 0x1c, 0x45, 0x2c, 0xc8, 0xa5, 0x1c, 0x09, 0x4a, 0xfd, 0x0a, 0x27, - 0x19, 0x73, 0x2c, 0x43, 0xe5, 0xcb, 0xc0, 0x9d, 0x26, 0x39, 0x4e, 0xd6, 0xce, 0x34, 0x89, 0xef, - 0x27, 0x39, 0xed, 0x09, 0x94, 0xf4, 0xab, 0xca, 0x36, 0x21, 0x1f, 0x47, 0x9e, 0x79, 0xd9, 0x57, - 0x93, 0xe7, 0x56, 0x3f, 0xcc, 0x1c, 0x4d, 0x8b, 0xd6, 0xb2, 0xd2, 0xd6, 0x72, 0x38, 0x40, 0x0a, - 0xfb, 0x71, 0x5a, 0xd8, 0xf9, 0x7b, 0x0e, 0x2a, 0xc9, 0x40, 0xc0, 0xd7, 0xcd, 0x1f, 0x89, 0x40, - 0xf9, 0x63, 0x5f, 0x44, 0x26, 0x19, 
0x19, 0x0d, 0xdb, 0x85, 0xa2, 0xab, 0x54, 0x94, 0x3c, 0x1a, - 0x3f, 0xcf, 0x4e, 0x93, 0xe6, 0x3e, 0x5a, 0x7a, 0x81, 0x8a, 0xe6, 0x5c, 0xa3, 0xd6, 0x1e, 0x03, - 0xa4, 0x4a, 0xcc, 0xdf, 0x85, 0x98, 0x1b, 0x56, 0x14, 0xd9, 0x2d, 0x28, 0xbe, 0x70, 0xa7, 0x33, - 0x61, 0x9c, 0xd2, 0x8b, 0xdf, 0x5b, 0x8f, 0x73, 0xce, 0xbf, 0x2d, 0x28, 0x9b, 0xe9, 0xc2, 0xee, - 0x43, 0x99, 0xa6, 0x8b, 0xf1, 0xe8, 0xf3, 0x91, 0x26, 0x10, 0xb6, 0xb7, 0x18, 0x9b, 0x19, 0x1f, - 0x0d, 0x95, 0x1e, 0x9f, 0xc6, 0xc7, 0x74, 0x88, 0xe6, 0x47, 0x62, 0x6c, 0xe6, 0x23, 0x95, 0xa2, - 0x2b, 0xc6, 0x7e, 0xe0, 0x63, 0xcd, 0x38, 0x9a, 0xd8, 0xfd, 0x24, 0xea, 0x02, 0x31, 0xde, 0xc9, - 0x32, 0x7e, 0x1a, 0x74, 0x1f, 0xaa, 0x99, 0x63, 0x3e, 0x13, 0xf5, 0x6f, 0xb2, 0x51, 0x9b, 0x23, - 0x89, 0x4e, 0x0f, 0xf7, 0x34, 0x0b, 0x3f, 0x20, 0x7f, 0x0f, 0x01, 0x52, 0xca, 0x2f, 0xef, 0x14, - 0xe7, 0x55, 0x1e, 0x60, 0x10, 0xe2, 0x73, 0x3e, 0x72, 0x69, 0x4a, 0x2c, 0xfb, 0x93, 0x40, 0x46, - 0xe2, 0x39, 0x3d, 0x1f, 0xb4, 0xbf, 0xc2, 0xab, 0x5a, 0x47, 0xb7, 0x98, 0xed, 0x43, 0x75, 0x24, - 0x62, 0x2f, 0xf2, 0xa9, 0xc9, 0x4d, 0xd2, 0x37, 0x30, 0xa6, 0x94, 0xa7, 0xd9, 0x4d, 0x11, 0x3a, - 0x57, 0xd9, 0x3d, 0xac, 0x05, 0xcb, 0xe2, 0x2a, 0x94, 0x91, 0x32, 0xa7, 0xe8, 0x8f, 0x90, 0x1b, - 0xfa, 0x73, 0x06, 0xf5, 0x74, 0x12, 0xaf, 0x8a, 0x74, 0xc1, 0x5c, 0x28, 0x78, 0x6e, 0xa8, 0x27, - 0x70, 0xb5, 0x55, 0xff, 0xe8, 0xbc, 0x8e, 0x1b, 0xea, 0xa4, 0xb5, 0x1f, 0x60, 0xac, 0xaf, 0xfe, - 0xbf, 0xb1, 0x93, 0x19, 0xbb, 0x97, 0xf2, 0x6c, 0xbe, 0x47, 0xfd, 0x72, 0xe1, 0xab, 0xbd, 0x99, - 0xf2, 0xa7, 0x7b, 0x6e, 0xe8, 0x23, 0x1d, 0x6e, 0xec, 0x77, 0x39, 0x51, 0xaf, 0x3d, 0x81, 0xda, - 0xc7, 0x7e, 0x7f, 0x4d, 0x0d, 0xd6, 0x1e, 0x81, 0xbd, 0xf0, 0xe3, 0xfb, 0x36, 0x56, 0xb2, 0xc5, - 0xbb, 0x0b, 0xd5, 0x4c, 0xdc, 0x08, 0x3c, 0x25, 0xa0, 0xce, 0xbe, 0x5e, 0x38, 0xaf, 0xf0, 0x0b, - 0x28, 0x99, 0x81, 0xbf, 0x02, 0x38, 0x57, 0x2a, 0x7c, 0x4e, 0x43, 0xd1, 0x1c, 0x62, 0xa3, 0x86, - 0x10, 0x6c, 0x03, 0xaa, 0xb8, 0x88, 0x8d, 0x5d, 0x7b, 0x4a, 0x3b, 0x62, 
0x0d, 0xf8, 0x25, 0xd8, - 0xe3, 0xc5, 0x76, 0x3d, 0xcc, 0x2a, 0xe3, 0x64, 0xf7, 0x2f, 0xa0, 0x12, 0x48, 0x63, 0xd3, 0x33, - 0xba, 0x1c, 0x48, 0x32, 0x39, 0x3b, 0xf0, 0xb3, 0x4f, 0x3e, 0xd7, 0xd8, 0x1d, 0x28, 0x8d, 0xfd, - 0xa9, 0xa2, 0xeb, 0x8a, 0x63, 0xdf, 0xac, 0x9c, 0xff, 0xe6, 0x00, 0xd2, 0xab, 0x85, 0x19, 0xc1, - 0x7b, 0x87, 0x98, 0x65, 0x7d, 0xcf, 0xa6, 0x50, 0xb9, 0x34, 0x15, 0x34, 0x7d, 0xb4, 0xfe, 0xe1, - 0x75, 0x6c, 0x26, 0x05, 0xd6, 0xb5, 0x6d, 0x99, 0xda, 0x7e, 0xcd, 0x27, 0xd5, 0xe2, 0x84, 0xb5, - 0x67, 0xb0, 0xf2, 0x01, 0xdd, 0x17, 0xde, 0xd4, 0xb4, 0xcb, 0xb2, 0x25, 0xbb, 0x0f, 0x25, 0xfd, - 0xb9, 0x81, 0xef, 0x36, 0x4a, 0x86, 0x86, 0x64, 0x9a, 0x2d, 0xc7, 0xc9, 0xc7, 0x67, 0xff, 0x78, - 0x7b, 0x0b, 0xca, 0xe6, 0x33, 0x8a, 0xd9, 0x50, 0xfc, 0xf3, 0xd1, 0xb0, 0x77, 0x52, 0x5b, 0x62, - 0x15, 0x28, 0x1c, 0x0c, 0x86, 0x27, 0xb5, 0x1c, 0x4a, 0x47, 0x83, 0xa3, 0x5e, 0xcd, 0xda, 0xfe, - 0x03, 0xd8, 0x8b, 0x71, 0x8f, 0xea, 0x76, 0xff, 0xa8, 0x5b, 0x5b, 0x62, 0x00, 0xa5, 0x61, 0xaf, - 0xc3, 0x7b, 0x08, 0x2e, 0x43, 0x7e, 0x38, 0x3c, 0xa8, 0x59, 0x48, 0xd5, 0xd9, 0xef, 0x1c, 0xf4, - 0x6a, 0x79, 0x14, 0x4f, 0x0e, 0x8f, 0x9f, 0x0e, 0x6b, 0x85, 0xed, 0x87, 0x70, 0xe3, 0xa3, 0x71, - 0x4b, 0xbb, 0x0f, 0xf6, 0x79, 0x0f, 0x99, 0xaa, 0x50, 0x3e, 0xe6, 0xfd, 0xd3, 0xfd, 0x93, 0x5e, - 0x2d, 0x87, 0x86, 0x3f, 0x0d, 0x3a, 0xcf, 0x7a, 0xdd, 0x9a, 0xd5, 0x5e, 0x7f, 0x7d, 0xdd, 0xc8, - 0xbd, 0xb9, 0x6e, 0xe4, 0xde, 0x5e, 0x37, 0x72, 0xdf, 0x5c, 0x37, 0x72, 0x7f, 0x7b, 0xdf, 0x58, - 0x7a, 0xf3, 0xbe, 0xb1, 0xf4, 0xf6, 0x7d, 0x63, 0xe9, 0xac, 0x44, 0x7f, 0x55, 0x1e, 0x7c, 0x1b, - 0x00, 0x00, 0xff, 0xff, 0x26, 0x19, 0xc9, 0x11, 0xea, 0x0c, 0x00, 0x00, +func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_68b9efdb358e5df5) } + +var fileDescriptor_ops_68b9efdb358e5df5 = []byte{ + // 1851 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6e, 0x23, 0xc7, + 0x11, 0x16, 0x87, 0xff, 0x45, 0x49, 0xcb, 
0xb4, 0xed, 0x0d, 0xa3, 0x6c, 0x24, 0x79, 0xec, 0x18, + 0xf4, 0xfe, 0x50, 0x80, 0x0c, 0xd8, 0x86, 0x0f, 0x41, 0x24, 0x91, 0x0b, 0x31, 0xce, 0x8a, 0x42, + 0x73, 0x7f, 0x8e, 0x8b, 0xd1, 0x4c, 0x93, 0x1a, 0x90, 0x9c, 0x1e, 0xf4, 0x34, 0x57, 0xe2, 0x25, + 0x01, 0xf6, 0x09, 0x02, 0x04, 0xc8, 0x3d, 0xc7, 0x3c, 0x44, 0xee, 0x3e, 0x1a, 0x41, 0x0e, 0x4e, + 0x0e, 0x4e, 0xa0, 0x7d, 0x91, 0xa0, 0xaa, 0x7b, 0x38, 0x43, 0xad, 0x82, 0xd5, 0x22, 0x40, 0x4e, + 0xac, 0xae, 0xfa, 0xba, 0xba, 0xba, 0xaa, 0xba, 0xaa, 0x86, 0x50, 0x97, 0x71, 0xd2, 0x89, 0x95, + 0xd4, 0x92, 0x39, 0xf1, 0xd9, 0xd6, 0xa3, 0x71, 0xa8, 0xcf, 0xe7, 0x67, 0x1d, 0x5f, 0xce, 0xf6, + 0xc6, 0x72, 0x2c, 0xf7, 0x48, 0x74, 0x36, 0x1f, 0xd1, 0x8a, 0x16, 0x44, 0x99, 0x2d, 0xee, 0x9f, + 0x1d, 0x70, 0x06, 0x31, 0xfb, 0x18, 0x2a, 0x61, 0x14, 0xcf, 0x75, 0xd2, 0x2a, 0xec, 0x16, 0xdb, + 0x8d, 0xfd, 0x7a, 0x27, 0x3e, 0xeb, 0xf4, 0x91, 0xc3, 0xad, 0x80, 0xed, 0x42, 0x49, 0x5c, 0x0a, + 0xbf, 0xe5, 0xec, 0x16, 0xda, 0x8d, 0x7d, 0x40, 0x40, 0xef, 0x52, 0xf8, 0x83, 0xf8, 0x78, 0x8d, + 0x93, 0x84, 0x7d, 0x06, 0x95, 0x44, 0xce, 0x95, 0x2f, 0x5a, 0x45, 0xc2, 0xac, 0x23, 0x66, 0x48, + 0x1c, 0x42, 0x59, 0x29, 0x6a, 0x1a, 0x85, 0x53, 0xd1, 0x2a, 0x65, 0x9a, 0x1e, 0x87, 0x53, 0x83, + 0x21, 0x09, 0xfb, 0x04, 0xca, 0x67, 0xf3, 0x70, 0x1a, 0xb4, 0xca, 0x04, 0x69, 0x20, 0xe4, 0x10, + 0x19, 0x84, 0x31, 0x32, 0xd6, 0x86, 0x5a, 0x3c, 0xf5, 0xf4, 0x48, 0xaa, 0x59, 0x0b, 0xb2, 0x03, + 0x4f, 0x2d, 0x8f, 0x2f, 0xa5, 0xec, 0x2b, 0x68, 0xf8, 0x32, 0x4a, 0xb4, 0xf2, 0xc2, 0x48, 0x27, + 0xad, 0x06, 0x81, 0x3f, 0x42, 0xf0, 0x0b, 0xa9, 0x26, 0x42, 0x1d, 0x65, 0x42, 0x9e, 0x47, 0x1e, + 0x96, 0xc0, 0x91, 0xb1, 0xfb, 0xa7, 0x02, 0xd4, 0x52, 0xad, 0xcc, 0x85, 0xf5, 0x03, 0xe5, 0x9f, + 0x87, 0x5a, 0xf8, 0x7a, 0xae, 0x44, 0xab, 0xb0, 0x5b, 0x68, 0xd7, 0xf9, 0x0a, 0x8f, 0x6d, 0x82, + 0x33, 0x18, 0x92, 0xa3, 0xea, 0xdc, 0x19, 0x0c, 0x59, 0x0b, 0xaa, 0xcf, 0x3d, 0x15, 0x7a, 0x91, + 0x26, 0xcf, 0xd4, 0x79, 0xba, 0x64, 0xf7, 0xa0, 0x3e, 0x18, 0x3e, 0x17, 0x2a, 
0x09, 0x65, 0x44, + 0xfe, 0xa8, 0xf3, 0x8c, 0xc1, 0xb6, 0x01, 0x06, 0xc3, 0xc7, 0xc2, 0x43, 0xa5, 0x49, 0xab, 0xbc, + 0x5b, 0x6c, 0xd7, 0x79, 0x8e, 0xe3, 0xfe, 0x0e, 0xca, 0x14, 0x23, 0xf6, 0x1b, 0xa8, 0x04, 0xe1, + 0x58, 0x24, 0xda, 0x98, 0x73, 0xb8, 0xff, 0xdd, 0x8f, 0x3b, 0x6b, 0xff, 0xfc, 0x71, 0xe7, 0x7e, + 0x2e, 0x19, 0x64, 0x2c, 0x22, 0x5f, 0x46, 0xda, 0x0b, 0x23, 0xa1, 0x92, 0xbd, 0xb1, 0x7c, 0x64, + 0xb6, 0x74, 0xba, 0xf4, 0xc3, 0xad, 0x06, 0xf6, 0x39, 0x94, 0xc3, 0x28, 0x10, 0x97, 0x64, 0x7f, + 0xf1, 0xf0, 0x03, 0xab, 0xaa, 0x31, 0x98, 0xeb, 0x78, 0xae, 0xfb, 0x28, 0xe2, 0x06, 0xe1, 0xc6, + 0x50, 0x31, 0x29, 0xc0, 0xee, 0x41, 0x69, 0x26, 0xb4, 0x47, 0xc7, 0x37, 0xf6, 0x6b, 0xe8, 0xda, + 0x27, 0x42, 0x7b, 0x9c, 0xb8, 0x98, 0x5d, 0x33, 0x39, 0x47, 0xd7, 0x3b, 0x59, 0x76, 0x3d, 0x41, + 0x0e, 0xb7, 0x02, 0xf6, 0x4b, 0xa8, 0x46, 0x42, 0x5f, 0x48, 0x35, 0x21, 0x17, 0x6d, 0x9a, 0x98, + 0x9f, 0x08, 0xfd, 0x44, 0x06, 0x82, 0xa7, 0x32, 0xf7, 0x2f, 0x05, 0x28, 0xa1, 0x62, 0xc6, 0xa0, + 0xe4, 0xa9, 0xb1, 0x49, 0xd7, 0x3a, 0x27, 0x9a, 0x35, 0xa1, 0x28, 0xa2, 0x57, 0x74, 0x46, 0x9d, + 0x23, 0x89, 0x1c, 0xff, 0x22, 0xb0, 0x4e, 0x47, 0x12, 0xf7, 0xcd, 0x13, 0xa1, 0xac, 0xaf, 0x89, + 0x66, 0x9f, 0x43, 0x3d, 0x56, 0xf2, 0x72, 0xf1, 0x12, 0x77, 0x97, 0x73, 0x99, 0x84, 0xcc, 0x5e, + 0xf4, 0x8a, 0xd7, 0x62, 0x4b, 0xb1, 0xfb, 0x00, 0xe2, 0x52, 0x2b, 0xef, 0x58, 0x26, 0x3a, 0x69, + 0x55, 0xe8, 0x36, 0x94, 0xc0, 0xc8, 0xe8, 0x9f, 0xf2, 0x9c, 0xd4, 0xfd, 0x9b, 0x03, 0x65, 0xba, + 0x24, 0x6b, 0xa3, 0x4b, 0xe3, 0xb9, 0x89, 0x4e, 0xf1, 0x90, 0x59, 0x97, 0x02, 0x05, 0x6f, 0xe9, + 0x51, 0x0c, 0xe4, 0x16, 0xd4, 0x12, 0x31, 0x15, 0xbe, 0x96, 0xca, 0xe6, 0xcf, 0x72, 0x8d, 0xa6, + 0x07, 0x18, 0x62, 0x73, 0x1b, 0xa2, 0xd9, 0x03, 0xa8, 0x48, 0x8a, 0x0b, 0x5d, 0xe8, 0xbf, 0x44, + 0xcb, 0x42, 0x50, 0xb9, 0x12, 0x5e, 0x20, 0xa3, 0xe9, 0x82, 0xae, 0x59, 0xe3, 0xcb, 0x35, 0x7b, + 0x00, 0x75, 0x8a, 0xc4, 0xd3, 0x45, 0x2c, 0x5a, 0x15, 0x8a, 0xc0, 0xc6, 0x32, 0x4a, 0xc8, 0xe4, + 0x99, 0x1c, 0x5f, 
0x9e, 0xef, 0xf9, 0xe7, 0x62, 0x10, 0xeb, 0xd6, 0x87, 0x99, 0xbf, 0x8e, 0x2c, + 0x8f, 0x2f, 0xa5, 0xa8, 0x36, 0x11, 0xbe, 0x12, 0x1a, 0xa1, 0x1f, 0x11, 0x94, 0xd4, 0x0e, 0x53, + 0x26, 0xcf, 0xe4, 0xcc, 0x85, 0xca, 0x70, 0x78, 0x8c, 0xc8, 0xbb, 0x59, 0x65, 0x30, 0x1c, 0x6e, + 0x25, 0x6e, 0x1f, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0xfa, 0x5d, 0xfb, 0x00, 0x9d, 0x7e, 0x97, 0x3d, + 0x82, 0x6a, 0x72, 0xee, 0xa9, 0x30, 0x1a, 0x93, 0xef, 0x36, 0xf7, 0x3f, 0x58, 0x5a, 0x35, 0x34, + 0x7c, 0xd4, 0x94, 0x62, 0x5c, 0x09, 0xf5, 0xa5, 0x19, 0x6f, 0xe9, 0x6a, 0x42, 0x71, 0x1e, 0x06, + 0xa4, 0x67, 0x83, 0x23, 0x89, 0x9c, 0x71, 0x68, 0x72, 0x69, 0x83, 0x23, 0x89, 0x01, 0x99, 0xc9, + 0xc0, 0xd4, 0xb1, 0x0d, 0x4e, 0x34, 0xfa, 0x58, 0xc6, 0x3a, 0x94, 0x91, 0x37, 0x4d, 0x7d, 0x9c, + 0xae, 0xdd, 0x69, 0x7a, 0xbf, 0xff, 0xcb, 0x69, 0x7f, 0x2c, 0x40, 0x2d, 0x2d, 0xbe, 0x58, 0x49, + 0xc2, 0x40, 0x44, 0x3a, 0x1c, 0x85, 0x42, 0xd9, 0x83, 0x73, 0x1c, 0xf6, 0x08, 0xca, 0x9e, 0xd6, + 0x2a, 0x7d, 0xa0, 0x3f, 0xcd, 0x57, 0xee, 0xce, 0x01, 0x4a, 0x7a, 0x91, 0x56, 0x0b, 0x6e, 0x50, + 0x5b, 0x5f, 0x03, 0x64, 0x4c, 0xb4, 0x75, 0x22, 0x16, 0x56, 0x2b, 0x92, 0xec, 0x43, 0x28, 0xbf, + 0xf2, 0xa6, 0x73, 0x61, 0x73, 0xd8, 0x2c, 0xbe, 0x71, 0xbe, 0x2e, 0xb8, 0x7f, 0x75, 0xa0, 0x6a, + 0x2b, 0x39, 0x7b, 0x08, 0x55, 0xaa, 0xe4, 0xd6, 0xa2, 0x9b, 0x1f, 0x46, 0x0a, 0x61, 0x7b, 0xcb, + 0x16, 0x95, 0xb3, 0xd1, 0xaa, 0x32, 0xad, 0xca, 0xda, 0x98, 0x35, 0xac, 0x62, 0x20, 0x46, 0xb6, + 0x17, 0x6d, 0x22, 0xba, 0x2b, 0x46, 0x61, 0x14, 0xa2, 0x7f, 0x38, 0x8a, 0xd8, 0xc3, 0xf4, 0xd6, + 0x25, 0xd2, 0x78, 0x37, 0xaf, 0xf1, 0xed, 0x4b, 0xf7, 0xa1, 0x91, 0x3b, 0xe6, 0x86, 0x5b, 0x7f, + 0x9a, 0xbf, 0xb5, 0x3d, 0x92, 0xd4, 0x99, 0x46, 0x9a, 0x79, 0xe1, 0x7f, 0xf0, 0xdf, 0x97, 0x00, + 0x99, 0xca, 0xdb, 0x17, 0x16, 0xf7, 0x75, 0x11, 0x60, 0x10, 0x63, 0xe9, 0x0c, 0x3c, 0xaa, 0xc8, + 0xeb, 0xe1, 0x38, 0x92, 0x4a, 0xbc, 0xa4, 0xa7, 0x4a, 0xfb, 0x6b, 0xbc, 0x61, 0x78, 0xf4, 0x62, + 0xd8, 0x01, 0x34, 0x02, 0x91, 0xf8, 0x2a, 0xa4, 0x84, 
0xb2, 0x4e, 0xdf, 0xc1, 0x3b, 0x65, 0x7a, + 0x3a, 0xdd, 0x0c, 0x61, 0x7c, 0x95, 0xdf, 0xc3, 0xf6, 0x61, 0x5d, 0x5c, 0xc6, 0x52, 0x69, 0x7b, + 0x8a, 0x69, 0xf8, 0x77, 0xcc, 0xe8, 0x80, 0x7c, 0x3a, 0x89, 0x37, 0x44, 0xb6, 0x60, 0x1e, 0x94, + 0x7c, 0x2f, 0x36, 0xdd, 0xae, 0xb1, 0xdf, 0xba, 0x76, 0xde, 0x91, 0x17, 0x1b, 0xa7, 0x1d, 0x7e, + 0x81, 0x77, 0x7d, 0xfd, 0xaf, 0x9d, 0x07, 0xb9, 0x16, 0x37, 0x93, 0x67, 0x8b, 0x3d, 0xca, 0x97, + 0x49, 0xa8, 0xf7, 0xe6, 0x3a, 0x9c, 0xee, 0x79, 0x71, 0x88, 0xea, 0x70, 0x63, 0xbf, 0xcb, 0x49, + 0xf5, 0xd6, 0xaf, 0xa0, 0x79, 0xdd, 0xee, 0xf7, 0x89, 0xc1, 0xd6, 0x57, 0x50, 0x5f, 0xda, 0xf1, + 0xae, 0x8d, 0xb5, 0x7c, 0xf0, 0x3e, 0x81, 0x46, 0xee, 0xde, 0x08, 0x7c, 0x4e, 0x40, 0xe3, 0x7d, + 0xb3, 0x70, 0x5f, 0xe3, 0xb4, 0x91, 0xf6, 0x9b, 0x5f, 0x00, 0x9c, 0x6b, 0x1d, 0xbf, 0xa4, 0x06, + 0x64, 0x0f, 0xa9, 0x23, 0x87, 0x10, 0x6c, 0x07, 0x1a, 0xb8, 0x48, 0xac, 0xdc, 0x58, 0x4a, 0x3b, + 0x12, 0x03, 0xf8, 0x39, 0xd4, 0x47, 0xcb, 0xed, 0xa6, 0x71, 0xd4, 0x46, 0xe9, 0xee, 0x9f, 0x41, + 0x2d, 0x92, 0x56, 0x66, 0xfa, 0x61, 0x35, 0x92, 0x24, 0x72, 0x1f, 0xc0, 0x4f, 0xde, 0x1a, 0x8d, + 0xd8, 0x5d, 0xa8, 0x8c, 0xc2, 0xa9, 0xa6, 0xe7, 0x8a, 0x2d, 0xd6, 0xae, 0xdc, 0x7f, 0x14, 0x00, + 0xb2, 0xa7, 0x85, 0x1e, 0xc1, 0x77, 0x87, 0x98, 0x75, 0xf3, 0xce, 0xa6, 0x50, 0x9b, 0xd9, 0x08, + 0xda, 0x3c, 0xba, 0xb7, 0xfa, 0x1c, 0x3b, 0x69, 0x80, 0x4d, 0x6c, 0xf7, 0x6d, 0x6c, 0xdf, 0x67, + 0x7c, 0x59, 0x9e, 0xb0, 0xf5, 0x2d, 0x6c, 0xac, 0xa8, 0xbb, 0xe5, 0x4b, 0xcd, 0xb2, 0x2c, 0x1f, + 0xb2, 0x87, 0x50, 0x31, 0xad, 0x1d, 0xeb, 0x2f, 0x52, 0x56, 0x0d, 0xd1, 0x54, 0xc7, 0x4f, 0xd3, + 0x41, 0xaf, 0x7f, 0xea, 0xee, 0x43, 0xc5, 0x4c, 0xb2, 0xac, 0x0d, 0x55, 0xcf, 0xc7, 0xab, 0xa5, + 0xe5, 0x6a, 0x33, 0x1d, 0x73, 0x0f, 0x88, 0xcd, 0x53, 0xb1, 0xfb, 0x77, 0x07, 0x20, 0xe3, 0xbf, + 0xc7, 0xac, 0xf0, 0x0d, 0x6c, 0x26, 0xc2, 0x97, 0x51, 0xe0, 0xa9, 0x05, 0x49, 0xed, 0xc4, 0x76, + 0xd3, 0x96, 0x6b, 0xc8, 0xdc, 0xdc, 0x50, 0x7c, 0xf7, 0xdc, 0xd0, 0x86, 0x92, 0x2f, 0xe3, 
0x85, + 0x7d, 0xbe, 0x6c, 0xf5, 0x22, 0x47, 0x32, 0x5e, 0xe0, 0xdc, 0x8e, 0x08, 0xd6, 0x81, 0xca, 0x6c, + 0x42, 0xb3, 0xbd, 0x19, 0xa3, 0x3e, 0x5c, 0xc5, 0x3e, 0x99, 0x20, 0x8d, 0x5f, 0x02, 0x06, 0xc5, + 0x1e, 0x40, 0x79, 0x36, 0x09, 0x42, 0x45, 0x13, 0x47, 0xc3, 0xf4, 0xeb, 0x3c, 0xbc, 0x1b, 0x2a, + 0x9c, 0xf7, 0x09, 0xc3, 0x5c, 0x70, 0xd4, 0xac, 0x55, 0x25, 0x64, 0xf3, 0x9a, 0x37, 0x67, 0xc7, + 0x6b, 0xdc, 0x51, 0xb3, 0xc3, 0x1a, 0x54, 0x8c, 0x5f, 0xdd, 0x2b, 0x07, 0x36, 0x57, 0xad, 0xc4, + 0x3c, 0x48, 0x94, 0x9f, 0xe6, 0x41, 0xa2, 0xfc, 0xe5, 0x48, 0xe5, 0xe4, 0x46, 0x2a, 0x17, 0xca, + 0xf2, 0x22, 0xb2, 0x23, 0x62, 0x3a, 0xd9, 0x9c, 0xcb, 0x8b, 0x08, 0x87, 0x07, 0x23, 0x5a, 0xf6, + 0x62, 0xbc, 0x65, 0xd9, 0xf6, 0xe2, 0x4f, 0x61, 0x63, 0x24, 0xa7, 0x53, 0x79, 0x31, 0x5c, 0xcc, + 0xa6, 0x61, 0x34, 0xa1, 0x3b, 0xd5, 0xf8, 0x2a, 0x93, 0xb5, 0xe1, 0x4e, 0x10, 0x2a, 0x34, 0xe7, + 0x48, 0x46, 0x5a, 0xe0, 0x4c, 0x5c, 0x25, 0xdc, 0x75, 0x36, 0xea, 0xf3, 0xb4, 0x16, 0xb3, 0x58, + 0x3f, 0x8b, 0x62, 0xcf, 0x9f, 0xb4, 0x6a, 0x46, 0xdf, 0x0a, 0x93, 0x7d, 0x06, 0x9b, 0xbe, 0x12, + 0x9e, 0x16, 0x5d, 0x91, 0xe8, 0x53, 0x4f, 0x9f, 0xb7, 0xea, 0x04, 0xbb, 0xc6, 0x25, 0x6d, 0x68, + 0xc7, 0x8b, 0x70, 0x1a, 0xf8, 0x9e, 0x0a, 0xe8, 0x8b, 0x09, 0xb5, 0xe5, 0x99, 0xac, 0x03, 0x8c, + 0x18, 0xbd, 0x59, 0xac, 0x17, 0x4b, 0x68, 0x83, 0xa0, 0x37, 0x48, 0x5c, 0x05, 0xcd, 0xeb, 0xd1, + 0x45, 0xdf, 0xc4, 0x68, 0x87, 0x7d, 0x27, 0x48, 0x2f, 0xfd, 0xe5, 0xe4, 0xfc, 0x85, 0xbe, 0xc7, + 0x82, 0x80, 0x09, 0xb8, 0xce, 0x89, 0xbe, 0x8d, 0xef, 0xdd, 0xdf, 0xc3, 0x9d, 0x6b, 0x29, 0x72, + 0xeb, 0x23, 0x77, 0xa1, 0x31, 0xf3, 0x26, 0xe2, 0xd4, 0x53, 0xe4, 0xf8, 0xa2, 0x69, 0x7a, 0x39, + 0xd6, 0xad, 0x0c, 0x88, 0x60, 0x3d, 0x9f, 0x79, 0x37, 0x9e, 0x9e, 0xba, 0xfb, 0x44, 0xea, 0xc7, + 0x72, 0x1e, 0x05, 0xb6, 0x17, 0xac, 0x32, 0xdf, 0x0e, 0x4a, 0xf1, 0x86, 0xa0, 0xb8, 0x27, 0x50, + 0x4b, 0x4d, 0x60, 0x3b, 0xf6, 0xf3, 0xa5, 0x90, 0x7d, 0x17, 0x3f, 0x4b, 0x84, 0x42, 0xeb, 0xcc, + 0xb7, 0xcc, 0xc7, 0x50, 0x1e, 
0x2b, 0x39, 0x8f, 0x6d, 0x65, 0x5b, 0x41, 0x18, 0x89, 0xfb, 0x02, + 0xaa, 0x96, 0x83, 0xa6, 0x47, 0xde, 0x2c, 0xfd, 0x88, 0x25, 0x3a, 0x2b, 0x40, 0xce, 0xbb, 0x0a, + 0xd0, 0x26, 0x38, 0x76, 0x44, 0x2d, 0x73, 0x27, 0x0c, 0xee, 0xb7, 0xa1, 0x6a, 0x3f, 0xd8, 0x58, + 0x1d, 0xca, 0xcf, 0x4e, 0x86, 0xbd, 0xa7, 0xcd, 0x35, 0x56, 0x83, 0xd2, 0xf1, 0x60, 0xf8, 0xb4, + 0x59, 0x40, 0xea, 0x64, 0x70, 0xd2, 0x6b, 0x3a, 0xf7, 0x7f, 0x0d, 0xf5, 0xe5, 0x87, 0x05, 0xb2, + 0x0f, 0xfb, 0x27, 0xdd, 0xe6, 0x1a, 0x03, 0xa8, 0x0c, 0x7b, 0x47, 0xbc, 0x87, 0xe0, 0x2a, 0x14, + 0x87, 0xc3, 0xe3, 0xa6, 0x83, 0xaa, 0x8e, 0x0e, 0x8e, 0x8e, 0x7b, 0xcd, 0x22, 0x92, 0x4f, 0x9f, + 0x9c, 0x3e, 0x1e, 0x36, 0x4b, 0xf7, 0xbf, 0x84, 0x3b, 0xd7, 0x06, 0x7b, 0xda, 0x7d, 0x7c, 0xc0, + 0x7b, 0xa8, 0xa9, 0x01, 0xd5, 0x53, 0xde, 0x7f, 0x7e, 0xf0, 0xb4, 0xd7, 0x2c, 0xa0, 0xe0, 0xb7, + 0x83, 0xa3, 0x6f, 0x7b, 0xdd, 0xa6, 0x73, 0x78, 0xef, 0xbb, 0xab, 0xed, 0xc2, 0xf7, 0x57, 0xdb, + 0x85, 0x1f, 0xae, 0xb6, 0x0b, 0xff, 0xbe, 0xda, 0x2e, 0xfc, 0xe1, 0xcd, 0xf6, 0xda, 0xf7, 0x6f, + 0xb6, 0xd7, 0x7e, 0x78, 0xb3, 0xbd, 0x76, 0x56, 0xa1, 0x3f, 0x45, 0xbe, 0xf8, 0x4f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0xd1, 0xbc, 0xef, 0x81, 0x54, 0x11, 0x00, 0x00, } diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index 09442f60c4cb..31e894f1a832 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -15,7 +15,7 @@ message Op { oneof op { ExecOp exec = 2; SourceOp source = 3; - CopyOp copy = 4; + FileOp file = 4; BuildOp build = 5; } Platform platform = 10; @@ -134,18 +134,6 @@ message SSHOpt { bool optional = 5; } -// CopyOp copies files across Ops. -message CopyOp { - repeated CopySource src = 1; - string dest = 2; -} - -// CopySource specifies a source for CopyOp. -message CopySource { - int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; - string selector = 2; -} - // SourceOp specifies a source such as build contexts and images. 
message SourceOp { // TODO: use source type or any type instead of URL protocol. @@ -211,4 +199,64 @@ message Definition { message HostIP { string Host = 1; string IP = 2; +} + +message FileOp { + repeated FileAction actions = 2; +} + +message FileAction { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index) + int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//-- + int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; + oneof action { + FileActionCopy copy = 4; + FileActionMkFile mkfile = 5; + FileActionMkDir mkdir = 6; + FileActionRm rm = 7; + } +} + +message FileActionCopy { + string src = 1; + string dest = 2; + ChownOpt owner = 4; + int32 mode = 5; + bool followSymlink = 6; + bool dirCopyContents = 7; + bool attemptUnpack = 8; + bool createDestPath = 9; + bool allowWildcard = 10; + bool allowEmptyWildcard = 11; +} + +message FileActionMkFile { + string path = 1; + int32 mode = 2; + bytes data = 3; + ChownOpt owner = 4; +} + +message FileActionMkDir { + string path = 1; + int32 mode = 2; + bool makeParents = 3; + ChownOpt owner = 4; +} + +message FileActionRm { + string path = 1; + bool allowNotFound = 2; + bool allowWildcard = 3; +} + +message ChownOpt { + UserOpt user = 1; + UserOpt group = 2; +} + +message UserOpt { + string name = 1; + int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // input that contains /etc/passwd if using a name + int32 id = 3; } \ No newline at end of file diff --git a/worker/base/worker.go b/worker/base/worker.go index fe7c64fb8e78..ee93d6e34bed 100644 --- a/worker/base/worker.go +++ b/worker/base/worker.go @@ -200,6 +200,8 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w) case 
*pb.Op_Build: return ops.NewBuildOp(v, op, s, w) + default: + return nil, errors.Errorf("no support for %T", op) } } return nil, errors.Errorf("could not resolve %v", v) From 431d11dda35b90ea231beaceb6de588361f565ab Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 1 Feb 2019 10:52:53 -0800 Subject: [PATCH 02/25] llb: add timestamp override to fileop Signed-off-by: Tonis Tiigi --- client/llb/fileop.go | 42 +++- client/llb/fileop_test.go | 46 +++++ solver/pb/ops.pb.go | 413 ++++++++++++++++++++++++-------------- solver/pb/ops.proto | 19 +- 4 files changed, 354 insertions(+), 166 deletions(-) diff --git a/client/llb/fileop.go b/client/llb/fileop.go index 82fc87c655da..64f6ea312860 100644 --- a/client/llb/fileop.go +++ b/client/llb/fileop.go @@ -4,6 +4,7 @@ import ( _ "crypto/sha256" "os" "path" + "time" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" @@ -149,6 +150,7 @@ func (a *fileActionMkdir) toProtoAction(parent string, base pb.InputIndex) pb.Is Mode: int32(a.mode & 0777), MakeParents: a.info.MakeParents, Owner: a.info.ChownOpt.marshal(base), + Timestamp: marshalTime(a.info.CreatedTime), }, } } @@ -180,6 +182,7 @@ func WithParents(b bool) MkdirOption { type MkdirInfo struct { MakeParents bool ChownOpt *ChownOpt + CreatedTime *time.Time } func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) { @@ -260,7 +263,8 @@ type MkfileOption interface { } type MkfileInfo struct { - ChownOpt *ChownOpt + ChownOpt *ChownOpt + CreatedTime *time.Time } func (mi *MkfileInfo) SetMkfileOption(mi2 *MkfileInfo) { @@ -279,10 +283,11 @@ type fileActionMkfile struct { func (a *fileActionMkfile) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { return &pb.FileAction_Mkfile{ Mkfile: &pb.FileActionMkFile{ - Path: normalizePath(parent, a.file), - Mode: int32(a.mode & 0777), - Data: a.dt, - Owner: a.info.ChownOpt.marshal(base), + Path: normalizePath(parent, a.file), + Mode: int32(a.mode & 0777), + Data: a.dt, + Owner: 
a.info.ChownOpt.marshal(base), + Timestamp: marshalTime(a.info.CreatedTime), }, } } @@ -391,6 +396,7 @@ type CopyInfo struct { AllowWildcard bool AllowEmptyWildcard bool ChownOpt *ChownOpt + CreatedTime *time.Time } func (mi *CopyInfo) SetCopyOption(mi2 *CopyInfo) { @@ -418,6 +424,7 @@ func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsF DirCopyContents: a.info.CopyDirContentsOnly, AttemptUnpack: a.info.AttemptUnpack, CreateDestPath: a.info.CreateDestPath, + Timestamp: marshalTime(a.info.CreatedTime), } if a.info.Mode != nil { c.Mode = int32(*a.info.Mode) @@ -441,6 +448,31 @@ func (c *fileActionCopy) sourcePath() string { return p } +type CreatedTime time.Time + +func WithCreatedTime(t time.Time) CreatedTime { + return CreatedTime(t) +} + +func (c CreatedTime) SetMkdirOption(mi *MkdirInfo) { + mi.CreatedTime = (*time.Time)(&c) +} + +func (c CreatedTime) SetMkfileOption(mi *MkfileInfo) { + mi.CreatedTime = (*time.Time)(&c) +} + +func (c CreatedTime) SetCopyOption(mi *CopyInfo) { + mi.CreatedTime = (*time.Time)(&c) +} + +func marshalTime(t *time.Time) int64 { + if t == nil { + return -1 + } + return t.UnixNano() +} + type FileOp struct { MarshalCache action *FileAction diff --git a/client/llb/fileop_test.go b/client/llb/fileop_test.go index f0a433e945d9..76ae9b65f8e1 100644 --- a/client/llb/fileop_test.go +++ b/client/llb/fileop_test.go @@ -2,6 +2,7 @@ package llb import ( "testing" + "time" "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" @@ -39,6 +40,7 @@ func TestFileMkdir(t *testing.T) { require.Equal(t, "/foo", mkdir.Path) require.Equal(t, 0700, int(mkdir.Mode)) + require.Equal(t, int64(-1), mkdir.Timestamp) } func TestFileMkdirChain(t *testing.T) { @@ -126,6 +128,7 @@ func TestFileMkfile(t *testing.T) { require.Equal(t, "/foo", mkdir.Path) require.Equal(t, 0700, int(mkdir.Mode)) require.Equal(t, "data", string(mkdir.Data)) + require.Equal(t, int64(-1), mkdir.Timestamp) } func 
TestFileRm(t *testing.T) { @@ -270,6 +273,7 @@ func TestFileCopy(t *testing.T) { require.Equal(t, "/etc/foo", copy.Src) require.Equal(t, "/tmp/bar", copy.Dest) + require.Equal(t, int64(-1), copy.Timestamp) } func TestFileCopyFromAction(t *testing.T) { @@ -560,6 +564,48 @@ func TestFileCopyOwner(t *testing.T) { require.Equal(t, -1, int(copy.Owner.Group.Input)) } +func TestFileCreatedTime(t *testing.T) { + t.Parallel() + + dt := time.Now() + dt2 := time.Date(2009, time.November, 10, 23, 0, 0, 0, time.UTC) + dt3 := time.Date(2019, time.November, 10, 23, 0, 0, 0, time.UTC) + + st := Image("foo").File( + Mkdir("/foo", 0700, WithCreatedTime(dt)). + Mkfile("bar", 0600, []byte{}, WithCreatedTime(dt2)). + Copy(Scratch(), "src", "dst", WithCreatedTime(dt3))) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 3, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[1]) + + f := arr[1].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 3, len(f.Actions)) + + action := f.Actions[0] + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + require.Equal(t, dt.UnixNano(), mkdir.Timestamp) + + action = f.Actions[1] + mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile + require.Equal(t, dt2.UnixNano(), mkfile.Timestamp) + + action = f.Actions[2] + copy := action.Action.(*pb.FileAction_Copy).Copy + require.Equal(t, dt3.UnixNano(), copy.Timestamp) +} + func parseDef(t *testing.T, def [][]byte) (map[digest.Digest]pb.Op, []pb.Op) { m := map[digest.Digest]pb.Op{} arr := make([]pb.Op, 0, len(def)) diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index 8ec78919836d..372b94a4e76d 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -54,7 +54,7 @@ func (x NetMode) String() string { return proto.EnumName(NetMode_name, int32(x)) } func 
(NetMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{0} + return fileDescriptor_ops_5b0fc877b949f769, []int{0} } // MountType defines a type of a mount from a supported set @@ -87,7 +87,7 @@ func (x MountType) String() string { return proto.EnumName(MountType_name, int32(x)) } func (MountType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{1} + return fileDescriptor_ops_5b0fc877b949f769, []int{1} } // CacheSharingOpt defines different sharing modes for cache mount @@ -117,7 +117,7 @@ func (x CacheSharingOpt) String() string { return proto.EnumName(CacheSharingOpt_name, int32(x)) } func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{2} + return fileDescriptor_ops_5b0fc877b949f769, []int{2} } // Op represents a vertex of the LLB DAG. @@ -138,7 +138,7 @@ func (m *Op) Reset() { *m = Op{} } func (m *Op) String() string { return proto.CompactTextString(m) } func (*Op) ProtoMessage() {} func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{0} + return fileDescriptor_ops_5b0fc877b949f769, []int{0} } func (m *Op) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -368,7 +368,7 @@ func (m *Platform) Reset() { *m = Platform{} } func (m *Platform) String() string { return proto.CompactTextString(m) } func (*Platform) ProtoMessage() {} func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{1} + return fileDescriptor_ops_5b0fc877b949f769, []int{1} } func (m *Platform) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +440,7 @@ func (m *Input) Reset() { *m = Input{} } func (m *Input) String() string { return proto.CompactTextString(m) } func (*Input) ProtoMessage() {} func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{2} + return fileDescriptor_ops_5b0fc877b949f769, []int{2} } func (m *Input) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -476,7 +476,7 @@ func (m *ExecOp) Reset() { *m = ExecOp{} } func (m *ExecOp) String() string { return proto.CompactTextString(m) } func (*ExecOp) ProtoMessage() {} func (*ExecOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{3} + return fileDescriptor_ops_5b0fc877b949f769, []int{3} } func (m *ExecOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -538,7 +538,7 @@ func (m *Meta) Reset() { *m = Meta{} } func (m *Meta) String() string { return proto.CompactTextString(m) } func (*Meta) ProtoMessage() {} func (*Meta) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{4} + return fileDescriptor_ops_5b0fc877b949f769, []int{4} } func (m *Meta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -622,7 +622,7 @@ func (m *Mount) Reset() { *m = Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{5} + return fileDescriptor_ops_5b0fc877b949f769, []int{5} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -708,7 +708,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{6} + return fileDescriptor_ops_5b0fc877b949f769, []int{6} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +766,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{7} + return fileDescriptor_ops_5b0fc877b949f769, []int{7} } func (m *SecretOpt) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -845,7 +845,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func (*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{8} + return fileDescriptor_ops_5b0fc877b949f769, []int{8} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -918,7 +918,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{9} + return fileDescriptor_ops_5b0fc877b949f769, []int{9} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -970,7 +970,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{10} + return fileDescriptor_ops_5b0fc877b949f769, []int{10} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1025,7 +1025,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{11} + return fileDescriptor_ops_5b0fc877b949f769, []int{11} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1066,7 +1066,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{12} + return 
fileDescriptor_ops_5b0fc877b949f769, []int{12} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1127,7 +1127,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m *ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{13} + return fileDescriptor_ops_5b0fc877b949f769, []int{13} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1170,7 +1170,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{14} + return fileDescriptor_ops_5b0fc877b949f769, []int{14} } func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1232,7 +1232,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{15} + return fileDescriptor_ops_5b0fc877b949f769, []int{15} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1277,7 +1277,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{16} + return fileDescriptor_ops_5b0fc877b949f769, []int{16} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1325,7 +1325,7 @@ func (m *HostIP) Reset() { *m = HostIP{} } func (m *HostIP) String() string { return proto.CompactTextString(m) } func 
(*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{17} + return fileDescriptor_ops_5b0fc877b949f769, []int{17} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1372,7 +1372,7 @@ func (m *FileOp) Reset() { *m = FileOp{} } func (m *FileOp) String() string { return proto.CompactTextString(m) } func (*FileOp) ProtoMessage() {} func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{18} + return fileDescriptor_ops_5b0fc877b949f769, []int{18} } func (m *FileOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1420,7 +1420,7 @@ func (m *FileAction) Reset() { *m = FileAction{} } func (m *FileAction) String() string { return proto.CompactTextString(m) } func (*FileAction) ProtoMessage() {} func (*FileAction) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{19} + return fileDescriptor_ops_5b0fc877b949f769, []int{19} } func (m *FileAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1627,13 +1627,14 @@ type FileActionCopy struct { CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` + Timestamp int64 `protobuf:"varint,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } func (*FileActionCopy) ProtoMessage() {} func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{20} + return fileDescriptor_ops_5b0fc877b949f769, []int{20} } func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1728,18 
+1729,26 @@ func (m *FileActionCopy) GetAllowEmptyWildcard() bool { return false } +func (m *FileActionCopy) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + type FileActionMkFile struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } func (*FileActionMkFile) ProtoMessage() {} func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{21} + return fileDescriptor_ops_5b0fc877b949f769, []int{21} } func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1792,18 +1801,26 @@ func (m *FileActionMkFile) GetOwner() *ChownOpt { return nil } +func (m *FileActionMkFile) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + type FileActionMkDir struct { Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` MakeParents bool `protobuf:"varint,3,opt,name=makeParents,proto3" json:"makeParents,omitempty"` Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + Timestamp int64 
`protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } func (*FileActionMkDir) ProtoMessage() {} func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{22} + return fileDescriptor_ops_5b0fc877b949f769, []int{22} } func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1856,6 +1873,13 @@ func (m *FileActionMkDir) GetOwner() *ChownOpt { return nil } +func (m *FileActionMkDir) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + type FileActionRm struct { Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` @@ -1866,7 +1890,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} } func (m *FileActionRm) String() string { return proto.CompactTextString(m) } func (*FileActionRm) ProtoMessage() {} func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{23} + return fileDescriptor_ops_5b0fc877b949f769, []int{23} } func (m *FileActionRm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1921,7 +1945,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} } func (m *ChownOpt) String() string { return proto.CompactTextString(m) } func (*ChownOpt) ProtoMessage() {} func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_68b9efdb358e5df5, []int{24} + return fileDescriptor_ops_5b0fc877b949f769, []int{24} } func (m *ChownOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1970,7 +1994,7 @@ func (m *UserOpt) Reset() { *m = UserOpt{} } func (m *UserOpt) String() string { return proto.CompactTextString(m) } func (*UserOpt) ProtoMessage() {} func (*UserOpt) Descriptor() ([]byte, []int) { - return 
fileDescriptor_ops_68b9efdb358e5df5, []int{25} + return fileDescriptor_ops_5b0fc877b949f769, []int{25} } func (m *UserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3251,6 +3275,11 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { } i++ } + if m.Timestamp != 0 { + dAtA[i] = 0x60 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) + } return i, nil } @@ -3296,6 +3325,11 @@ func (m *FileActionMkFile) MarshalTo(dAtA []byte) (int, error) { } i += n23 } + if m.Timestamp != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) + } return i, nil } @@ -3345,6 +3379,11 @@ func (m *FileActionMkDir) MarshalTo(dAtA []byte) (int, error) { } i += n24 } + if m.Timestamp != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) + } return i, nil } @@ -4080,6 +4119,9 @@ func (m *FileActionCopy) Size() (n int) { if m.AllowEmptyWildcard { n += 2 } + if m.Timestamp != 0 { + n += 1 + sovOps(uint64(m.Timestamp)) + } return n } @@ -4104,6 +4146,9 @@ func (m *FileActionMkFile) Size() (n int) { l = m.Owner.Size() n += 1 + l + sovOps(uint64(l)) } + if m.Timestamp != 0 { + n += 1 + sovOps(uint64(m.Timestamp)) + } return n } @@ -4127,6 +4172,9 @@ func (m *FileActionMkDir) Size() (n int) { l = m.Owner.Size() n += 1 + l + sovOps(uint64(l)) } + if m.Timestamp != 0 { + n += 1 + sovOps(uint64(m.Timestamp)) + } return n } @@ -7962,6 +8010,25 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.AllowEmptyWildcard = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -8124,6 +8191,25 @@ func (m 
*FileActionMkFile) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -8275,6 +8361,25 @@ func (m *FileActionMkDir) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -8753,124 +8858,126 @@ var ( ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_68b9efdb358e5df5) } - -var fileDescriptor_ops_68b9efdb358e5df5 = []byte{ - // 1851 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6e, 0x23, 0xc7, - 0x11, 0x16, 0x87, 0xff, 0x45, 0x49, 0xcb, 0xb4, 0xed, 0x0d, 0xa3, 0x6c, 0x24, 0x79, 0xec, 0x18, - 0xf4, 0xfe, 0x50, 0x80, 0x0c, 0xd8, 0x86, 0x0f, 0x41, 0x24, 0x91, 0x0b, 0x31, 0xce, 0x8a, 0x42, - 0x73, 0x7f, 0x8e, 0x8b, 0xd1, 0x4c, 0x93, 0x1a, 0x90, 0x9c, 0x1e, 0xf4, 0x34, 0x57, 0xe2, 0x25, - 0x01, 0xf6, 0x09, 0x02, 0x04, 0xc8, 0x3d, 0xc7, 0x3c, 0x44, 0xee, 0x3e, 0x1a, 0x41, 0x0e, 0x4e, - 0x0e, 0x4e, 0xa0, 0x7d, 0x91, 0xa0, 0xaa, 0x7b, 0x38, 0x43, 0xad, 0x82, 0xd5, 0x22, 0x40, 0x4e, - 0xac, 0xae, 0xfa, 0xba, 0xba, 0xba, 0xaa, 0xba, 0xaa, 
0x86, 0x50, 0x97, 0x71, 0xd2, 0x89, 0x95, - 0xd4, 0x92, 0x39, 0xf1, 0xd9, 0xd6, 0xa3, 0x71, 0xa8, 0xcf, 0xe7, 0x67, 0x1d, 0x5f, 0xce, 0xf6, - 0xc6, 0x72, 0x2c, 0xf7, 0x48, 0x74, 0x36, 0x1f, 0xd1, 0x8a, 0x16, 0x44, 0x99, 0x2d, 0xee, 0x9f, - 0x1d, 0x70, 0x06, 0x31, 0xfb, 0x18, 0x2a, 0x61, 0x14, 0xcf, 0x75, 0xd2, 0x2a, 0xec, 0x16, 0xdb, - 0x8d, 0xfd, 0x7a, 0x27, 0x3e, 0xeb, 0xf4, 0x91, 0xc3, 0xad, 0x80, 0xed, 0x42, 0x49, 0x5c, 0x0a, - 0xbf, 0xe5, 0xec, 0x16, 0xda, 0x8d, 0x7d, 0x40, 0x40, 0xef, 0x52, 0xf8, 0x83, 0xf8, 0x78, 0x8d, - 0x93, 0x84, 0x7d, 0x06, 0x95, 0x44, 0xce, 0x95, 0x2f, 0x5a, 0x45, 0xc2, 0xac, 0x23, 0x66, 0x48, - 0x1c, 0x42, 0x59, 0x29, 0x6a, 0x1a, 0x85, 0x53, 0xd1, 0x2a, 0x65, 0x9a, 0x1e, 0x87, 0x53, 0x83, - 0x21, 0x09, 0xfb, 0x04, 0xca, 0x67, 0xf3, 0x70, 0x1a, 0xb4, 0xca, 0x04, 0x69, 0x20, 0xe4, 0x10, - 0x19, 0x84, 0x31, 0x32, 0xd6, 0x86, 0x5a, 0x3c, 0xf5, 0xf4, 0x48, 0xaa, 0x59, 0x0b, 0xb2, 0x03, - 0x4f, 0x2d, 0x8f, 0x2f, 0xa5, 0xec, 0x2b, 0x68, 0xf8, 0x32, 0x4a, 0xb4, 0xf2, 0xc2, 0x48, 0x27, - 0xad, 0x06, 0x81, 0x3f, 0x42, 0xf0, 0x0b, 0xa9, 0x26, 0x42, 0x1d, 0x65, 0x42, 0x9e, 0x47, 0x1e, - 0x96, 0xc0, 0x91, 0xb1, 0xfb, 0xa7, 0x02, 0xd4, 0x52, 0xad, 0xcc, 0x85, 0xf5, 0x03, 0xe5, 0x9f, - 0x87, 0x5a, 0xf8, 0x7a, 0xae, 0x44, 0xab, 0xb0, 0x5b, 0x68, 0xd7, 0xf9, 0x0a, 0x8f, 0x6d, 0x82, - 0x33, 0x18, 0x92, 0xa3, 0xea, 0xdc, 0x19, 0x0c, 0x59, 0x0b, 0xaa, 0xcf, 0x3d, 0x15, 0x7a, 0x91, - 0x26, 0xcf, 0xd4, 0x79, 0xba, 0x64, 0xf7, 0xa0, 0x3e, 0x18, 0x3e, 0x17, 0x2a, 0x09, 0x65, 0x44, - 0xfe, 0xa8, 0xf3, 0x8c, 0xc1, 0xb6, 0x01, 0x06, 0xc3, 0xc7, 0xc2, 0x43, 0xa5, 0x49, 0xab, 0xbc, - 0x5b, 0x6c, 0xd7, 0x79, 0x8e, 0xe3, 0xfe, 0x0e, 0xca, 0x14, 0x23, 0xf6, 0x1b, 0xa8, 0x04, 0xe1, - 0x58, 0x24, 0xda, 0x98, 0x73, 0xb8, 0xff, 0xdd, 0x8f, 0x3b, 0x6b, 0xff, 0xfc, 0x71, 0xe7, 0x7e, - 0x2e, 0x19, 0x64, 0x2c, 0x22, 0x5f, 0x46, 0xda, 0x0b, 0x23, 0xa1, 0x92, 0xbd, 0xb1, 0x7c, 0x64, - 0xb6, 0x74, 0xba, 0xf4, 0xc3, 0xad, 0x06, 0xf6, 0x39, 0x94, 0xc3, 0x28, 0x10, 0x97, 0x64, 
0x7f, - 0xf1, 0xf0, 0x03, 0xab, 0xaa, 0x31, 0x98, 0xeb, 0x78, 0xae, 0xfb, 0x28, 0xe2, 0x06, 0xe1, 0xc6, - 0x50, 0x31, 0x29, 0xc0, 0xee, 0x41, 0x69, 0x26, 0xb4, 0x47, 0xc7, 0x37, 0xf6, 0x6b, 0xe8, 0xda, - 0x27, 0x42, 0x7b, 0x9c, 0xb8, 0x98, 0x5d, 0x33, 0x39, 0x47, 0xd7, 0x3b, 0x59, 0x76, 0x3d, 0x41, - 0x0e, 0xb7, 0x02, 0xf6, 0x4b, 0xa8, 0x46, 0x42, 0x5f, 0x48, 0x35, 0x21, 0x17, 0x6d, 0x9a, 0x98, - 0x9f, 0x08, 0xfd, 0x44, 0x06, 0x82, 0xa7, 0x32, 0xf7, 0x2f, 0x05, 0x28, 0xa1, 0x62, 0xc6, 0xa0, - 0xe4, 0xa9, 0xb1, 0x49, 0xd7, 0x3a, 0x27, 0x9a, 0x35, 0xa1, 0x28, 0xa2, 0x57, 0x74, 0x46, 0x9d, - 0x23, 0x89, 0x1c, 0xff, 0x22, 0xb0, 0x4e, 0x47, 0x12, 0xf7, 0xcd, 0x13, 0xa1, 0xac, 0xaf, 0x89, - 0x66, 0x9f, 0x43, 0x3d, 0x56, 0xf2, 0x72, 0xf1, 0x12, 0x77, 0x97, 0x73, 0x99, 0x84, 0xcc, 0x5e, - 0xf4, 0x8a, 0xd7, 0x62, 0x4b, 0xb1, 0xfb, 0x00, 0xe2, 0x52, 0x2b, 0xef, 0x58, 0x26, 0x3a, 0x69, - 0x55, 0xe8, 0x36, 0x94, 0xc0, 0xc8, 0xe8, 0x9f, 0xf2, 0x9c, 0xd4, 0xfd, 0x9b, 0x03, 0x65, 0xba, - 0x24, 0x6b, 0xa3, 0x4b, 0xe3, 0xb9, 0x89, 0x4e, 0xf1, 0x90, 0x59, 0x97, 0x02, 0x05, 0x6f, 0xe9, - 0x51, 0x0c, 0xe4, 0x16, 0xd4, 0x12, 0x31, 0x15, 0xbe, 0x96, 0xca, 0xe6, 0xcf, 0x72, 0x8d, 0xa6, - 0x07, 0x18, 0x62, 0x73, 0x1b, 0xa2, 0xd9, 0x03, 0xa8, 0x48, 0x8a, 0x0b, 0x5d, 0xe8, 0xbf, 0x44, - 0xcb, 0x42, 0x50, 0xb9, 0x12, 0x5e, 0x20, 0xa3, 0xe9, 0x82, 0xae, 0x59, 0xe3, 0xcb, 0x35, 0x7b, - 0x00, 0x75, 0x8a, 0xc4, 0xd3, 0x45, 0x2c, 0x5a, 0x15, 0x8a, 0xc0, 0xc6, 0x32, 0x4a, 0xc8, 0xe4, - 0x99, 0x1c, 0x5f, 0x9e, 0xef, 0xf9, 0xe7, 0x62, 0x10, 0xeb, 0xd6, 0x87, 0x99, 0xbf, 0x8e, 0x2c, - 0x8f, 0x2f, 0xa5, 0xa8, 0x36, 0x11, 0xbe, 0x12, 0x1a, 0xa1, 0x1f, 0x11, 0x94, 0xd4, 0x0e, 0x53, - 0x26, 0xcf, 0xe4, 0xcc, 0x85, 0xca, 0x70, 0x78, 0x8c, 0xc8, 0xbb, 0x59, 0x65, 0x30, 0x1c, 0x6e, - 0x25, 0x6e, 0x1f, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0xfa, 0x5d, 0xfb, 0x00, 0x9d, 0x7e, 0x97, 0x3d, - 0x82, 0x6a, 0x72, 0xee, 0xa9, 0x30, 0x1a, 0x93, 0xef, 0x36, 0xf7, 0x3f, 0x58, 0x5a, 0x35, 0x34, - 0x7c, 0xd4, 0x94, 0x62, 0x5c, 
0x09, 0xf5, 0xa5, 0x19, 0x6f, 0xe9, 0x6a, 0x42, 0x71, 0x1e, 0x06, - 0xa4, 0x67, 0x83, 0x23, 0x89, 0x9c, 0x71, 0x68, 0x72, 0x69, 0x83, 0x23, 0x89, 0x01, 0x99, 0xc9, - 0xc0, 0xd4, 0xb1, 0x0d, 0x4e, 0x34, 0xfa, 0x58, 0xc6, 0x3a, 0x94, 0x91, 0x37, 0x4d, 0x7d, 0x9c, - 0xae, 0xdd, 0x69, 0x7a, 0xbf, 0xff, 0xcb, 0x69, 0x7f, 0x2c, 0x40, 0x2d, 0x2d, 0xbe, 0x58, 0x49, - 0xc2, 0x40, 0x44, 0x3a, 0x1c, 0x85, 0x42, 0xd9, 0x83, 0x73, 0x1c, 0xf6, 0x08, 0xca, 0x9e, 0xd6, - 0x2a, 0x7d, 0xa0, 0x3f, 0xcd, 0x57, 0xee, 0xce, 0x01, 0x4a, 0x7a, 0x91, 0x56, 0x0b, 0x6e, 0x50, - 0x5b, 0x5f, 0x03, 0x64, 0x4c, 0xb4, 0x75, 0x22, 0x16, 0x56, 0x2b, 0x92, 0xec, 0x43, 0x28, 0xbf, - 0xf2, 0xa6, 0x73, 0x61, 0x73, 0xd8, 0x2c, 0xbe, 0x71, 0xbe, 0x2e, 0xb8, 0x7f, 0x75, 0xa0, 0x6a, - 0x2b, 0x39, 0x7b, 0x08, 0x55, 0xaa, 0xe4, 0xd6, 0xa2, 0x9b, 0x1f, 0x46, 0x0a, 0x61, 0x7b, 0xcb, - 0x16, 0x95, 0xb3, 0xd1, 0xaa, 0x32, 0xad, 0xca, 0xda, 0x98, 0x35, 0xac, 0x62, 0x20, 0x46, 0xb6, - 0x17, 0x6d, 0x22, 0xba, 0x2b, 0x46, 0x61, 0x14, 0xa2, 0x7f, 0x38, 0x8a, 0xd8, 0xc3, 0xf4, 0xd6, - 0x25, 0xd2, 0x78, 0x37, 0xaf, 0xf1, 0xed, 0x4b, 0xf7, 0xa1, 0x91, 0x3b, 0xe6, 0x86, 0x5b, 0x7f, - 0x9a, 0xbf, 0xb5, 0x3d, 0x92, 0xd4, 0x99, 0x46, 0x9a, 0x79, 0xe1, 0x7f, 0xf0, 0xdf, 0x97, 0x00, - 0x99, 0xca, 0xdb, 0x17, 0x16, 0xf7, 0x75, 0x11, 0x60, 0x10, 0x63, 0xe9, 0x0c, 0x3c, 0xaa, 0xc8, - 0xeb, 0xe1, 0x38, 0x92, 0x4a, 0xbc, 0xa4, 0xa7, 0x4a, 0xfb, 0x6b, 0xbc, 0x61, 0x78, 0xf4, 0x62, - 0xd8, 0x01, 0x34, 0x02, 0x91, 0xf8, 0x2a, 0xa4, 0x84, 0xb2, 0x4e, 0xdf, 0xc1, 0x3b, 0x65, 0x7a, - 0x3a, 0xdd, 0x0c, 0x61, 0x7c, 0x95, 0xdf, 0xc3, 0xf6, 0x61, 0x5d, 0x5c, 0xc6, 0x52, 0x69, 0x7b, - 0x8a, 0x69, 0xf8, 0x77, 0xcc, 0xe8, 0x80, 0x7c, 0x3a, 0x89, 0x37, 0x44, 0xb6, 0x60, 0x1e, 0x94, - 0x7c, 0x2f, 0x36, 0xdd, 0xae, 0xb1, 0xdf, 0xba, 0x76, 0xde, 0x91, 0x17, 0x1b, 0xa7, 0x1d, 0x7e, - 0x81, 0x77, 0x7d, 0xfd, 0xaf, 0x9d, 0x07, 0xb9, 0x16, 0x37, 0x93, 0x67, 0x8b, 0x3d, 0xca, 0x97, - 0x49, 0xa8, 0xf7, 0xe6, 0x3a, 0x9c, 0xee, 0x79, 0x71, 0x88, 0xea, 
0x70, 0x63, 0xbf, 0xcb, 0x49, - 0xf5, 0xd6, 0xaf, 0xa0, 0x79, 0xdd, 0xee, 0xf7, 0x89, 0xc1, 0xd6, 0x57, 0x50, 0x5f, 0xda, 0xf1, - 0xae, 0x8d, 0xb5, 0x7c, 0xf0, 0x3e, 0x81, 0x46, 0xee, 0xde, 0x08, 0x7c, 0x4e, 0x40, 0xe3, 0x7d, - 0xb3, 0x70, 0x5f, 0xe3, 0xb4, 0x91, 0xf6, 0x9b, 0x5f, 0x00, 0x9c, 0x6b, 0x1d, 0xbf, 0xa4, 0x06, - 0x64, 0x0f, 0xa9, 0x23, 0x87, 0x10, 0x6c, 0x07, 0x1a, 0xb8, 0x48, 0xac, 0xdc, 0x58, 0x4a, 0x3b, - 0x12, 0x03, 0xf8, 0x39, 0xd4, 0x47, 0xcb, 0xed, 0xa6, 0x71, 0xd4, 0x46, 0xe9, 0xee, 0x9f, 0x41, - 0x2d, 0x92, 0x56, 0x66, 0xfa, 0x61, 0x35, 0x92, 0x24, 0x72, 0x1f, 0xc0, 0x4f, 0xde, 0x1a, 0x8d, - 0xd8, 0x5d, 0xa8, 0x8c, 0xc2, 0xa9, 0xa6, 0xe7, 0x8a, 0x2d, 0xd6, 0xae, 0xdc, 0x7f, 0x14, 0x00, - 0xb2, 0xa7, 0x85, 0x1e, 0xc1, 0x77, 0x87, 0x98, 0x75, 0xf3, 0xce, 0xa6, 0x50, 0x9b, 0xd9, 0x08, - 0xda, 0x3c, 0xba, 0xb7, 0xfa, 0x1c, 0x3b, 0x69, 0x80, 0x4d, 0x6c, 0xf7, 0x6d, 0x6c, 0xdf, 0x67, - 0x7c, 0x59, 0x9e, 0xb0, 0xf5, 0x2d, 0x6c, 0xac, 0xa8, 0xbb, 0xe5, 0x4b, 0xcd, 0xb2, 0x2c, 0x1f, - 0xb2, 0x87, 0x50, 0x31, 0xad, 0x1d, 0xeb, 0x2f, 0x52, 0x56, 0x0d, 0xd1, 0x54, 0xc7, 0x4f, 0xd3, - 0x41, 0xaf, 0x7f, 0xea, 0xee, 0x43, 0xc5, 0x4c, 0xb2, 0xac, 0x0d, 0x55, 0xcf, 0xc7, 0xab, 0xa5, - 0xe5, 0x6a, 0x33, 0x1d, 0x73, 0x0f, 0x88, 0xcd, 0x53, 0xb1, 0xfb, 0x77, 0x07, 0x20, 0xe3, 0xbf, - 0xc7, 0xac, 0xf0, 0x0d, 0x6c, 0x26, 0xc2, 0x97, 0x51, 0xe0, 0xa9, 0x05, 0x49, 0xed, 0xc4, 0x76, - 0xd3, 0x96, 0x6b, 0xc8, 0xdc, 0xdc, 0x50, 0x7c, 0xf7, 0xdc, 0xd0, 0x86, 0x92, 0x2f, 0xe3, 0x85, - 0x7d, 0xbe, 0x6c, 0xf5, 0x22, 0x47, 0x32, 0x5e, 0xe0, 0xdc, 0x8e, 0x08, 0xd6, 0x81, 0xca, 0x6c, - 0x42, 0xb3, 0xbd, 0x19, 0xa3, 0x3e, 0x5c, 0xc5, 0x3e, 0x99, 0x20, 0x8d, 0x5f, 0x02, 0x06, 0xc5, - 0x1e, 0x40, 0x79, 0x36, 0x09, 0x42, 0x45, 0x13, 0x47, 0xc3, 0xf4, 0xeb, 0x3c, 0xbc, 0x1b, 0x2a, - 0x9c, 0xf7, 0x09, 0xc3, 0x5c, 0x70, 0xd4, 0xac, 0x55, 0x25, 0x64, 0xf3, 0x9a, 0x37, 0x67, 0xc7, - 0x6b, 0xdc, 0x51, 0xb3, 0xc3, 0x1a, 0x54, 0x8c, 0x5f, 0xdd, 0x2b, 0x07, 0x36, 0x57, 0xad, 0xc4, - 0x3c, 
0x48, 0x94, 0x9f, 0xe6, 0x41, 0xa2, 0xfc, 0xe5, 0x48, 0xe5, 0xe4, 0x46, 0x2a, 0x17, 0xca, - 0xf2, 0x22, 0xb2, 0x23, 0x62, 0x3a, 0xd9, 0x9c, 0xcb, 0x8b, 0x08, 0x87, 0x07, 0x23, 0x5a, 0xf6, - 0x62, 0xbc, 0x65, 0xd9, 0xf6, 0xe2, 0x4f, 0x61, 0x63, 0x24, 0xa7, 0x53, 0x79, 0x31, 0x5c, 0xcc, - 0xa6, 0x61, 0x34, 0xa1, 0x3b, 0xd5, 0xf8, 0x2a, 0x93, 0xb5, 0xe1, 0x4e, 0x10, 0x2a, 0x34, 0xe7, - 0x48, 0x46, 0x5a, 0xe0, 0x4c, 0x5c, 0x25, 0xdc, 0x75, 0x36, 0xea, 0xf3, 0xb4, 0x16, 0xb3, 0x58, - 0x3f, 0x8b, 0x62, 0xcf, 0x9f, 0xb4, 0x6a, 0x46, 0xdf, 0x0a, 0x93, 0x7d, 0x06, 0x9b, 0xbe, 0x12, - 0x9e, 0x16, 0x5d, 0x91, 0xe8, 0x53, 0x4f, 0x9f, 0xb7, 0xea, 0x04, 0xbb, 0xc6, 0x25, 0x6d, 0x68, - 0xc7, 0x8b, 0x70, 0x1a, 0xf8, 0x9e, 0x0a, 0xe8, 0x8b, 0x09, 0xb5, 0xe5, 0x99, 0xac, 0x03, 0x8c, - 0x18, 0xbd, 0x59, 0xac, 0x17, 0x4b, 0x68, 0x83, 0xa0, 0x37, 0x48, 0x5c, 0x05, 0xcd, 0xeb, 0xd1, - 0x45, 0xdf, 0xc4, 0x68, 0x87, 0x7d, 0x27, 0x48, 0x2f, 0xfd, 0xe5, 0xe4, 0xfc, 0x85, 0xbe, 0xc7, - 0x82, 0x80, 0x09, 0xb8, 0xce, 0x89, 0xbe, 0x8d, 0xef, 0xdd, 0xdf, 0xc3, 0x9d, 0x6b, 0x29, 0x72, - 0xeb, 0x23, 0x77, 0xa1, 0x31, 0xf3, 0x26, 0xe2, 0xd4, 0x53, 0xe4, 0xf8, 0xa2, 0x69, 0x7a, 0x39, - 0xd6, 0xad, 0x0c, 0x88, 0x60, 0x3d, 0x9f, 0x79, 0x37, 0x9e, 0x9e, 0xba, 0xfb, 0x44, 0xea, 0xc7, - 0x72, 0x1e, 0x05, 0xb6, 0x17, 0xac, 0x32, 0xdf, 0x0e, 0x4a, 0xf1, 0x86, 0xa0, 0xb8, 0x27, 0x50, - 0x4b, 0x4d, 0x60, 0x3b, 0xf6, 0xf3, 0xa5, 0x90, 0x7d, 0x17, 0x3f, 0x4b, 0x84, 0x42, 0xeb, 0xcc, - 0xb7, 0xcc, 0xc7, 0x50, 0x1e, 0x2b, 0x39, 0x8f, 0x6d, 0x65, 0x5b, 0x41, 0x18, 0x89, 0xfb, 0x02, - 0xaa, 0x96, 0x83, 0xa6, 0x47, 0xde, 0x2c, 0xfd, 0x88, 0x25, 0x3a, 0x2b, 0x40, 0xce, 0xbb, 0x0a, - 0xd0, 0x26, 0x38, 0x76, 0x44, 0x2d, 0x73, 0x27, 0x0c, 0xee, 0xb7, 0xa1, 0x6a, 0x3f, 0xd8, 0x58, - 0x1d, 0xca, 0xcf, 0x4e, 0x86, 0xbd, 0xa7, 0xcd, 0x35, 0x56, 0x83, 0xd2, 0xf1, 0x60, 0xf8, 0xb4, - 0x59, 0x40, 0xea, 0x64, 0x70, 0xd2, 0x6b, 0x3a, 0xf7, 0x7f, 0x0d, 0xf5, 0xe5, 0x87, 0x05, 0xb2, - 0x0f, 0xfb, 0x27, 0xdd, 0xe6, 0x1a, 0x03, 
0xa8, 0x0c, 0x7b, 0x47, 0xbc, 0x87, 0xe0, 0x2a, 0x14, - 0x87, 0xc3, 0xe3, 0xa6, 0x83, 0xaa, 0x8e, 0x0e, 0x8e, 0x8e, 0x7b, 0xcd, 0x22, 0x92, 0x4f, 0x9f, - 0x9c, 0x3e, 0x1e, 0x36, 0x4b, 0xf7, 0xbf, 0x84, 0x3b, 0xd7, 0x06, 0x7b, 0xda, 0x7d, 0x7c, 0xc0, - 0x7b, 0xa8, 0xa9, 0x01, 0xd5, 0x53, 0xde, 0x7f, 0x7e, 0xf0, 0xb4, 0xd7, 0x2c, 0xa0, 0xe0, 0xb7, - 0x83, 0xa3, 0x6f, 0x7b, 0xdd, 0xa6, 0x73, 0x78, 0xef, 0xbb, 0xab, 0xed, 0xc2, 0xf7, 0x57, 0xdb, - 0x85, 0x1f, 0xae, 0xb6, 0x0b, 0xff, 0xbe, 0xda, 0x2e, 0xfc, 0xe1, 0xcd, 0xf6, 0xda, 0xf7, 0x6f, - 0xb6, 0xd7, 0x7e, 0x78, 0xb3, 0xbd, 0x76, 0x56, 0xa1, 0x3f, 0x45, 0xbe, 0xf8, 0x4f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0xd1, 0xbc, 0xef, 0x81, 0x54, 0x11, 0x00, 0x00, +func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_5b0fc877b949f769) } + +var fileDescriptor_ops_5b0fc877b949f769 = []byte{ + // 1877 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0x17, 0x97, 0x7f, 0xf7, 0x51, 0x92, 0xd9, 0x89, 0xe3, 0xb2, 0xaa, 0x2b, 0x29, 0x9b, 0x34, + 0x60, 0x24, 0x9b, 0x02, 0x14, 0x20, 0x09, 0x72, 0x28, 0x2a, 0x91, 0x34, 0xc4, 0xa6, 0x12, 0x85, + 0xa1, 0x6c, 0x1f, 0x8d, 0xd5, 0xee, 0x90, 0x5a, 0x90, 0xbb, 0xb3, 0x98, 0x1d, 0x5a, 0xe2, 0xa5, + 0x28, 0xfc, 0x09, 0x02, 0x14, 0xe8, 0xad, 0x87, 0x1e, 0xfb, 0x21, 0x7a, 0xcf, 0x31, 0x28, 0x7a, + 0x48, 0x7b, 0x70, 0x0b, 0xfb, 0x8b, 0x14, 0x6f, 0x66, 0x96, 0xbb, 0xa4, 0x55, 0xd8, 0x46, 0x8b, + 0x9c, 0x38, 0xf3, 0x7b, 0xbf, 0x79, 0xf3, 0xe6, 0xbd, 0x37, 0xf3, 0xde, 0x12, 0x6c, 0x1e, 0x27, + 0xed, 0x58, 0x70, 0xc9, 0x89, 0x15, 0x5f, 0x6e, 0x3d, 0x1c, 0x07, 0xf2, 0x6a, 0x76, 0xd9, 0xf6, + 0x78, 0x78, 0x30, 0xe6, 0x63, 0x7e, 0xa0, 0x44, 0x97, 0xb3, 0x91, 0x9a, 0xa9, 0x89, 0x1a, 0xe9, + 0x25, 0xce, 0x9f, 0x2d, 0xb0, 0x06, 0x31, 0xf9, 0x08, 0x2a, 0x41, 0x14, 0xcf, 0x64, 0xd2, 0x2c, + 0xec, 0x16, 0x5b, 0xf5, 0x43, 0xbb, 0x1d, 0x5f, 0xb6, 0xfb, 0x88, 0x50, 0x23, 0x20, 0xbb, 0x50, + 0x62, 0x37, 0xcc, 
0x6b, 0x5a, 0xbb, 0x85, 0x56, 0xfd, 0x10, 0x90, 0xd0, 0xbb, 0x61, 0xde, 0x20, + 0x3e, 0x59, 0xa3, 0x4a, 0x42, 0x3e, 0x85, 0x4a, 0xc2, 0x67, 0xc2, 0x63, 0xcd, 0xa2, 0xe2, 0xac, + 0x23, 0x67, 0xa8, 0x10, 0xc5, 0x32, 0x52, 0xd4, 0x34, 0x0a, 0xa6, 0xac, 0x59, 0xca, 0x34, 0x3d, + 0x0a, 0xa6, 0x9a, 0xa3, 0x24, 0xe4, 0x63, 0x28, 0x5f, 0xce, 0x82, 0xa9, 0xdf, 0x2c, 0x2b, 0x4a, + 0x1d, 0x29, 0xc7, 0x08, 0x28, 0x8e, 0x96, 0x91, 0x16, 0xd4, 0xe2, 0xa9, 0x2b, 0x47, 0x5c, 0x84, + 0x4d, 0xc8, 0x36, 0x3c, 0x37, 0x18, 0x5d, 0x48, 0xc9, 0x97, 0x50, 0xf7, 0x78, 0x94, 0x48, 0xe1, + 0x06, 0x91, 0x4c, 0x9a, 0x75, 0x45, 0xfe, 0x10, 0xc9, 0x4f, 0xb9, 0x98, 0x30, 0xd1, 0xc9, 0x84, + 0x34, 0xcf, 0x3c, 0x2e, 0x81, 0xc5, 0x63, 0xe7, 0x8f, 0x05, 0xa8, 0xa5, 0x5a, 0x89, 0x03, 0xeb, + 0x47, 0xc2, 0xbb, 0x0a, 0x24, 0xf3, 0xe4, 0x4c, 0xb0, 0x66, 0x61, 0xb7, 0xd0, 0xb2, 0xe9, 0x12, + 0x46, 0x36, 0xc1, 0x1a, 0x0c, 0x95, 0xa3, 0x6c, 0x6a, 0x0d, 0x86, 0xa4, 0x09, 0xd5, 0x27, 0xae, + 0x08, 0xdc, 0x48, 0x2a, 0xcf, 0xd8, 0x34, 0x9d, 0x92, 0xfb, 0x60, 0x0f, 0x86, 0x4f, 0x98, 0x48, + 0x02, 0x1e, 0x29, 0x7f, 0xd8, 0x34, 0x03, 0xc8, 0x36, 0xc0, 0x60, 0xf8, 0x88, 0xb9, 0xa8, 0x34, + 0x69, 0x96, 0x77, 0x8b, 0x2d, 0x9b, 0xe6, 0x10, 0xe7, 0x77, 0x50, 0x56, 0x31, 0x22, 0xbf, 0x81, + 0x8a, 0x1f, 0x8c, 0x59, 0x22, 0xb5, 0x39, 0xc7, 0x87, 0xdf, 0xbd, 0xdc, 0x59, 0xfb, 0xe7, 0xcb, + 0x9d, 0xbd, 0x5c, 0x32, 0xf0, 0x98, 0x45, 0x1e, 0x8f, 0xa4, 0x1b, 0x44, 0x4c, 0x24, 0x07, 0x63, + 0xfe, 0x50, 0x2f, 0x69, 0x77, 0xd5, 0x0f, 0x35, 0x1a, 0xc8, 0x67, 0x50, 0x0e, 0x22, 0x9f, 0xdd, + 0x28, 0xfb, 0x8b, 0xc7, 0x1f, 0x18, 0x55, 0xf5, 0xc1, 0x4c, 0xc6, 0x33, 0xd9, 0x47, 0x11, 0xd5, + 0x0c, 0x27, 0x86, 0x8a, 0x4e, 0x01, 0x72, 0x1f, 0x4a, 0x21, 0x93, 0xae, 0xda, 0xbe, 0x7e, 0x58, + 0x43, 0xd7, 0x9e, 0x32, 0xe9, 0x52, 0x85, 0x62, 0x76, 0x85, 0x7c, 0x86, 0xae, 0xb7, 0xb2, 0xec, + 0x3a, 0x45, 0x84, 0x1a, 0x01, 0xf9, 0x25, 0x54, 0x23, 0x26, 0xaf, 0xb9, 0x98, 0x28, 0x17, 0x6d, + 0xea, 0x98, 0x9f, 0x31, 0x79, 0xca, 0x7d, 0x46, 0x53, 
0x99, 0xf3, 0x97, 0x02, 0x94, 0x50, 0x31, + 0x21, 0x50, 0x72, 0xc5, 0x58, 0xa7, 0xab, 0x4d, 0xd5, 0x98, 0x34, 0xa0, 0xc8, 0xa2, 0xe7, 0x6a, + 0x0f, 0x9b, 0xe2, 0x10, 0x11, 0xef, 0xda, 0x37, 0x4e, 0xc7, 0x21, 0xae, 0x9b, 0x25, 0x4c, 0x18, + 0x5f, 0xab, 0x31, 0xf9, 0x0c, 0xec, 0x58, 0xf0, 0x9b, 0xf9, 0x33, 0x5c, 0x5d, 0xce, 0x65, 0x12, + 0x82, 0xbd, 0xe8, 0x39, 0xad, 0xc5, 0x66, 0x44, 0xf6, 0x00, 0xd8, 0x8d, 0x14, 0xee, 0x09, 0x4f, + 0x64, 0xd2, 0xac, 0xa8, 0xd3, 0xa8, 0x04, 0x46, 0xa0, 0x7f, 0x4e, 0x73, 0x52, 0xe7, 0x6f, 0x16, + 0x94, 0xd5, 0x21, 0x49, 0x0b, 0x5d, 0x1a, 0xcf, 0x74, 0x74, 0x8a, 0xc7, 0xc4, 0xb8, 0x14, 0x54, + 0xf0, 0x16, 0x1e, 0xc5, 0x40, 0x6e, 0x41, 0x2d, 0x61, 0x53, 0xe6, 0x49, 0x2e, 0x4c, 0xfe, 0x2c, + 0xe6, 0x68, 0xba, 0x8f, 0x21, 0xd6, 0xa7, 0x51, 0x63, 0xb2, 0x0f, 0x15, 0xae, 0xe2, 0xa2, 0x0e, + 0xf4, 0x5f, 0xa2, 0x65, 0x28, 0xa8, 0x5c, 0x30, 0xd7, 0xe7, 0xd1, 0x74, 0xae, 0x8e, 0x59, 0xa3, + 0x8b, 0x39, 0xd9, 0x07, 0x5b, 0x45, 0xe2, 0x62, 0x1e, 0xb3, 0x66, 0x45, 0x45, 0x60, 0x63, 0x11, + 0x25, 0x04, 0x69, 0x26, 0xc7, 0x9b, 0xe7, 0xb9, 0xde, 0x15, 0x1b, 0xc4, 0xb2, 0x79, 0x37, 0xf3, + 0x57, 0xc7, 0x60, 0x74, 0x21, 0x45, 0xb5, 0x09, 0xf3, 0x04, 0x93, 0x48, 0xfd, 0x50, 0x51, 0x95, + 0xda, 0x61, 0x0a, 0xd2, 0x4c, 0x4e, 0x1c, 0xa8, 0x0c, 0x87, 0x27, 0xc8, 0xbc, 0x97, 0xbd, 0x0c, + 0x1a, 0xa1, 0x46, 0xe2, 0xf4, 0xa1, 0x96, 0x6e, 0x83, 0xd7, 0xac, 0xdf, 0x35, 0x17, 0xd0, 0xea, + 0x77, 0xc9, 0x43, 0xa8, 0x26, 0x57, 0xae, 0x08, 0xa2, 0xb1, 0xf2, 0xdd, 0xe6, 0xe1, 0x07, 0x0b, + 0xab, 0x86, 0x1a, 0x47, 0x4d, 0x29, 0xc7, 0xe1, 0x60, 0x2f, 0xcc, 0x78, 0x43, 0x57, 0x03, 0x8a, + 0xb3, 0xc0, 0x57, 0x7a, 0x36, 0x28, 0x0e, 0x11, 0x19, 0x07, 0x3a, 0x97, 0x36, 0x28, 0x0e, 0x31, + 0x20, 0x21, 0xf7, 0xf5, 0x3b, 0xb6, 0x41, 0xd5, 0x18, 0x7d, 0xcc, 0x63, 0x19, 0xf0, 0xc8, 0x9d, + 0xa6, 0x3e, 0x4e, 0xe7, 0xce, 0x34, 0x3d, 0xdf, 0x8f, 0xb2, 0xdb, 0x1f, 0x0a, 0x50, 0x4b, 0x1f, + 0x5f, 0x7c, 0x49, 0x02, 0x9f, 0x45, 0x32, 0x18, 0x05, 0x4c, 0x98, 0x8d, 0x73, 0x08, 0x79, 
0x08, + 0x65, 0x57, 0x4a, 0x91, 0x5e, 0xd0, 0x9f, 0xe6, 0x5f, 0xee, 0xf6, 0x11, 0x4a, 0x7a, 0x91, 0x14, + 0x73, 0xaa, 0x59, 0x5b, 0x5f, 0x01, 0x64, 0x20, 0xda, 0x3a, 0x61, 0x73, 0xa3, 0x15, 0x87, 0xe4, + 0x2e, 0x94, 0x9f, 0xbb, 0xd3, 0x19, 0x33, 0x39, 0xac, 0x27, 0x5f, 0x5b, 0x5f, 0x15, 0x9c, 0xbf, + 0x5a, 0x50, 0x35, 0x2f, 0x39, 0x79, 0x00, 0x55, 0xf5, 0x92, 0x1b, 0x8b, 0x6e, 0xbf, 0x18, 0x29, + 0x85, 0x1c, 0x2c, 0x4a, 0x54, 0xce, 0x46, 0xa3, 0x4a, 0x97, 0x2a, 0x63, 0x63, 0x56, 0xb0, 0x8a, + 0x3e, 0x1b, 0x99, 0x5a, 0xb4, 0x89, 0xec, 0x2e, 0x1b, 0x05, 0x51, 0x80, 0xfe, 0xa1, 0x28, 0x22, + 0x0f, 0xd2, 0x53, 0x97, 0x94, 0xc6, 0x7b, 0x79, 0x8d, 0x6f, 0x1e, 0xba, 0x0f, 0xf5, 0xdc, 0x36, + 0xb7, 0x9c, 0xfa, 0x93, 0xfc, 0xa9, 0xcd, 0x96, 0x4a, 0x9d, 0x2e, 0xa4, 0x99, 0x17, 0xfe, 0x07, + 0xff, 0x7d, 0x01, 0x90, 0xa9, 0x7c, 0xf7, 0x87, 0xc5, 0x79, 0x51, 0x04, 0x18, 0xc4, 0xf8, 0x74, + 0xfa, 0xae, 0x7a, 0x91, 0xd7, 0x83, 0x71, 0xc4, 0x05, 0x7b, 0xa6, 0xae, 0xaa, 0x5a, 0x5f, 0xa3, + 0x75, 0x8d, 0xa9, 0x1b, 0x43, 0x8e, 0xa0, 0xee, 0xb3, 0xc4, 0x13, 0x81, 0x4a, 0x28, 0xe3, 0xf4, + 0x1d, 0x3c, 0x53, 0xa6, 0xa7, 0xdd, 0xcd, 0x18, 0xda, 0x57, 0xf9, 0x35, 0xe4, 0x10, 0xd6, 0xd9, + 0x4d, 0xcc, 0x85, 0x34, 0xbb, 0xe8, 0x82, 0x7f, 0x47, 0xb7, 0x0e, 0x88, 0xab, 0x9d, 0x68, 0x9d, + 0x65, 0x13, 0xe2, 0x42, 0xc9, 0x73, 0x63, 0x5d, 0xed, 0xea, 0x87, 0xcd, 0x95, 0xfd, 0x3a, 0x6e, + 0xac, 0x9d, 0x76, 0xfc, 0x39, 0x9e, 0xf5, 0xc5, 0xbf, 0x76, 0xf6, 0x73, 0x25, 0x2e, 0xe4, 0x97, + 0xf3, 0x03, 0x95, 0x2f, 0x93, 0x40, 0x1e, 0xcc, 0x64, 0x30, 0x3d, 0x70, 0xe3, 0x00, 0xd5, 0xe1, + 0xc2, 0x7e, 0x97, 0x2a, 0xd5, 0x5b, 0xbf, 0x82, 0xc6, 0xaa, 0xdd, 0xef, 0x13, 0x83, 0xad, 0x2f, + 0xc1, 0x5e, 0xd8, 0xf1, 0xb6, 0x85, 0xb5, 0x7c, 0xf0, 0x3e, 0x86, 0x7a, 0xee, 0xdc, 0x48, 0x7c, + 0xa2, 0x88, 0xda, 0xfb, 0x7a, 0xe2, 0xbc, 0xc0, 0x6e, 0x23, 0xad, 0x37, 0xbf, 0x00, 0xb8, 0x92, + 0x32, 0x7e, 0xa6, 0x0a, 0x90, 0xd9, 0xc4, 0x46, 0x44, 0x31, 0xc8, 0x0e, 0xd4, 0x71, 0x92, 0x18, + 0xb9, 0xb6, 0x54, 0xad, 0x48, 
0x34, 0xe1, 0xe7, 0x60, 0x8f, 0x16, 0xcb, 0x75, 0xe1, 0xa8, 0x8d, + 0xd2, 0xd5, 0x3f, 0x83, 0x5a, 0xc4, 0x8d, 0x4c, 0xd7, 0xc3, 0x6a, 0xc4, 0x95, 0xc8, 0xd9, 0x87, + 0x9f, 0xbc, 0xd1, 0x1a, 0x91, 0x7b, 0x50, 0x19, 0x05, 0x53, 0xa9, 0xae, 0x2b, 0x96, 0x58, 0x33, + 0x73, 0xfe, 0x51, 0x00, 0xc8, 0xae, 0x16, 0x7a, 0x04, 0xef, 0x1d, 0x72, 0xd6, 0xf5, 0x3d, 0x9b, + 0x42, 0x2d, 0x34, 0x11, 0x34, 0x79, 0x74, 0x7f, 0xf9, 0x3a, 0xb6, 0xd3, 0x00, 0xeb, 0xd8, 0x1e, + 0x9a, 0xd8, 0xbe, 0x4f, 0xfb, 0xb2, 0xd8, 0x61, 0xeb, 0x1b, 0xd8, 0x58, 0x52, 0xf7, 0x8e, 0x37, + 0x35, 0xcb, 0xb2, 0x7c, 0xc8, 0x1e, 0x40, 0x45, 0x97, 0x76, 0x7c, 0x7f, 0x71, 0x64, 0xd4, 0xa8, + 0xb1, 0x7a, 0xc7, 0xcf, 0xd3, 0x46, 0xaf, 0x7f, 0xee, 0x1c, 0x42, 0x45, 0x77, 0xb2, 0xa4, 0x05, + 0x55, 0xd7, 0xc3, 0xa3, 0xa5, 0xcf, 0xd5, 0x66, 0xda, 0xe6, 0x1e, 0x29, 0x98, 0xa6, 0x62, 0xe7, + 0xef, 0x16, 0x40, 0x86, 0xbf, 0x47, 0xaf, 0xf0, 0x35, 0x6c, 0x26, 0xcc, 0xe3, 0x91, 0xef, 0x8a, + 0xb9, 0x92, 0x9a, 0x8e, 0xed, 0xb6, 0x25, 0x2b, 0xcc, 0x5c, 0xdf, 0x50, 0x7c, 0x7b, 0xdf, 0xd0, + 0x82, 0x92, 0xc7, 0xe3, 0xb9, 0xb9, 0xbe, 0x64, 0xf9, 0x20, 0x1d, 0x1e, 0xcf, 0xb1, 0x6f, 0x47, + 0x06, 0x69, 0x43, 0x25, 0x9c, 0xa8, 0xde, 0x5e, 0xb7, 0x51, 0x77, 0x97, 0xb9, 0xa7, 0x13, 0x1c, + 0xe3, 0x97, 0x80, 0x66, 0x91, 0x7d, 0x28, 0x87, 0x13, 0x3f, 0x10, 0xaa, 0xe3, 0xa8, 0xeb, 0x7a, + 0x9d, 0xa7, 0x77, 0x03, 0x81, 0xfd, 0xbe, 0xe2, 0x10, 0x07, 0x2c, 0x11, 0x36, 0xab, 0x8a, 0xd9, + 0x58, 0xf1, 0x66, 0x78, 0xb2, 0x46, 0x2d, 0x11, 0x1e, 0xd7, 0xa0, 0xa2, 0xfd, 0xea, 0xfc, 0xbe, + 0x08, 0x9b, 0xcb, 0x56, 0x62, 0x1e, 0x24, 0xc2, 0x4b, 0xf3, 0x20, 0x11, 0xde, 0xa2, 0xa5, 0xb2, + 0x72, 0x2d, 0x95, 0x03, 0x65, 0x7e, 0x1d, 0x99, 0x16, 0x31, 0xed, 0x6c, 0xae, 0xf8, 0x75, 0x84, + 0xcd, 0x83, 0x16, 0x2d, 0x6a, 0x31, 0x9e, 0xb2, 0x6c, 0x6a, 0xf1, 0x27, 0xb0, 0x31, 0xe2, 0xd3, + 0x29, 0xbf, 0x1e, 0xce, 0xc3, 0x69, 0x10, 0x4d, 0xd4, 0x99, 0x6a, 0x74, 0x19, 0x24, 0x2d, 0xb8, + 0xe3, 0x07, 0x02, 0xcd, 0xe9, 0xf0, 0x48, 0x32, 0xec, 0x89, 0xab, 
0x8a, 0xb7, 0x0a, 0xa3, 0x3e, + 0x57, 0x4a, 0x16, 0xc6, 0xf2, 0x71, 0x14, 0xbb, 0xde, 0xa4, 0x59, 0xd3, 0xfa, 0x96, 0x40, 0xf2, + 0x29, 0x6c, 0x7a, 0x82, 0xb9, 0x92, 0x75, 0x59, 0x22, 0xcf, 0x5d, 0x79, 0xd5, 0xb4, 0x15, 0x6d, + 0x05, 0x55, 0xda, 0xd0, 0x8e, 0xa7, 0xc1, 0xd4, 0xf7, 0x5c, 0xe1, 0xab, 0x2f, 0x26, 0xd4, 0x96, + 0x07, 0x49, 0x1b, 0x88, 0x02, 0x7a, 0x61, 0x2c, 0xe7, 0x0b, 0x6a, 0x5d, 0x51, 0x6f, 0x91, 0xe0, + 0xe7, 0x8b, 0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0x9b, 0xeb, 0x98, 0x49, 0x34, 0x03, 0x9c, 0x6f, + 0x0b, 0xd0, 0x58, 0x0d, 0x3e, 0xba, 0x2e, 0x46, 0x33, 0xcd, 0x35, 0xc2, 0xf1, 0xc2, 0x9d, 0x56, + 0xce, 0x9d, 0x18, 0x1a, 0x7c, 0x2f, 0x30, 0x3f, 0xd7, 0xa9, 0x1a, 0xbf, 0x53, 0x68, 0x96, 0x4c, + 0x2a, 0xaf, 0x9a, 0xf4, 0xa7, 0x02, 0xdc, 0x59, 0x49, 0xb0, 0x77, 0xb6, 0x68, 0x17, 0xea, 0xa1, + 0x3b, 0x61, 0xe7, 0xae, 0x50, 0x61, 0x2b, 0xea, 0x92, 0x99, 0x83, 0xfe, 0x0f, 0xf6, 0x45, 0xb0, + 0x9e, 0xcf, 0xea, 0x5b, 0x6d, 0x4b, 0x43, 0x79, 0xc6, 0xe5, 0x23, 0x3e, 0x8b, 0x7c, 0x53, 0x67, + 0x96, 0xc1, 0x37, 0x03, 0x5e, 0xbc, 0x25, 0xe0, 0xce, 0x19, 0xd4, 0x52, 0x03, 0xc9, 0x8e, 0xf9, + 0x34, 0x2a, 0x64, 0xdf, 0xdc, 0x8f, 0x13, 0x26, 0xd0, 0x76, 0xfd, 0x9d, 0xf4, 0x11, 0x94, 0xc7, + 0x82, 0xcf, 0x62, 0xf3, 0x6a, 0x2e, 0x31, 0xb4, 0xc4, 0x79, 0x0a, 0x55, 0x83, 0xa0, 0xe9, 0x91, + 0x1b, 0xa6, 0x1f, 0xc8, 0x6a, 0x9c, 0x3d, 0x6e, 0xd6, 0xdb, 0x1e, 0xb7, 0x4d, 0xb0, 0x4c, 0xfb, + 0x5b, 0xa6, 0x56, 0xe0, 0xef, 0xb5, 0xa0, 0x6a, 0x3e, 0x06, 0x89, 0x0d, 0xe5, 0xc7, 0x67, 0xc3, + 0xde, 0x45, 0x63, 0x8d, 0xd4, 0xa0, 0x74, 0x32, 0x18, 0x5e, 0x34, 0x0a, 0x38, 0x3a, 0x1b, 0x9c, + 0xf5, 0x1a, 0xd6, 0xde, 0xaf, 0xc1, 0x5e, 0x7c, 0xb4, 0x20, 0x7c, 0xdc, 0x3f, 0xeb, 0x36, 0xd6, + 0x08, 0x40, 0x65, 0xd8, 0xeb, 0xd0, 0x1e, 0x92, 0xab, 0x50, 0x1c, 0x0e, 0x4f, 0x1a, 0x16, 0xaa, + 0xea, 0x1c, 0x75, 0x4e, 0x7a, 0x8d, 0x22, 0x0e, 0x2f, 0x4e, 0xcf, 0x1f, 0x0d, 0x1b, 0xa5, 0xbd, + 0x2f, 0xe0, 0xce, 0xca, 0x47, 0x83, 0x5a, 0x7d, 0x72, 0x44, 0x7b, 0xa8, 0xa9, 0x0e, 0xd5, 0x73, + 0xda, 
0x7f, 0x72, 0x74, 0xd1, 0x6b, 0x14, 0x50, 0xf0, 0xdb, 0x41, 0xe7, 0x9b, 0x5e, 0xb7, 0x61, + 0x1d, 0xdf, 0xff, 0xee, 0xd5, 0x76, 0xe1, 0xfb, 0x57, 0xdb, 0x85, 0x1f, 0x5e, 0x6d, 0x17, 0xfe, + 0xfd, 0x6a, 0xbb, 0xf0, 0xed, 0xeb, 0xed, 0xb5, 0xef, 0x5f, 0x6f, 0xaf, 0xfd, 0xf0, 0x7a, 0x7b, + 0xed, 0xb2, 0xa2, 0xfe, 0x70, 0xf9, 0xfc, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x80, 0xe3, 0x0a, + 0xb5, 0xb0, 0x11, 0x00, 0x00, } diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index 31e894f1a832..fa78c4aa875f 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -221,13 +221,14 @@ message FileActionCopy { string src = 1; string dest = 2; ChownOpt owner = 4; - int32 mode = 5; + int32 mode = 5; bool followSymlink = 6; bool dirCopyContents = 7; - bool attemptUnpack = 8; - bool createDestPath = 9; - bool allowWildcard = 10; - bool allowEmptyWildcard = 11; + bool attemptUnpack = 8; + bool createDestPath = 9; + bool allowWildcard = 10; + bool allowEmptyWildcard = 11; + int64 timestamp = 12; } message FileActionMkFile { @@ -235,19 +236,21 @@ message FileActionMkFile { int32 mode = 2; bytes data = 3; ChownOpt owner = 4; + int64 timestamp = 5; } message FileActionMkDir { string path = 1; int32 mode = 2; - bool makeParents = 3; + bool makeParents = 3; ChownOpt owner = 4; + int64 timestamp = 5; } message FileActionRm { string path = 1; - bool allowNotFound = 2; - bool allowWildcard = 3; + bool allowNotFound = 2; + bool allowWildcard = 3; } message ChownOpt { From 89e6614b38538b35bb63fec2b90962ee80c16580 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 1 Feb 2019 11:04:44 -0800 Subject: [PATCH 03/25] solver: change uid to uint Signed-off-by: Tonis Tiigi --- client/llb/fileop.go | 2 +- solver/pb/ops.pb.go | 94 ++++++++++++++++++++++---------------------- solver/pb/ops.proto | 2 +- 3 files changed, 49 insertions(+), 49 deletions(-) diff --git a/client/llb/fileop.go b/client/llb/fileop.go index 64f6ea312860..d41f9e4cb09b 100644 --- a/client/llb/fileop.go +++ 
b/client/llb/fileop.go @@ -239,7 +239,7 @@ func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { if up.Name != "" { return &pb.UserOpt{Name: up.Name, Input: base} } - return &pb.UserOpt{Id: int32(up.UID), Input: -1} + return &pb.UserOpt{Id: uint32(up.UID), Input: -1} } func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index 372b94a4e76d..617cd3893865 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -54,7 +54,7 @@ func (x NetMode) String() string { return proto.EnumName(NetMode_name, int32(x)) } func (NetMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{0} + return fileDescriptor_ops_02f745fcf5a0d290, []int{0} } // MountType defines a type of a mount from a supported set @@ -87,7 +87,7 @@ func (x MountType) String() string { return proto.EnumName(MountType_name, int32(x)) } func (MountType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{1} + return fileDescriptor_ops_02f745fcf5a0d290, []int{1} } // CacheSharingOpt defines different sharing modes for cache mount @@ -117,7 +117,7 @@ func (x CacheSharingOpt) String() string { return proto.EnumName(CacheSharingOpt_name, int32(x)) } func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{2} + return fileDescriptor_ops_02f745fcf5a0d290, []int{2} } // Op represents a vertex of the LLB DAG. 
@@ -138,7 +138,7 @@ func (m *Op) Reset() { *m = Op{} } func (m *Op) String() string { return proto.CompactTextString(m) } func (*Op) ProtoMessage() {} func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{0} + return fileDescriptor_ops_02f745fcf5a0d290, []int{0} } func (m *Op) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -368,7 +368,7 @@ func (m *Platform) Reset() { *m = Platform{} } func (m *Platform) String() string { return proto.CompactTextString(m) } func (*Platform) ProtoMessage() {} func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{1} + return fileDescriptor_ops_02f745fcf5a0d290, []int{1} } func (m *Platform) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +440,7 @@ func (m *Input) Reset() { *m = Input{} } func (m *Input) String() string { return proto.CompactTextString(m) } func (*Input) ProtoMessage() {} func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{2} + return fileDescriptor_ops_02f745fcf5a0d290, []int{2} } func (m *Input) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -476,7 +476,7 @@ func (m *ExecOp) Reset() { *m = ExecOp{} } func (m *ExecOp) String() string { return proto.CompactTextString(m) } func (*ExecOp) ProtoMessage() {} func (*ExecOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{3} + return fileDescriptor_ops_02f745fcf5a0d290, []int{3} } func (m *ExecOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -538,7 +538,7 @@ func (m *Meta) Reset() { *m = Meta{} } func (m *Meta) String() string { return proto.CompactTextString(m) } func (*Meta) ProtoMessage() {} func (*Meta) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{4} + return fileDescriptor_ops_02f745fcf5a0d290, []int{4} } func (m *Meta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -622,7 +622,7 @@ func (m *Mount) Reset() { *m 
= Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{5} + return fileDescriptor_ops_02f745fcf5a0d290, []int{5} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -708,7 +708,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{6} + return fileDescriptor_ops_02f745fcf5a0d290, []int{6} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +766,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{7} + return fileDescriptor_ops_02f745fcf5a0d290, []int{7} } func (m *SecretOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -845,7 +845,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func (*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{8} + return fileDescriptor_ops_02f745fcf5a0d290, []int{8} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -918,7 +918,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{9} + return fileDescriptor_ops_02f745fcf5a0d290, []int{9} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -970,7 +970,7 @@ func (m 
*BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{10} + return fileDescriptor_ops_02f745fcf5a0d290, []int{10} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1025,7 +1025,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{11} + return fileDescriptor_ops_02f745fcf5a0d290, []int{11} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1066,7 +1066,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{12} + return fileDescriptor_ops_02f745fcf5a0d290, []int{12} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1127,7 +1127,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m *ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{13} + return fileDescriptor_ops_02f745fcf5a0d290, []int{13} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1170,7 +1170,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{14} + return fileDescriptor_ops_02f745fcf5a0d290, []int{14} } func 
(m *ProxyEnv) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1232,7 +1232,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{15} + return fileDescriptor_ops_02f745fcf5a0d290, []int{15} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1277,7 +1277,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{16} + return fileDescriptor_ops_02f745fcf5a0d290, []int{16} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1325,7 +1325,7 @@ func (m *HostIP) Reset() { *m = HostIP{} } func (m *HostIP) String() string { return proto.CompactTextString(m) } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{17} + return fileDescriptor_ops_02f745fcf5a0d290, []int{17} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1372,7 +1372,7 @@ func (m *FileOp) Reset() { *m = FileOp{} } func (m *FileOp) String() string { return proto.CompactTextString(m) } func (*FileOp) ProtoMessage() {} func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{18} + return fileDescriptor_ops_02f745fcf5a0d290, []int{18} } func (m *FileOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1420,7 +1420,7 @@ func (m *FileAction) Reset() { *m = FileAction{} } func (m *FileAction) String() string { return proto.CompactTextString(m) } func (*FileAction) ProtoMessage() {} func (*FileAction) Descriptor() ([]byte, []int) { - 
return fileDescriptor_ops_5b0fc877b949f769, []int{19} + return fileDescriptor_ops_02f745fcf5a0d290, []int{19} } func (m *FileAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1634,7 +1634,7 @@ func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } func (*FileActionCopy) ProtoMessage() {} func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{20} + return fileDescriptor_ops_02f745fcf5a0d290, []int{20} } func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1748,7 +1748,7 @@ func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } func (*FileActionMkFile) ProtoMessage() {} func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{21} + return fileDescriptor_ops_02f745fcf5a0d290, []int{21} } func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1820,7 +1820,7 @@ func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } func (*FileActionMkDir) ProtoMessage() {} func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{22} + return fileDescriptor_ops_02f745fcf5a0d290, []int{22} } func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1890,7 +1890,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} } func (m *FileActionRm) String() string { return proto.CompactTextString(m) } func (*FileActionRm) ProtoMessage() {} func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{23} + return fileDescriptor_ops_02f745fcf5a0d290, []int{23} } func (m *FileActionRm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1945,7 
+1945,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} } func (m *ChownOpt) String() string { return proto.CompactTextString(m) } func (*ChownOpt) ProtoMessage() {} func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{24} + return fileDescriptor_ops_02f745fcf5a0d290, []int{24} } func (m *ChownOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1987,14 +1987,14 @@ func (m *ChownOpt) GetGroup() *UserOpt { type UserOpt struct { Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` Input InputIndex `protobuf:"varint,2,opt,name=input,proto3,customtype=InputIndex" json:"input"` - Id int32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` + Id uint32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` } func (m *UserOpt) Reset() { *m = UserOpt{} } func (m *UserOpt) String() string { return proto.CompactTextString(m) } func (*UserOpt) ProtoMessage() {} func (*UserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_5b0fc877b949f769, []int{25} + return fileDescriptor_ops_02f745fcf5a0d290, []int{25} } func (m *UserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2026,7 +2026,7 @@ func (m *UserOpt) GetName() string { return "" } -func (m *UserOpt) GetId() int32 { +func (m *UserOpt) GetId() uint32 { if m != nil { return m.Id } @@ -8727,7 +8727,7 @@ func (m *UserOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Id |= (int32(b) & 0x7F) << shift + m.Id |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8858,10 +8858,10 @@ var ( ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_5b0fc877b949f769) } +func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_02f745fcf5a0d290) } -var fileDescriptor_ops_5b0fc877b949f769 = []byte{ - // 1877 bytes of a gzipped FileDescriptorProto +var fileDescriptor_ops_02f745fcf5a0d290 = []byte{ + // 1876 bytes of a gzipped 
FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, 0x15, 0x17, 0x97, 0x7f, 0xf7, 0x51, 0x92, 0xd9, 0x89, 0xe3, 0xb2, 0xaa, 0x2b, 0x29, 0x9b, 0x34, 0x60, 0x24, 0x9b, 0x02, 0x14, 0x20, 0x09, 0x72, 0x28, 0x2a, 0x91, 0x34, 0xc4, 0xa6, 0x12, 0x85, @@ -8968,16 +8968,16 @@ var fileDescriptor_ops_5b0fc877b949f769 = []byte{ 0x96, 0xc1, 0x37, 0x03, 0x5e, 0xbc, 0x25, 0xe0, 0xce, 0x19, 0xd4, 0x52, 0x03, 0xc9, 0x8e, 0xf9, 0x34, 0x2a, 0x64, 0xdf, 0xdc, 0x8f, 0x13, 0x26, 0xd0, 0x76, 0xfd, 0x9d, 0xf4, 0x11, 0x94, 0xc7, 0x82, 0xcf, 0x62, 0xf3, 0x6a, 0x2e, 0x31, 0xb4, 0xc4, 0x79, 0x0a, 0x55, 0x83, 0xa0, 0xe9, 0x91, - 0x1b, 0xa6, 0x1f, 0xc8, 0x6a, 0x9c, 0x3d, 0x6e, 0xd6, 0xdb, 0x1e, 0xb7, 0x4d, 0xb0, 0x4c, 0xfb, - 0x5b, 0xa6, 0x56, 0xe0, 0xef, 0xb5, 0xa0, 0x6a, 0x3e, 0x06, 0x89, 0x0d, 0xe5, 0xc7, 0x67, 0xc3, - 0xde, 0x45, 0x63, 0x8d, 0xd4, 0xa0, 0x74, 0x32, 0x18, 0x5e, 0x34, 0x0a, 0x38, 0x3a, 0x1b, 0x9c, - 0xf5, 0x1a, 0xd6, 0xde, 0xaf, 0xc1, 0x5e, 0x7c, 0xb4, 0x20, 0x7c, 0xdc, 0x3f, 0xeb, 0x36, 0xd6, - 0x08, 0x40, 0x65, 0xd8, 0xeb, 0xd0, 0x1e, 0x92, 0xab, 0x50, 0x1c, 0x0e, 0x4f, 0x1a, 0x16, 0xaa, - 0xea, 0x1c, 0x75, 0x4e, 0x7a, 0x8d, 0x22, 0x0e, 0x2f, 0x4e, 0xcf, 0x1f, 0x0d, 0x1b, 0xa5, 0xbd, - 0x2f, 0xe0, 0xce, 0xca, 0x47, 0x83, 0x5a, 0x7d, 0x72, 0x44, 0x7b, 0xa8, 0xa9, 0x0e, 0xd5, 0x73, - 0xda, 0x7f, 0x72, 0x74, 0xd1, 0x6b, 0x14, 0x50, 0xf0, 0xdb, 0x41, 0xe7, 0x9b, 0x5e, 0xb7, 0x61, - 0x1d, 0xdf, 0xff, 0xee, 0xd5, 0x76, 0xe1, 0xfb, 0x57, 0xdb, 0x85, 0x1f, 0x5e, 0x6d, 0x17, 0xfe, - 0xfd, 0x6a, 0xbb, 0xf0, 0xed, 0xeb, 0xed, 0xb5, 0xef, 0x5f, 0x6f, 0xaf, 0xfd, 0xf0, 0x7a, 0x7b, - 0xed, 0xb2, 0xa2, 0xfe, 0x70, 0xf9, 0xfc, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x80, 0xe3, 0x0a, - 0xb5, 0xb0, 0x11, 0x00, 0x00, + 0x1b, 0xa6, 0x1f, 0xc8, 0x6a, 0x9c, 0x3d, 0x6e, 0xd6, 0xdb, 0x1e, 0xb7, 0x4d, 0xb0, 0x16, 0xed, + 0xaf, 0x15, 0xf8, 0x7b, 0x2d, 0xa8, 0x9a, 0x8f, 0x41, 0x62, 0x43, 0xf9, 0xf1, 0xd9, 0xb0, 0x77, + 0xd1, 0x58, 0x23, 
0x35, 0x28, 0x9d, 0x0c, 0x86, 0x17, 0x8d, 0x02, 0x8e, 0xce, 0x06, 0x67, 0xbd, + 0x86, 0xb5, 0xf7, 0x6b, 0xb0, 0x17, 0x1f, 0x2d, 0x08, 0x1f, 0xf7, 0xcf, 0xba, 0x8d, 0x35, 0x02, + 0x50, 0x19, 0xf6, 0x3a, 0xb4, 0x87, 0xe4, 0x2a, 0x14, 0x87, 0xc3, 0x93, 0x86, 0x85, 0xaa, 0x3a, + 0x47, 0x9d, 0x93, 0x5e, 0xa3, 0x88, 0xc3, 0x8b, 0xd3, 0xf3, 0x47, 0xc3, 0x46, 0x69, 0xef, 0x0b, + 0xb8, 0xb3, 0xf2, 0xd1, 0xa0, 0x56, 0x9f, 0x1c, 0xd1, 0x1e, 0x6a, 0xaa, 0x43, 0xf5, 0x9c, 0xf6, + 0x9f, 0x1c, 0x5d, 0xf4, 0x1a, 0x05, 0x14, 0xfc, 0x76, 0xd0, 0xf9, 0xa6, 0xd7, 0x6d, 0x58, 0xc7, + 0xf7, 0xbf, 0x7b, 0xb5, 0x5d, 0xf8, 0xfe, 0xd5, 0x76, 0xe1, 0x87, 0x57, 0xdb, 0x85, 0x7f, 0xbf, + 0xda, 0x2e, 0x7c, 0xfb, 0x7a, 0x7b, 0xed, 0xfb, 0xd7, 0xdb, 0x6b, 0x3f, 0xbc, 0xde, 0x5e, 0xbb, + 0xac, 0xa8, 0x3f, 0x5c, 0x3e, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0x44, 0xbb, 0x6b, + 0xb0, 0x11, 0x00, 0x00, } diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index fa78c4aa875f..9051053d24f3 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -261,5 +261,5 @@ message ChownOpt { message UserOpt { string name = 1; int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // input that contains /etc/passwd if using a name - int32 id = 3; + uint32 id = 3; } \ No newline at end of file From a443cfff054dcf7be50d6c0ee8e7fabaf62e41fc Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Mon, 4 Feb 2019 17:36:01 -0800 Subject: [PATCH 04/25] fileop: resolve review comments Signed-off-by: Tonis Tiigi --- client/llb/fileop.go | 25 +- client/llb/fileop_test.go | 42 +-- solver/pb/ops.pb.go | 742 +++++++++++++++++++++++++++----------- solver/pb/ops.proto | 40 +- 4 files changed, 594 insertions(+), 255 deletions(-) diff --git a/client/llb/fileop.go b/client/llb/fileop.go index d41f9e4cb09b..074fb35edc56 100644 --- a/client/llb/fileop.go +++ b/client/llb/fileop.go @@ -237,9 +237,10 @@ func (up *UserOpt) marshal(base pb.InputIndex) *pb.UserOpt { return nil } if up.Name != "" { 
- return &pb.UserOpt{Name: up.Name, Input: base} + return &pb.UserOpt{User: &pb.UserOpt_ByName{ByName: &pb.NamedUserOpt{ + Name: up.Name, Input: base}}} } - return &pb.UserOpt{Id: uint32(up.UID), Input: -1} + return &pb.UserOpt{User: &pb.UserOpt_ByID{ByID: uint32(up.UID)}} } func Mkfile(p string, m os.FileMode, dt []byte, opts ...MkfileOption) *FileAction { @@ -415,16 +416,16 @@ type fileActionCopy struct { func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { c := &pb.FileActionCopy{ - Src: a.sourcePath(), - Dest: normalizePath(parent, a.dest), - Owner: a.info.ChownOpt.marshal(base), - AllowWildcard: a.info.AllowWildcard, - AllowEmptyWildcard: a.info.AllowEmptyWildcard, - FollowSymlink: a.info.FollowSymlinks, - DirCopyContents: a.info.CopyDirContentsOnly, - AttemptUnpack: a.info.AttemptUnpack, - CreateDestPath: a.info.CreateDestPath, - Timestamp: marshalTime(a.info.CreatedTime), + Src: a.sourcePath(), + Dest: normalizePath(parent, a.dest), + Owner: a.info.ChownOpt.marshal(base), + AllowWildcard: a.info.AllowWildcard, + AllowEmptyWildcard: a.info.AllowEmptyWildcard, + FollowSymlink: a.info.FollowSymlinks, + DirCopyContents: a.info.CopyDirContentsOnly, + AttemptUnpackDockerCompatibility: a.info.AttemptUnpack, + CreateDestPath: a.info.CreateDestPath, + Timestamp: marshalTime(a.info.CreatedTime), } if a.info.Mode != nil { c.Mode = int32(*a.info.Mode) diff --git a/client/llb/fileop_test.go b/client/llb/fileop_test.go index 76ae9b65f8e1..6576530437c5 100644 --- a/client/llb/fileop_test.go +++ b/client/llb/fileop_test.go @@ -478,18 +478,14 @@ func TestFileOwner(t *testing.T) { action = f.Actions[1] mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir - require.Equal(t, 123, int(mkdir.Owner.User.Id)) - require.Equal(t, "", mkdir.Owner.User.Name) - require.Equal(t, -1, int(mkdir.Owner.User.Input)) - require.Equal(t, 456, int(mkdir.Owner.Group.Id)) - require.Equal(t, "", mkdir.Owner.Group.Name) - require.Equal(t, -1, 
int(mkdir.Owner.Group.Input)) + require.Equal(t, 123, int(mkdir.Owner.User.User.(*pb.UserOpt_ByID).ByID)) + require.Equal(t, 456, int(mkdir.Owner.Group.User.(*pb.UserOpt_ByID).ByID)) action = f.Actions[2] mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir - require.Equal(t, 0, int(mkdir.Owner.User.Id)) - require.Equal(t, "foouser", mkdir.Owner.User.Name) - require.Equal(t, 0, int(mkdir.Owner.User.Input)) + + require.Equal(t, "foouser", mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name) + require.Equal(t, 0, int(mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input)) require.Nil(t, mkdir.Owner.Group) } @@ -524,44 +520,36 @@ func TestFileCopyOwner(t *testing.T) { action := f.Actions[0] mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir - require.Equal(t, 0, int(mkdir.Owner.User.Id)) - require.Equal(t, "user1", mkdir.Owner.User.Name) - require.Equal(t, -1, int(mkdir.Owner.User.Input)) + require.Equal(t, "user1", mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name) + require.Equal(t, -1, int(mkdir.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input)) require.Nil(t, mkdir.Owner.Group) action = f.Actions[1] copy := action.Action.(*pb.FileAction_Copy).Copy require.Equal(t, "/src1", copy.Src) - require.Equal(t, 0, int(copy.Owner.User.Id)) - require.Equal(t, "user2", copy.Owner.User.Name) - require.Equal(t, -1, int(copy.Owner.User.Input)) + require.Equal(t, "user2", copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name) + require.Equal(t, -1, int(copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input)) require.Nil(t, copy.Owner.Group) action = f.Actions[2] copy = action.Action.(*pb.FileAction_Copy).Copy require.Equal(t, "/src0", copy.Src) - require.Equal(t, 0, int(copy.Owner.User.Id)) - require.Equal(t, "user3", copy.Owner.User.Name) - require.Equal(t, 0, int(copy.Owner.User.Input)) + require.Equal(t, "user3", copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name) + require.Equal(t, 0, int(copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input)) require.Nil(t, 
copy.Owner.Group) action = f.Actions[3] copy = action.Action.(*pb.FileAction_Copy).Copy require.Equal(t, "/src2", copy.Src) - require.Equal(t, 0, int(copy.Owner.User.Id)) - require.Equal(t, "user4", copy.Owner.User.Name) - require.Equal(t, -1, int(copy.Owner.User.Input)) + require.Equal(t, "user4", copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Name) + require.Equal(t, -1, int(copy.Owner.User.User.(*pb.UserOpt_ByName).ByName.Input)) require.Nil(t, copy.Owner.Group) action = f.Actions[4] copy = action.Action.(*pb.FileAction_Copy).Copy require.Equal(t, "/src3", copy.Src) - require.Equal(t, 1, int(copy.Owner.User.Id)) - require.Equal(t, "", copy.Owner.User.Name) - require.Equal(t, -1, int(copy.Owner.User.Input)) - require.Equal(t, 2, int(copy.Owner.Group.Id)) - require.Equal(t, "", copy.Owner.Group.Name) - require.Equal(t, -1, int(copy.Owner.Group.Input)) + require.Equal(t, 1, int(copy.Owner.User.User.(*pb.UserOpt_ByID).ByID)) + require.Equal(t, 2, int(copy.Owner.Group.User.(*pb.UserOpt_ByID).ByID)) } func TestFileCreatedTime(t *testing.T) { diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index 617cd3893865..b8ee30029033 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -54,7 +54,7 @@ func (x NetMode) String() string { return proto.EnumName(NetMode_name, int32(x)) } func (NetMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{0} + return fileDescriptor_ops_7f9890b817ed58ee, []int{0} } // MountType defines a type of a mount from a supported set @@ -87,7 +87,7 @@ func (x MountType) String() string { return proto.EnumName(MountType_name, int32(x)) } func (MountType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{1} + return fileDescriptor_ops_7f9890b817ed58ee, []int{1} } // CacheSharingOpt defines different sharing modes for cache mount @@ -117,7 +117,7 @@ func (x CacheSharingOpt) String() string { return proto.EnumName(CacheSharingOpt_name, int32(x)) } func 
(CacheSharingOpt) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{2} + return fileDescriptor_ops_7f9890b817ed58ee, []int{2} } // Op represents a vertex of the LLB DAG. @@ -138,7 +138,7 @@ func (m *Op) Reset() { *m = Op{} } func (m *Op) String() string { return proto.CompactTextString(m) } func (*Op) ProtoMessage() {} func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{0} + return fileDescriptor_ops_7f9890b817ed58ee, []int{0} } func (m *Op) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -368,7 +368,7 @@ func (m *Platform) Reset() { *m = Platform{} } func (m *Platform) String() string { return proto.CompactTextString(m) } func (*Platform) ProtoMessage() {} func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{1} + return fileDescriptor_ops_7f9890b817ed58ee, []int{1} } func (m *Platform) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +440,7 @@ func (m *Input) Reset() { *m = Input{} } func (m *Input) String() string { return proto.CompactTextString(m) } func (*Input) ProtoMessage() {} func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{2} + return fileDescriptor_ops_7f9890b817ed58ee, []int{2} } func (m *Input) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -476,7 +476,7 @@ func (m *ExecOp) Reset() { *m = ExecOp{} } func (m *ExecOp) String() string { return proto.CompactTextString(m) } func (*ExecOp) ProtoMessage() {} func (*ExecOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{3} + return fileDescriptor_ops_7f9890b817ed58ee, []int{3} } func (m *ExecOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -538,7 +538,7 @@ func (m *Meta) Reset() { *m = Meta{} } func (m *Meta) String() string { return proto.CompactTextString(m) } func (*Meta) ProtoMessage() {} func (*Meta) Descriptor() ([]byte, []int) { - return 
fileDescriptor_ops_02f745fcf5a0d290, []int{4} + return fileDescriptor_ops_7f9890b817ed58ee, []int{4} } func (m *Meta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -622,7 +622,7 @@ func (m *Mount) Reset() { *m = Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{5} + return fileDescriptor_ops_7f9890b817ed58ee, []int{5} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -708,7 +708,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{6} + return fileDescriptor_ops_7f9890b817ed58ee, []int{6} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +766,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{7} + return fileDescriptor_ops_7f9890b817ed58ee, []int{7} } func (m *SecretOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -845,7 +845,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func (*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{8} + return fileDescriptor_ops_7f9890b817ed58ee, []int{8} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -918,7 +918,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, 
[]int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{9} + return fileDescriptor_ops_7f9890b817ed58ee, []int{9} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -970,7 +970,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{10} + return fileDescriptor_ops_7f9890b817ed58ee, []int{10} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1025,7 +1025,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{11} + return fileDescriptor_ops_7f9890b817ed58ee, []int{11} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1066,7 +1066,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{12} + return fileDescriptor_ops_7f9890b817ed58ee, []int{12} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1127,7 +1127,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m *ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{13} + return fileDescriptor_ops_7f9890b817ed58ee, []int{13} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1170,7 +1170,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return 
proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{14} + return fileDescriptor_ops_7f9890b817ed58ee, []int{14} } func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1232,7 +1232,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{15} + return fileDescriptor_ops_7f9890b817ed58ee, []int{15} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1277,7 +1277,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{16} + return fileDescriptor_ops_7f9890b817ed58ee, []int{16} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1325,7 +1325,7 @@ func (m *HostIP) Reset() { *m = HostIP{} } func (m *HostIP) String() string { return proto.CompactTextString(m) } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{17} + return fileDescriptor_ops_7f9890b817ed58ee, []int{17} } func (m *HostIP) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1372,7 +1372,7 @@ func (m *FileOp) Reset() { *m = FileOp{} } func (m *FileOp) String() string { return proto.CompactTextString(m) } func (*FileOp) ProtoMessage() {} func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{18} + return fileDescriptor_ops_7f9890b817ed58ee, []int{18} } func (m *FileOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1420,7 
+1420,7 @@ func (m *FileAction) Reset() { *m = FileAction{} } func (m *FileAction) String() string { return proto.CompactTextString(m) } func (*FileAction) ProtoMessage() {} func (*FileAction) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{19} + return fileDescriptor_ops_7f9890b817ed58ee, []int{19} } func (m *FileAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1617,24 +1617,35 @@ func _FileAction_OneofSizer(msg proto.Message) (n int) { } type FileActionCopy struct { - Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` - Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - Mode int32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` - FollowSymlink bool `protobuf:"varint,6,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` - DirCopyContents bool `protobuf:"varint,7,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` - AttemptUnpack bool `protobuf:"varint,8,opt,name=attemptUnpack,proto3" json:"attemptUnpack,omitempty"` - CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` - AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` - AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` - Timestamp int64 `protobuf:"varint,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // src is the source path + Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` + // dest path + Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` + // optional owner override + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // optional permission bits override + Mode int32 `protobuf:"varint,5,opt,name=mode,proto3" 
json:"mode,omitempty"` + // followSymlink resolves symlinks in src + FollowSymlink bool `protobuf:"varint,6,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` + // dirCopyContents only copies contents if src is a directory + DirCopyContents bool `protobuf:"varint,7,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` + // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead + AttemptUnpackDockerCompatibility bool `protobuf:"varint,8,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` + // createDestPath creates dest path directories if needed + CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` + // allowWildcard allows filepath.Match wildcards in src path + AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files + AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` + // optional created time override + Timestamp int64 `protobuf:"varint,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } func (*FileActionCopy) ProtoMessage() {} func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{20} + return fileDescriptor_ops_7f9890b817ed58ee, []int{20} } func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1701,9 +1712,9 @@ func (m *FileActionCopy) GetDirCopyContents() bool { return false } -func (m *FileActionCopy) GetAttemptUnpack() bool { +func (m *FileActionCopy) GetAttemptUnpackDockerCompatibility() bool { if m != nil { - return m.AttemptUnpack + return m.AttemptUnpackDockerCompatibility } 
return false } @@ -1737,18 +1748,23 @@ func (m *FileActionCopy) GetTimestamp() int64 { } type FileActionMkFile struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // path for the new file + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // permission bits + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + // data is the new file contents + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + // optional owner for the new file + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // optional created time override + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } func (*FileActionMkFile) ProtoMessage() {} func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{21} + return fileDescriptor_ops_7f9890b817ed58ee, []int{21} } func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1809,18 +1825,23 @@ func (m *FileActionMkFile) GetTimestamp() int64 { } type FileActionMkDir struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` - MakeParents bool `protobuf:"varint,3,opt,name=makeParents,proto3" json:"makeParents,omitempty"` - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` - Timestamp int64 
`protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // path for the new directory + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // permission bits + Mode int32 `protobuf:"varint,2,opt,name=mode,proto3" json:"mode,omitempty"` + // makeParents creates parent directories as well if needed + MakeParents bool `protobuf:"varint,3,opt,name=makeParents,proto3" json:"makeParents,omitempty"` + // optional owner for the new directory + Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + // optional created time override + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } func (*FileActionMkDir) ProtoMessage() {} func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{22} + return fileDescriptor_ops_7f9890b817ed58ee, []int{22} } func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1881,16 +1902,19 @@ func (m *FileActionMkDir) GetTimestamp() int64 { } type FileActionRm struct { - Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` - AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` - AllowWildcard bool `protobuf:"varint,3,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + // path to remove + Path string `protobuf:"bytes,1,opt,name=path,proto3" json:"path,omitempty"` + // allowNotFound doesn't fail the rm if file is not found + AllowNotFound bool `protobuf:"varint,2,opt,name=allowNotFound,proto3" json:"allowNotFound,omitempty"` + // allowWildcard allows filepath.Match wildcards in path + AllowWildcard bool `protobuf:"varint,3,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` } func (m *FileActionRm) Reset() { *m = 
FileActionRm{} } func (m *FileActionRm) String() string { return proto.CompactTextString(m) } func (*FileActionRm) ProtoMessage() {} func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{23} + return fileDescriptor_ops_7f9890b817ed58ee, []int{23} } func (m *FileActionRm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1945,7 +1969,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} } func (m *ChownOpt) String() string { return proto.CompactTextString(m) } func (*ChownOpt) ProtoMessage() {} func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{24} + return fileDescriptor_ops_7f9890b817ed58ee, []int{24} } func (m *ChownOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1985,16 +2009,17 @@ func (m *ChownOpt) GetGroup() *UserOpt { } type UserOpt struct { - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Input InputIndex `protobuf:"varint,2,opt,name=input,proto3,customtype=InputIndex" json:"input"` - Id uint32 `protobuf:"varint,3,opt,name=id,proto3" json:"id,omitempty"` + // Types that are valid to be assigned to User: + // *UserOpt_ByName + // *UserOpt_ByID + User isUserOpt_User `protobuf_oneof:"user"` } func (m *UserOpt) Reset() { *m = UserOpt{} } func (m *UserOpt) String() string { return proto.CompactTextString(m) } func (*UserOpt) ProtoMessage() {} func (*UserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_02f745fcf5a0d290, []int{25} + return fileDescriptor_ops_7f9890b817ed58ee, []int{25} } func (m *UserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2019,20 +2044,153 @@ func (m *UserOpt) XXX_DiscardUnknown() { var xxx_messageInfo_UserOpt proto.InternalMessageInfo -func (m *UserOpt) GetName() string { +type isUserOpt_User interface { + isUserOpt_User() + MarshalTo([]byte) (int, error) + Size() int +} + +type UserOpt_ByName struct { + ByName *NamedUserOpt 
`protobuf:"bytes,1,opt,name=byName,proto3,oneof"` +} +type UserOpt_ByID struct { + ByID uint32 `protobuf:"varint,2,opt,name=byID,proto3,oneof"` +} + +func (*UserOpt_ByName) isUserOpt_User() {} +func (*UserOpt_ByID) isUserOpt_User() {} + +func (m *UserOpt) GetUser() isUserOpt_User { if m != nil { - return m.Name + return m.User } - return "" + return nil } -func (m *UserOpt) GetId() uint32 { - if m != nil { - return m.Id +func (m *UserOpt) GetByName() *NamedUserOpt { + if x, ok := m.GetUser().(*UserOpt_ByName); ok { + return x.ByName + } + return nil +} + +func (m *UserOpt) GetByID() uint32 { + if x, ok := m.GetUser().(*UserOpt_ByID); ok { + return x.ByID } return 0 } +// XXX_OneofFuncs is for the internal use of the proto package. +func (*UserOpt) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _UserOpt_OneofMarshaler, _UserOpt_OneofUnmarshaler, _UserOpt_OneofSizer, []interface{}{ + (*UserOpt_ByName)(nil), + (*UserOpt_ByID)(nil), + } +} + +func _UserOpt_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*UserOpt) + // user + switch x := m.User.(type) { + case *UserOpt_ByName: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ByName); err != nil { + return err + } + case *UserOpt_ByID: + _ = b.EncodeVarint(2<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.ByID)) + case nil: + default: + return fmt.Errorf("UserOpt.User has unexpected type %T", x) + } + return nil +} + +func _UserOpt_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*UserOpt) + switch tag { + case 1: // user.byName + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NamedUserOpt) + err := b.DecodeMessage(msg) + m.User = &UserOpt_ByName{msg} + return true, err + case 2: // user.byID + if wire != proto.WireVarint { + return 
true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.User = &UserOpt_ByID{uint32(x)} + return true, err + default: + return false, nil + } +} + +func _UserOpt_OneofSizer(msg proto.Message) (n int) { + m := msg.(*UserOpt) + // user + switch x := m.User.(type) { + case *UserOpt_ByName: + s := proto.Size(x.ByName) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *UserOpt_ByID: + n += 1 // tag and wire + n += proto.SizeVarint(uint64(x.ByID)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type NamedUserOpt struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Input InputIndex `protobuf:"varint,2,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} } +func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) } +func (*NamedUserOpt) ProtoMessage() {} +func (*NamedUserOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_ops_7f9890b817ed58ee, []int{26} +} +func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamedUserOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (dst *NamedUserOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamedUserOpt.Merge(dst, src) +} +func (m *NamedUserOpt) XXX_Size() int { + return m.Size() +} +func (m *NamedUserOpt) XXX_DiscardUnknown() { + xxx_messageInfo_NamedUserOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_NamedUserOpt proto.InternalMessageInfo + +func (m *NamedUserOpt) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func init() { proto.RegisterType((*Op)(nil), "pb.Op") proto.RegisterType((*Platform)(nil), "pb.Platform") @@ -2066,6 +2224,7 @@ func init() { proto.RegisterType((*FileActionRm)(nil), "pb.FileActionRm") 
proto.RegisterType((*ChownOpt)(nil), "pb.ChownOpt") proto.RegisterType((*UserOpt)(nil), "pb.UserOpt") + proto.RegisterType((*NamedUserOpt)(nil), "pb.NamedUserOpt") proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) proto.RegisterEnum("pb.MountType", MountType_name, MountType_value) proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value) @@ -3235,10 +3394,10 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { } i++ } - if m.AttemptUnpack { + if m.AttemptUnpackDockerCompatibility { dAtA[i] = 0x40 i++ - if m.AttemptUnpack { + if m.AttemptUnpackDockerCompatibility { dAtA[i] = 1 } else { dAtA[i] = 0 @@ -3480,6 +3639,52 @@ func (m *UserOpt) Marshal() (dAtA []byte, err error) { } func (m *UserOpt) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.User != nil { + nn27, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn27 + } + return i, nil +} + +func (m *UserOpt_ByName) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ByName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ByName.Size())) + n28, err := m.ByName.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + } + return i, nil +} +func (m *UserOpt_ByID) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x10 + i++ + i = encodeVarintOps(dAtA, i, uint64(m.ByID)) + return i, nil +} +func (m *NamedUserOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamedUserOpt) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -3495,11 +3700,6 @@ func (m *UserOpt) MarshalTo(dAtA []byte) (int, error) { i++ i = encodeVarintOps(dAtA, i, uint64(m.Input)) } - if m.Id != 0 { - dAtA[i] = 0x18 - i++ - i = encodeVarintOps(dAtA, i, uint64(m.Id)) - } return i, nil } @@ -4107,7 +4307,7 @@ func (m *FileActionCopy) 
Size() (n int) { if m.DirCopyContents { n += 2 } - if m.AttemptUnpack { + if m.AttemptUnpackDockerCompatibility { n += 2 } if m.CreateDestPath { @@ -4215,6 +4415,39 @@ func (m *ChownOpt) Size() (n int) { } func (m *UserOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.User != nil { + n += m.User.Size() + } + return n +} + +func (m *UserOpt_ByName) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ByName != nil { + l = m.ByName.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *UserOpt_ByID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovOps(uint64(m.ByID)) + return n +} +func (m *NamedUserOpt) Size() (n int) { if m == nil { return 0 } @@ -4227,9 +4460,6 @@ func (m *UserOpt) Size() (n int) { if m.Input != 0 { n += 1 + sovOps(uint64(m.Input)) } - if m.Id != 0 { - n += 1 + sovOps(uint64(m.Id)) - } return n } @@ -7932,7 +8162,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { m.DirCopyContents = bool(v != 0) case 8: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttemptUnpack", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AttemptUnpackDockerCompatibility", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -7949,7 +8179,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { break } } - m.AttemptUnpack = bool(v != 0) + m.AttemptUnpackDockerCompatibility = bool(v != 0) case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CreateDestPath", wireType) @@ -8667,9 +8897,9 @@ func (m *UserOpt) Unmarshal(dAtA []byte) error { switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByName", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -8679,26 +8909,29 @@ func 
(m *UserOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthOps } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + v := &NamedUserOpt{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.User = &UserOpt_ByName{v} iNdEx = postIndex case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ByID", wireType) } - m.Input = 0 + var v uint32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -8708,16 +8941,96 @@ func (m *UserOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Input |= (InputIndex(b) & 0x7F) << shift + v |= (uint32(b) & 0x7F) << shift if b < 0x80 { break } } - case 3: + m.User = &UserOpt_ByID{v} + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NamedUserOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NamedUserOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: NamedUserOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) } - m.Id = 0 + m.Input = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowOps @@ -8727,7 +9040,7 @@ func (m *UserOpt) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Id |= (uint32(b) & 0x7F) << shift + m.Input |= (InputIndex(b) & 0x7F) << shift if b < 0x80 { break } @@ -8858,126 +9171,129 @@ var ( ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_02f745fcf5a0d290) } - -var fileDescriptor_ops_02f745fcf5a0d290 = []byte{ - // 1876 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0x17, 0x97, 0x7f, 0xf7, 0x51, 0x92, 0xd9, 0x89, 0xe3, 0xb2, 0xaa, 0x2b, 0x29, 0x9b, 0x34, - 0x60, 0x24, 0x9b, 0x02, 0x14, 0x20, 0x09, 0x72, 0x28, 0x2a, 0x91, 0x34, 0xc4, 0xa6, 0x12, 0x85, - 0xa1, 0x6c, 0x1f, 0x8d, 0xd5, 0xee, 0x90, 0x5a, 0x90, 0xbb, 0xb3, 0x98, 0x1d, 0x5a, 0xe2, 0xa5, - 0x28, 0xfc, 0x09, 0x02, 0x14, 0xe8, 0xad, 0x87, 0x1e, 0xfb, 0x21, 0x7a, 0xcf, 0x31, 0x28, 0x7a, - 0x48, 0x7b, 0x70, 
0x0b, 0xfb, 0x8b, 0x14, 0x6f, 0x66, 0x96, 0xbb, 0xa4, 0x55, 0xd8, 0x46, 0x8b, - 0x9c, 0x38, 0xf3, 0x7b, 0xbf, 0x79, 0xf3, 0xe6, 0xbd, 0x37, 0xf3, 0xde, 0x12, 0x6c, 0x1e, 0x27, - 0xed, 0x58, 0x70, 0xc9, 0x89, 0x15, 0x5f, 0x6e, 0x3d, 0x1c, 0x07, 0xf2, 0x6a, 0x76, 0xd9, 0xf6, - 0x78, 0x78, 0x30, 0xe6, 0x63, 0x7e, 0xa0, 0x44, 0x97, 0xb3, 0x91, 0x9a, 0xa9, 0x89, 0x1a, 0xe9, - 0x25, 0xce, 0x9f, 0x2d, 0xb0, 0x06, 0x31, 0xf9, 0x08, 0x2a, 0x41, 0x14, 0xcf, 0x64, 0xd2, 0x2c, - 0xec, 0x16, 0x5b, 0xf5, 0x43, 0xbb, 0x1d, 0x5f, 0xb6, 0xfb, 0x88, 0x50, 0x23, 0x20, 0xbb, 0x50, - 0x62, 0x37, 0xcc, 0x6b, 0x5a, 0xbb, 0x85, 0x56, 0xfd, 0x10, 0x90, 0xd0, 0xbb, 0x61, 0xde, 0x20, - 0x3e, 0x59, 0xa3, 0x4a, 0x42, 0x3e, 0x85, 0x4a, 0xc2, 0x67, 0xc2, 0x63, 0xcd, 0xa2, 0xe2, 0xac, - 0x23, 0x67, 0xa8, 0x10, 0xc5, 0x32, 0x52, 0xd4, 0x34, 0x0a, 0xa6, 0xac, 0x59, 0xca, 0x34, 0x3d, - 0x0a, 0xa6, 0x9a, 0xa3, 0x24, 0xe4, 0x63, 0x28, 0x5f, 0xce, 0x82, 0xa9, 0xdf, 0x2c, 0x2b, 0x4a, - 0x1d, 0x29, 0xc7, 0x08, 0x28, 0x8e, 0x96, 0x91, 0x16, 0xd4, 0xe2, 0xa9, 0x2b, 0x47, 0x5c, 0x84, - 0x4d, 0xc8, 0x36, 0x3c, 0x37, 0x18, 0x5d, 0x48, 0xc9, 0x97, 0x50, 0xf7, 0x78, 0x94, 0x48, 0xe1, - 0x06, 0x91, 0x4c, 0x9a, 0x75, 0x45, 0xfe, 0x10, 0xc9, 0x4f, 0xb9, 0x98, 0x30, 0xd1, 0xc9, 0x84, - 0x34, 0xcf, 0x3c, 0x2e, 0x81, 0xc5, 0x63, 0xe7, 0x8f, 0x05, 0xa8, 0xa5, 0x5a, 0x89, 0x03, 0xeb, - 0x47, 0xc2, 0xbb, 0x0a, 0x24, 0xf3, 0xe4, 0x4c, 0xb0, 0x66, 0x61, 0xb7, 0xd0, 0xb2, 0xe9, 0x12, - 0x46, 0x36, 0xc1, 0x1a, 0x0c, 0x95, 0xa3, 0x6c, 0x6a, 0x0d, 0x86, 0xa4, 0x09, 0xd5, 0x27, 0xae, - 0x08, 0xdc, 0x48, 0x2a, 0xcf, 0xd8, 0x34, 0x9d, 0x92, 0xfb, 0x60, 0x0f, 0x86, 0x4f, 0x98, 0x48, - 0x02, 0x1e, 0x29, 0x7f, 0xd8, 0x34, 0x03, 0xc8, 0x36, 0xc0, 0x60, 0xf8, 0x88, 0xb9, 0xa8, 0x34, - 0x69, 0x96, 0x77, 0x8b, 0x2d, 0x9b, 0xe6, 0x10, 0xe7, 0x77, 0x50, 0x56, 0x31, 0x22, 0xbf, 0x81, - 0x8a, 0x1f, 0x8c, 0x59, 0x22, 0xb5, 0x39, 0xc7, 0x87, 0xdf, 0xbd, 0xdc, 0x59, 0xfb, 0xe7, 0xcb, - 0x9d, 0xbd, 0x5c, 0x32, 0xf0, 0x98, 0x45, 0x1e, 0x8f, 
0xa4, 0x1b, 0x44, 0x4c, 0x24, 0x07, 0x63, - 0xfe, 0x50, 0x2f, 0x69, 0x77, 0xd5, 0x0f, 0x35, 0x1a, 0xc8, 0x67, 0x50, 0x0e, 0x22, 0x9f, 0xdd, - 0x28, 0xfb, 0x8b, 0xc7, 0x1f, 0x18, 0x55, 0xf5, 0xc1, 0x4c, 0xc6, 0x33, 0xd9, 0x47, 0x11, 0xd5, - 0x0c, 0x27, 0x86, 0x8a, 0x4e, 0x01, 0x72, 0x1f, 0x4a, 0x21, 0x93, 0xae, 0xda, 0xbe, 0x7e, 0x58, - 0x43, 0xd7, 0x9e, 0x32, 0xe9, 0x52, 0x85, 0x62, 0x76, 0x85, 0x7c, 0x86, 0xae, 0xb7, 0xb2, 0xec, - 0x3a, 0x45, 0x84, 0x1a, 0x01, 0xf9, 0x25, 0x54, 0x23, 0x26, 0xaf, 0xb9, 0x98, 0x28, 0x17, 0x6d, - 0xea, 0x98, 0x9f, 0x31, 0x79, 0xca, 0x7d, 0x46, 0x53, 0x99, 0xf3, 0x97, 0x02, 0x94, 0x50, 0x31, - 0x21, 0x50, 0x72, 0xc5, 0x58, 0xa7, 0xab, 0x4d, 0xd5, 0x98, 0x34, 0xa0, 0xc8, 0xa2, 0xe7, 0x6a, - 0x0f, 0x9b, 0xe2, 0x10, 0x11, 0xef, 0xda, 0x37, 0x4e, 0xc7, 0x21, 0xae, 0x9b, 0x25, 0x4c, 0x18, - 0x5f, 0xab, 0x31, 0xf9, 0x0c, 0xec, 0x58, 0xf0, 0x9b, 0xf9, 0x33, 0x5c, 0x5d, 0xce, 0x65, 0x12, - 0x82, 0xbd, 0xe8, 0x39, 0xad, 0xc5, 0x66, 0x44, 0xf6, 0x00, 0xd8, 0x8d, 0x14, 0xee, 0x09, 0x4f, - 0x64, 0xd2, 0xac, 0xa8, 0xd3, 0xa8, 0x04, 0x46, 0xa0, 0x7f, 0x4e, 0x73, 0x52, 0xe7, 0x6f, 0x16, - 0x94, 0xd5, 0x21, 0x49, 0x0b, 0x5d, 0x1a, 0xcf, 0x74, 0x74, 0x8a, 0xc7, 0xc4, 0xb8, 0x14, 0x54, - 0xf0, 0x16, 0x1e, 0xc5, 0x40, 0x6e, 0x41, 0x2d, 0x61, 0x53, 0xe6, 0x49, 0x2e, 0x4c, 0xfe, 0x2c, - 0xe6, 0x68, 0xba, 0x8f, 0x21, 0xd6, 0xa7, 0x51, 0x63, 0xb2, 0x0f, 0x15, 0xae, 0xe2, 0xa2, 0x0e, - 0xf4, 0x5f, 0xa2, 0x65, 0x28, 0xa8, 0x5c, 0x30, 0xd7, 0xe7, 0xd1, 0x74, 0xae, 0x8e, 0x59, 0xa3, - 0x8b, 0x39, 0xd9, 0x07, 0x5b, 0x45, 0xe2, 0x62, 0x1e, 0xb3, 0x66, 0x45, 0x45, 0x60, 0x63, 0x11, - 0x25, 0x04, 0x69, 0x26, 0xc7, 0x9b, 0xe7, 0xb9, 0xde, 0x15, 0x1b, 0xc4, 0xb2, 0x79, 0x37, 0xf3, - 0x57, 0xc7, 0x60, 0x74, 0x21, 0x45, 0xb5, 0x09, 0xf3, 0x04, 0x93, 0x48, 0xfd, 0x50, 0x51, 0x95, - 0xda, 0x61, 0x0a, 0xd2, 0x4c, 0x4e, 0x1c, 0xa8, 0x0c, 0x87, 0x27, 0xc8, 0xbc, 0x97, 0xbd, 0x0c, - 0x1a, 0xa1, 0x46, 0xe2, 0xf4, 0xa1, 0x96, 0x6e, 0x83, 0xd7, 0xac, 0xdf, 0x35, 0x17, 0xd0, 
0xea, - 0x77, 0xc9, 0x43, 0xa8, 0x26, 0x57, 0xae, 0x08, 0xa2, 0xb1, 0xf2, 0xdd, 0xe6, 0xe1, 0x07, 0x0b, - 0xab, 0x86, 0x1a, 0x47, 0x4d, 0x29, 0xc7, 0xe1, 0x60, 0x2f, 0xcc, 0x78, 0x43, 0x57, 0x03, 0x8a, - 0xb3, 0xc0, 0x57, 0x7a, 0x36, 0x28, 0x0e, 0x11, 0x19, 0x07, 0x3a, 0x97, 0x36, 0x28, 0x0e, 0x31, - 0x20, 0x21, 0xf7, 0xf5, 0x3b, 0xb6, 0x41, 0xd5, 0x18, 0x7d, 0xcc, 0x63, 0x19, 0xf0, 0xc8, 0x9d, - 0xa6, 0x3e, 0x4e, 0xe7, 0xce, 0x34, 0x3d, 0xdf, 0x8f, 0xb2, 0xdb, 0x1f, 0x0a, 0x50, 0x4b, 0x1f, - 0x5f, 0x7c, 0x49, 0x02, 0x9f, 0x45, 0x32, 0x18, 0x05, 0x4c, 0x98, 0x8d, 0x73, 0x08, 0x79, 0x08, - 0x65, 0x57, 0x4a, 0x91, 0x5e, 0xd0, 0x9f, 0xe6, 0x5f, 0xee, 0xf6, 0x11, 0x4a, 0x7a, 0x91, 0x14, - 0x73, 0xaa, 0x59, 0x5b, 0x5f, 0x01, 0x64, 0x20, 0xda, 0x3a, 0x61, 0x73, 0xa3, 0x15, 0x87, 0xe4, - 0x2e, 0x94, 0x9f, 0xbb, 0xd3, 0x19, 0x33, 0x39, 0xac, 0x27, 0x5f, 0x5b, 0x5f, 0x15, 0x9c, 0xbf, - 0x5a, 0x50, 0x35, 0x2f, 0x39, 0x79, 0x00, 0x55, 0xf5, 0x92, 0x1b, 0x8b, 0x6e, 0xbf, 0x18, 0x29, - 0x85, 0x1c, 0x2c, 0x4a, 0x54, 0xce, 0x46, 0xa3, 0x4a, 0x97, 0x2a, 0x63, 0x63, 0x56, 0xb0, 0x8a, - 0x3e, 0x1b, 0x99, 0x5a, 0xb4, 0x89, 0xec, 0x2e, 0x1b, 0x05, 0x51, 0x80, 0xfe, 0xa1, 0x28, 0x22, - 0x0f, 0xd2, 0x53, 0x97, 0x94, 0xc6, 0x7b, 0x79, 0x8d, 0x6f, 0x1e, 0xba, 0x0f, 0xf5, 0xdc, 0x36, - 0xb7, 0x9c, 0xfa, 0x93, 0xfc, 0xa9, 0xcd, 0x96, 0x4a, 0x9d, 0x2e, 0xa4, 0x99, 0x17, 0xfe, 0x07, - 0xff, 0x7d, 0x01, 0x90, 0xa9, 0x7c, 0xf7, 0x87, 0xc5, 0x79, 0x51, 0x04, 0x18, 0xc4, 0xf8, 0x74, - 0xfa, 0xae, 0x7a, 0x91, 0xd7, 0x83, 0x71, 0xc4, 0x05, 0x7b, 0xa6, 0xae, 0xaa, 0x5a, 0x5f, 0xa3, - 0x75, 0x8d, 0xa9, 0x1b, 0x43, 0x8e, 0xa0, 0xee, 0xb3, 0xc4, 0x13, 0x81, 0x4a, 0x28, 0xe3, 0xf4, - 0x1d, 0x3c, 0x53, 0xa6, 0xa7, 0xdd, 0xcd, 0x18, 0xda, 0x57, 0xf9, 0x35, 0xe4, 0x10, 0xd6, 0xd9, - 0x4d, 0xcc, 0x85, 0x34, 0xbb, 0xe8, 0x82, 0x7f, 0x47, 0xb7, 0x0e, 0x88, 0xab, 0x9d, 0x68, 0x9d, - 0x65, 0x13, 0xe2, 0x42, 0xc9, 0x73, 0x63, 0x5d, 0xed, 0xea, 0x87, 0xcd, 0x95, 0xfd, 0x3a, 0x6e, - 0xac, 0x9d, 0x76, 0xfc, 0x39, 
0x9e, 0xf5, 0xc5, 0xbf, 0x76, 0xf6, 0x73, 0x25, 0x2e, 0xe4, 0x97, - 0xf3, 0x03, 0x95, 0x2f, 0x93, 0x40, 0x1e, 0xcc, 0x64, 0x30, 0x3d, 0x70, 0xe3, 0x00, 0xd5, 0xe1, - 0xc2, 0x7e, 0x97, 0x2a, 0xd5, 0x5b, 0xbf, 0x82, 0xc6, 0xaa, 0xdd, 0xef, 0x13, 0x83, 0xad, 0x2f, - 0xc1, 0x5e, 0xd8, 0xf1, 0xb6, 0x85, 0xb5, 0x7c, 0xf0, 0x3e, 0x86, 0x7a, 0xee, 0xdc, 0x48, 0x7c, - 0xa2, 0x88, 0xda, 0xfb, 0x7a, 0xe2, 0xbc, 0xc0, 0x6e, 0x23, 0xad, 0x37, 0xbf, 0x00, 0xb8, 0x92, - 0x32, 0x7e, 0xa6, 0x0a, 0x90, 0xd9, 0xc4, 0x46, 0x44, 0x31, 0xc8, 0x0e, 0xd4, 0x71, 0x92, 0x18, - 0xb9, 0xb6, 0x54, 0xad, 0x48, 0x34, 0xe1, 0xe7, 0x60, 0x8f, 0x16, 0xcb, 0x75, 0xe1, 0xa8, 0x8d, - 0xd2, 0xd5, 0x3f, 0x83, 0x5a, 0xc4, 0x8d, 0x4c, 0xd7, 0xc3, 0x6a, 0xc4, 0x95, 0xc8, 0xd9, 0x87, - 0x9f, 0xbc, 0xd1, 0x1a, 0x91, 0x7b, 0x50, 0x19, 0x05, 0x53, 0xa9, 0xae, 0x2b, 0x96, 0x58, 0x33, - 0x73, 0xfe, 0x51, 0x00, 0xc8, 0xae, 0x16, 0x7a, 0x04, 0xef, 0x1d, 0x72, 0xd6, 0xf5, 0x3d, 0x9b, - 0x42, 0x2d, 0x34, 0x11, 0x34, 0x79, 0x74, 0x7f, 0xf9, 0x3a, 0xb6, 0xd3, 0x00, 0xeb, 0xd8, 0x1e, - 0x9a, 0xd8, 0xbe, 0x4f, 0xfb, 0xb2, 0xd8, 0x61, 0xeb, 0x1b, 0xd8, 0x58, 0x52, 0xf7, 0x8e, 0x37, - 0x35, 0xcb, 0xb2, 0x7c, 0xc8, 0x1e, 0x40, 0x45, 0x97, 0x76, 0x7c, 0x7f, 0x71, 0x64, 0xd4, 0xa8, - 0xb1, 0x7a, 0xc7, 0xcf, 0xd3, 0x46, 0xaf, 0x7f, 0xee, 0x1c, 0x42, 0x45, 0x77, 0xb2, 0xa4, 0x05, - 0x55, 0xd7, 0xc3, 0xa3, 0xa5, 0xcf, 0xd5, 0x66, 0xda, 0xe6, 0x1e, 0x29, 0x98, 0xa6, 0x62, 0xe7, - 0xef, 0x16, 0x40, 0x86, 0xbf, 0x47, 0xaf, 0xf0, 0x35, 0x6c, 0x26, 0xcc, 0xe3, 0x91, 0xef, 0x8a, - 0xb9, 0x92, 0x9a, 0x8e, 0xed, 0xb6, 0x25, 0x2b, 0xcc, 0x5c, 0xdf, 0x50, 0x7c, 0x7b, 0xdf, 0xd0, - 0x82, 0x92, 0xc7, 0xe3, 0xb9, 0xb9, 0xbe, 0x64, 0xf9, 0x20, 0x1d, 0x1e, 0xcf, 0xb1, 0x6f, 0x47, - 0x06, 0x69, 0x43, 0x25, 0x9c, 0xa8, 0xde, 0x5e, 0xb7, 0x51, 0x77, 0x97, 0xb9, 0xa7, 0x13, 0x1c, - 0xe3, 0x97, 0x80, 0x66, 0x91, 0x7d, 0x28, 0x87, 0x13, 0x3f, 0x10, 0xaa, 0xe3, 0xa8, 0xeb, 0x7a, - 0x9d, 0xa7, 0x77, 0x03, 0x81, 0xfd, 0xbe, 0xe2, 0x10, 0x07, 0x2c, 
0x11, 0x36, 0xab, 0x8a, 0xd9, - 0x58, 0xf1, 0x66, 0x78, 0xb2, 0x46, 0x2d, 0x11, 0x1e, 0xd7, 0xa0, 0xa2, 0xfd, 0xea, 0xfc, 0xbe, - 0x08, 0x9b, 0xcb, 0x56, 0x62, 0x1e, 0x24, 0xc2, 0x4b, 0xf3, 0x20, 0x11, 0xde, 0xa2, 0xa5, 0xb2, - 0x72, 0x2d, 0x95, 0x03, 0x65, 0x7e, 0x1d, 0x99, 0x16, 0x31, 0xed, 0x6c, 0xae, 0xf8, 0x75, 0x84, - 0xcd, 0x83, 0x16, 0x2d, 0x6a, 0x31, 0x9e, 0xb2, 0x6c, 0x6a, 0xf1, 0x27, 0xb0, 0x31, 0xe2, 0xd3, - 0x29, 0xbf, 0x1e, 0xce, 0xc3, 0x69, 0x10, 0x4d, 0xd4, 0x99, 0x6a, 0x74, 0x19, 0x24, 0x2d, 0xb8, - 0xe3, 0x07, 0x02, 0xcd, 0xe9, 0xf0, 0x48, 0x32, 0xec, 0x89, 0xab, 0x8a, 0xb7, 0x0a, 0xa3, 0x3e, - 0x57, 0x4a, 0x16, 0xc6, 0xf2, 0x71, 0x14, 0xbb, 0xde, 0xa4, 0x59, 0xd3, 0xfa, 0x96, 0x40, 0xf2, - 0x29, 0x6c, 0x7a, 0x82, 0xb9, 0x92, 0x75, 0x59, 0x22, 0xcf, 0x5d, 0x79, 0xd5, 0xb4, 0x15, 0x6d, - 0x05, 0x55, 0xda, 0xd0, 0x8e, 0xa7, 0xc1, 0xd4, 0xf7, 0x5c, 0xe1, 0xab, 0x2f, 0x26, 0xd4, 0x96, - 0x07, 0x49, 0x1b, 0x88, 0x02, 0x7a, 0x61, 0x2c, 0xe7, 0x0b, 0x6a, 0x5d, 0x51, 0x6f, 0x91, 0xe0, - 0xe7, 0x8b, 0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0x9b, 0xeb, 0x98, 0x49, 0x34, 0x03, 0x9c, 0x6f, - 0x0b, 0xd0, 0x58, 0x0d, 0x3e, 0xba, 0x2e, 0x46, 0x33, 0xcd, 0x35, 0xc2, 0xf1, 0xc2, 0x9d, 0x56, - 0xce, 0x9d, 0x18, 0x1a, 0x7c, 0x2f, 0x30, 0x3f, 0xd7, 0xa9, 0x1a, 0xbf, 0x53, 0x68, 0x96, 0x4c, - 0x2a, 0xaf, 0x9a, 0xf4, 0xa7, 0x02, 0xdc, 0x59, 0x49, 0xb0, 0x77, 0xb6, 0x68, 0x17, 0xea, 0xa1, - 0x3b, 0x61, 0xe7, 0xae, 0x50, 0x61, 0x2b, 0xea, 0x92, 0x99, 0x83, 0xfe, 0x0f, 0xf6, 0x45, 0xb0, - 0x9e, 0xcf, 0xea, 0x5b, 0x6d, 0x4b, 0x43, 0x79, 0xc6, 0xe5, 0x23, 0x3e, 0x8b, 0x7c, 0x53, 0x67, - 0x96, 0xc1, 0x37, 0x03, 0x5e, 0xbc, 0x25, 0xe0, 0xce, 0x19, 0xd4, 0x52, 0x03, 0xc9, 0x8e, 0xf9, - 0x34, 0x2a, 0x64, 0xdf, 0xdc, 0x8f, 0x13, 0x26, 0xd0, 0x76, 0xfd, 0x9d, 0xf4, 0x11, 0x94, 0xc7, - 0x82, 0xcf, 0x62, 0xf3, 0x6a, 0x2e, 0x31, 0xb4, 0xc4, 0x79, 0x0a, 0x55, 0x83, 0xa0, 0xe9, 0x91, - 0x1b, 0xa6, 0x1f, 0xc8, 0x6a, 0x9c, 0x3d, 0x6e, 0xd6, 0xdb, 0x1e, 0xb7, 0x4d, 0xb0, 0x16, 0xed, - 0xaf, 
0x15, 0xf8, 0x7b, 0x2d, 0xa8, 0x9a, 0x8f, 0x41, 0x62, 0x43, 0xf9, 0xf1, 0xd9, 0xb0, 0x77, - 0xd1, 0x58, 0x23, 0x35, 0x28, 0x9d, 0x0c, 0x86, 0x17, 0x8d, 0x02, 0x8e, 0xce, 0x06, 0x67, 0xbd, - 0x86, 0xb5, 0xf7, 0x6b, 0xb0, 0x17, 0x1f, 0x2d, 0x08, 0x1f, 0xf7, 0xcf, 0xba, 0x8d, 0x35, 0x02, - 0x50, 0x19, 0xf6, 0x3a, 0xb4, 0x87, 0xe4, 0x2a, 0x14, 0x87, 0xc3, 0x93, 0x86, 0x85, 0xaa, 0x3a, - 0x47, 0x9d, 0x93, 0x5e, 0xa3, 0x88, 0xc3, 0x8b, 0xd3, 0xf3, 0x47, 0xc3, 0x46, 0x69, 0xef, 0x0b, - 0xb8, 0xb3, 0xf2, 0xd1, 0xa0, 0x56, 0x9f, 0x1c, 0xd1, 0x1e, 0x6a, 0xaa, 0x43, 0xf5, 0x9c, 0xf6, - 0x9f, 0x1c, 0x5d, 0xf4, 0x1a, 0x05, 0x14, 0xfc, 0x76, 0xd0, 0xf9, 0xa6, 0xd7, 0x6d, 0x58, 0xc7, - 0xf7, 0xbf, 0x7b, 0xb5, 0x5d, 0xf8, 0xfe, 0xd5, 0x76, 0xe1, 0x87, 0x57, 0xdb, 0x85, 0x7f, 0xbf, - 0xda, 0x2e, 0x7c, 0xfb, 0x7a, 0x7b, 0xed, 0xfb, 0xd7, 0xdb, 0x6b, 0x3f, 0xbc, 0xde, 0x5e, 0xbb, - 0xac, 0xa8, 0x3f, 0x5c, 0x3e, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x14, 0x44, 0xbb, 0x6b, - 0xb0, 0x11, 0x00, 0x00, +func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_7f9890b817ed58ee) } + +var fileDescriptor_ops_7f9890b817ed58ee = []byte{ + // 1927 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcf, 0x6e, 0x23, 0xc7, + 0xd1, 0x17, 0x87, 0x7f, 0xa7, 0x28, 0x69, 0xf9, 0xb5, 0xd7, 0xfb, 0x31, 0xca, 0x46, 0x92, 0xc7, + 0x8e, 0x41, 0x6b, 0x77, 0x29, 0x40, 0x06, 0x6c, 0xc3, 0x87, 0x20, 0xa2, 0xc8, 0x85, 0x68, 0x5b, + 0xa2, 0xd0, 0xd4, 0xae, 0x8f, 0x8b, 0xe1, 0x4c, 0x93, 0x1a, 0x90, 0x33, 0x3d, 0xe8, 0x69, 0xae, + 0xc4, 0x4b, 0x0e, 0xfb, 0x04, 0x06, 0x02, 0xe4, 0x96, 0x43, 0x2e, 0x01, 0xf2, 0x10, 0xb9, 0xfb, + 0x68, 0x04, 0x39, 0x38, 0x39, 0x38, 0xc1, 0xee, 0x8b, 0x04, 0xd5, 0xdd, 0xc3, 0x19, 0x72, 0x15, + 0xec, 0x2e, 0x12, 0xe4, 0xc4, 0xea, 0xaa, 0x5f, 0x57, 0x57, 0x57, 0x55, 0x57, 0xd5, 0x10, 0x6c, + 0x1e, 0x27, 0xed, 0x58, 0x70, 0xc9, 0x89, 0x15, 0x8f, 0x76, 0x1e, 0x4d, 0x02, 0x79, 0x35, 0x1f, + 0xb5, 0x3d, 0x1e, 0x1e, 
0x4e, 0xf8, 0x84, 0x1f, 0x2a, 0xd1, 0x68, 0x3e, 0x56, 0x2b, 0xb5, 0x50, + 0x94, 0xde, 0xe2, 0xfc, 0xc1, 0x02, 0x6b, 0x10, 0x93, 0x0f, 0xa0, 0x12, 0x44, 0xf1, 0x5c, 0x26, + 0xcd, 0xc2, 0x7e, 0xb1, 0x55, 0x3f, 0xb2, 0xdb, 0xf1, 0xa8, 0xdd, 0x47, 0x0e, 0x35, 0x02, 0xb2, + 0x0f, 0x25, 0x76, 0xc3, 0xbc, 0xa6, 0xb5, 0x5f, 0x68, 0xd5, 0x8f, 0x00, 0x01, 0xbd, 0x1b, 0xe6, + 0x0d, 0xe2, 0xd3, 0x0d, 0xaa, 0x24, 0xe4, 0x63, 0xa8, 0x24, 0x7c, 0x2e, 0x3c, 0xd6, 0x2c, 0x2a, + 0xcc, 0x26, 0x62, 0x86, 0x8a, 0xa3, 0x50, 0x46, 0x8a, 0x9a, 0xc6, 0xc1, 0x8c, 0x35, 0x4b, 0x99, + 0xa6, 0xc7, 0xc1, 0x4c, 0x63, 0x94, 0x84, 0x7c, 0x08, 0xe5, 0xd1, 0x3c, 0x98, 0xf9, 0xcd, 0xb2, + 0x82, 0xd4, 0x11, 0xd2, 0x41, 0x86, 0xc2, 0x68, 0x19, 0x69, 0x41, 0x2d, 0x9e, 0xb9, 0x72, 0xcc, + 0x45, 0xd8, 0x84, 0xec, 0xc0, 0x0b, 0xc3, 0xa3, 0x4b, 0x29, 0xf9, 0x1c, 0xea, 0x1e, 0x8f, 0x12, + 0x29, 0xdc, 0x20, 0x92, 0x49, 0xb3, 0xae, 0xc0, 0xef, 0x23, 0xf8, 0x5b, 0x2e, 0xa6, 0x4c, 0x9c, + 0x64, 0x42, 0x9a, 0x47, 0x76, 0x4a, 0x60, 0xf1, 0xd8, 0xf9, 0x5d, 0x01, 0x6a, 0xa9, 0x56, 0xe2, + 0xc0, 0xe6, 0xb1, 0xf0, 0xae, 0x02, 0xc9, 0x3c, 0x39, 0x17, 0xac, 0x59, 0xd8, 0x2f, 0xb4, 0x6c, + 0xba, 0xc2, 0x23, 0xdb, 0x60, 0x0d, 0x86, 0xca, 0x51, 0x36, 0xb5, 0x06, 0x43, 0xd2, 0x84, 0xea, + 0x53, 0x57, 0x04, 0x6e, 0x24, 0x95, 0x67, 0x6c, 0x9a, 0x2e, 0xc9, 0x7d, 0xb0, 0x07, 0xc3, 0xa7, + 0x4c, 0x24, 0x01, 0x8f, 0x94, 0x3f, 0x6c, 0x9a, 0x31, 0xc8, 0x2e, 0xc0, 0x60, 0xf8, 0x98, 0xb9, + 0xa8, 0x34, 0x69, 0x96, 0xf7, 0x8b, 0x2d, 0x9b, 0xe6, 0x38, 0xce, 0x6f, 0xa0, 0xac, 0x62, 0x44, + 0xbe, 0x82, 0x8a, 0x1f, 0x4c, 0x58, 0x22, 0xb5, 0x39, 0x9d, 0xa3, 0xef, 0x7f, 0xda, 0xdb, 0xf8, + 0xfb, 0x4f, 0x7b, 0x07, 0xb9, 0x64, 0xe0, 0x31, 0x8b, 0x3c, 0x1e, 0x49, 0x37, 0x88, 0x98, 0x48, + 0x0e, 0x27, 0xfc, 0x91, 0xde, 0xd2, 0xee, 0xaa, 0x1f, 0x6a, 0x34, 0x90, 0x4f, 0xa0, 0x1c, 0x44, + 0x3e, 0xbb, 0x51, 0xf6, 0x17, 0x3b, 0xef, 0x19, 0x55, 0xf5, 0xc1, 0x5c, 0xc6, 0x73, 0xd9, 0x47, + 0x11, 0xd5, 0x08, 0x27, 0x86, 0x8a, 0x4e, 0x01, 0x72, 0x1f, 
0x4a, 0x21, 0x93, 0xae, 0x3a, 0xbe, + 0x7e, 0x54, 0x43, 0xd7, 0x9e, 0x31, 0xe9, 0x52, 0xc5, 0xc5, 0xec, 0x0a, 0xf9, 0x1c, 0x5d, 0x6f, + 0x65, 0xd9, 0x75, 0x86, 0x1c, 0x6a, 0x04, 0xe4, 0x97, 0x50, 0x8d, 0x98, 0xbc, 0xe6, 0x62, 0xaa, + 0x5c, 0xb4, 0xad, 0x63, 0x7e, 0xce, 0xe4, 0x19, 0xf7, 0x19, 0x4d, 0x65, 0xce, 0x9f, 0x0a, 0x50, + 0x42, 0xc5, 0x84, 0x40, 0xc9, 0x15, 0x13, 0x9d, 0xae, 0x36, 0x55, 0x34, 0x69, 0x40, 0x91, 0x45, + 0xcf, 0xd5, 0x19, 0x36, 0x45, 0x12, 0x39, 0xde, 0xb5, 0x6f, 0x9c, 0x8e, 0x24, 0xee, 0x9b, 0x27, + 0x4c, 0x18, 0x5f, 0x2b, 0x9a, 0x7c, 0x02, 0x76, 0x2c, 0xf8, 0xcd, 0xe2, 0x19, 0xee, 0x2e, 0xe7, + 0x32, 0x09, 0x99, 0xbd, 0xe8, 0x39, 0xad, 0xc5, 0x86, 0x22, 0x07, 0x00, 0xec, 0x46, 0x0a, 0xf7, + 0x94, 0x27, 0x32, 0x69, 0x56, 0xd4, 0x6d, 0x54, 0x02, 0x23, 0xa3, 0x7f, 0x41, 0x73, 0x52, 0xe7, + 0x2f, 0x16, 0x94, 0xd5, 0x25, 0x49, 0x0b, 0x5d, 0x1a, 0xcf, 0x75, 0x74, 0x8a, 0x1d, 0x62, 0x5c, + 0x0a, 0x2a, 0x78, 0x4b, 0x8f, 0x62, 0x20, 0x77, 0xa0, 0x96, 0xb0, 0x19, 0xf3, 0x24, 0x17, 0x26, + 0x7f, 0x96, 0x6b, 0x34, 0xdd, 0xc7, 0x10, 0xeb, 0xdb, 0x28, 0x9a, 0x3c, 0x80, 0x0a, 0x57, 0x71, + 0x51, 0x17, 0xfa, 0x37, 0xd1, 0x32, 0x10, 0x54, 0x2e, 0x98, 0xeb, 0xf3, 0x68, 0xb6, 0x50, 0xd7, + 0xac, 0xd1, 0xe5, 0x9a, 0x3c, 0x00, 0x5b, 0x45, 0xe2, 0x72, 0x11, 0xb3, 0x66, 0x45, 0x45, 0x60, + 0x6b, 0x19, 0x25, 0x64, 0xd2, 0x4c, 0x8e, 0x2f, 0xcf, 0x73, 0xbd, 0x2b, 0x36, 0x88, 0x65, 0xf3, + 0x6e, 0xe6, 0xaf, 0x13, 0xc3, 0xa3, 0x4b, 0x29, 0xaa, 0x4d, 0x98, 0x27, 0x98, 0x44, 0xe8, 0xfb, + 0x0a, 0xaa, 0xd4, 0x0e, 0x53, 0x26, 0xcd, 0xe4, 0xc4, 0x81, 0xca, 0x70, 0x78, 0x8a, 0xc8, 0x7b, + 0x59, 0x65, 0xd0, 0x1c, 0x6a, 0x24, 0x4e, 0x1f, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0xfa, 0x5d, 0xf3, + 0x00, 0xad, 0x7e, 0x97, 0x3c, 0x82, 0x6a, 0x72, 0xe5, 0x8a, 0x20, 0x9a, 0x28, 0xdf, 0x6d, 0x1f, + 0xbd, 0xb7, 0xb4, 0x6a, 0xa8, 0xf9, 0xa8, 0x29, 0xc5, 0x38, 0x1c, 0xec, 0xa5, 0x19, 0xaf, 0xe9, + 0x6a, 0x40, 0x71, 0x1e, 0xf8, 0x4a, 0xcf, 0x16, 0x45, 0x12, 0x39, 0x93, 0x40, 0xe7, 0xd2, 0x16, + 
0x45, 0x12, 0x03, 0x12, 0x72, 0x5f, 0xd7, 0xb1, 0x2d, 0xaa, 0x68, 0xf4, 0x31, 0x8f, 0x65, 0xc0, + 0x23, 0x77, 0x96, 0xfa, 0x38, 0x5d, 0x3b, 0xb3, 0xf4, 0x7e, 0xff, 0x93, 0xd3, 0x7e, 0x5b, 0x80, + 0x5a, 0x5a, 0x7c, 0xb1, 0x92, 0x04, 0x3e, 0x8b, 0x64, 0x30, 0x0e, 0x98, 0x30, 0x07, 0xe7, 0x38, + 0xe4, 0x11, 0x94, 0x5d, 0x29, 0x45, 0xfa, 0x40, 0xff, 0x3f, 0x5f, 0xb9, 0xdb, 0xc7, 0x28, 0xe9, + 0x45, 0x52, 0x2c, 0xa8, 0x46, 0xed, 0x7c, 0x01, 0x90, 0x31, 0xd1, 0xd6, 0x29, 0x5b, 0x18, 0xad, + 0x48, 0x92, 0xbb, 0x50, 0x7e, 0xee, 0xce, 0xe6, 0xcc, 0xe4, 0xb0, 0x5e, 0x7c, 0x69, 0x7d, 0x51, + 0x70, 0xfe, 0x6c, 0x41, 0xd5, 0x54, 0x72, 0xf2, 0x10, 0xaa, 0xaa, 0x92, 0x1b, 0x8b, 0x6e, 0x7f, + 0x18, 0x29, 0x84, 0x1c, 0x2e, 0x5b, 0x54, 0xce, 0x46, 0xa3, 0x4a, 0xb7, 0x2a, 0x63, 0x63, 0xd6, + 0xb0, 0x8a, 0x3e, 0x1b, 0x9b, 0x5e, 0xb4, 0x8d, 0xe8, 0x2e, 0x1b, 0x07, 0x51, 0x80, 0xfe, 0xa1, + 0x28, 0x22, 0x0f, 0xd3, 0x5b, 0x97, 0x94, 0xc6, 0x7b, 0x79, 0x8d, 0xaf, 0x5f, 0xba, 0x0f, 0xf5, + 0xdc, 0x31, 0xb7, 0xdc, 0xfa, 0xa3, 0xfc, 0xad, 0xcd, 0x91, 0x4a, 0x9d, 0x6e, 0xa4, 0x99, 0x17, + 0xfe, 0x03, 0xff, 0x7d, 0x06, 0x90, 0xa9, 0x7c, 0xfb, 0xc2, 0xe2, 0xbc, 0x28, 0x02, 0x0c, 0x62, + 0x2c, 0x9d, 0xbe, 0xab, 0x2a, 0xf2, 0x66, 0x30, 0x89, 0xb8, 0x60, 0xcf, 0xd4, 0x53, 0x55, 0xfb, + 0x6b, 0xb4, 0xae, 0x79, 0xea, 0xc5, 0x90, 0x63, 0xa8, 0xfb, 0x2c, 0xf1, 0x44, 0xa0, 0x12, 0xca, + 0x38, 0x7d, 0x0f, 0xef, 0x94, 0xe9, 0x69, 0x77, 0x33, 0x84, 0xf6, 0x55, 0x7e, 0x0f, 0x39, 0x82, + 0x4d, 0x76, 0x13, 0x73, 0x21, 0xcd, 0x29, 0xba, 0xe1, 0xdf, 0xd1, 0xa3, 0x03, 0xf2, 0xd5, 0x49, + 0xb4, 0xce, 0xb2, 0x05, 0x71, 0xa1, 0xe4, 0xb9, 0xb1, 0xee, 0x76, 0xf5, 0xa3, 0xe6, 0xda, 0x79, + 0x27, 0x6e, 0xac, 0x9d, 0xd6, 0xf9, 0x14, 0xef, 0xfa, 0xe2, 0x1f, 0x7b, 0x0f, 0x72, 0x2d, 0x2e, + 0xe4, 0xa3, 0xc5, 0xa1, 0xca, 0x97, 0x69, 0x20, 0x0f, 0xe7, 0x32, 0x98, 0x1d, 0xba, 0x71, 0x80, + 0xea, 0x70, 0x63, 0xbf, 0x4b, 0x95, 0xea, 0x9d, 0x5f, 0x41, 0x63, 0xdd, 0xee, 0x77, 0x89, 0xc1, + 0xce, 0xe7, 0x60, 0x2f, 0xed, 0x78, 
0xd3, 0xc6, 0x5a, 0x3e, 0x78, 0x1f, 0x42, 0x3d, 0x77, 0x6f, + 0x04, 0x3e, 0x55, 0x40, 0xed, 0x7d, 0xbd, 0x70, 0x5e, 0xe0, 0xb4, 0x91, 0xf6, 0x9b, 0x5f, 0x00, + 0x5c, 0x49, 0x19, 0x3f, 0x53, 0x0d, 0xc8, 0x1c, 0x62, 0x23, 0x47, 0x21, 0xc8, 0x1e, 0xd4, 0x71, + 0x91, 0x18, 0xb9, 0xb6, 0x54, 0xed, 0x48, 0x34, 0xe0, 0xe7, 0x60, 0x8f, 0x97, 0xdb, 0x75, 0xe3, + 0xa8, 0x8d, 0xd3, 0xdd, 0x3f, 0x83, 0x5a, 0xc4, 0x8d, 0x4c, 0xf7, 0xc3, 0x6a, 0xc4, 0x95, 0xc8, + 0x79, 0x00, 0xff, 0xf7, 0xda, 0x68, 0x44, 0xee, 0x41, 0x65, 0x1c, 0xcc, 0xa4, 0x7a, 0xae, 0xd8, + 0x62, 0xcd, 0xca, 0xf9, 0x5b, 0x01, 0x20, 0x7b, 0x5a, 0xe8, 0x11, 0x7c, 0x77, 0x88, 0xd9, 0xd4, + 0xef, 0x6c, 0x06, 0xb5, 0xd0, 0x44, 0xd0, 0xe4, 0xd1, 0xfd, 0xd5, 0xe7, 0xd8, 0x4e, 0x03, 0xac, + 0x63, 0x7b, 0x64, 0x62, 0xfb, 0x2e, 0xe3, 0xcb, 0xf2, 0x84, 0x9d, 0xaf, 0x61, 0x6b, 0x45, 0xdd, + 0x5b, 0xbe, 0xd4, 0x2c, 0xcb, 0xf2, 0x21, 0x7b, 0x08, 0x15, 0xdd, 0xda, 0xb1, 0xfe, 0x22, 0x65, + 0xd4, 0x28, 0x5a, 0xd5, 0xf1, 0x8b, 0x74, 0xd0, 0xeb, 0x5f, 0x38, 0x47, 0x50, 0xd1, 0x93, 0x2c, + 0x69, 0x41, 0xd5, 0xf5, 0xf0, 0x6a, 0x69, 0xb9, 0xda, 0x4e, 0xc7, 0xdc, 0x63, 0xc5, 0xa6, 0xa9, + 0xd8, 0xf9, 0xab, 0x05, 0x90, 0xf1, 0xdf, 0x61, 0x56, 0xf8, 0x12, 0xb6, 0x13, 0xe6, 0xf1, 0xc8, + 0x77, 0xc5, 0x42, 0x49, 0xcd, 0xc4, 0x76, 0xdb, 0x96, 0x35, 0x64, 0x6e, 0x6e, 0x28, 0xbe, 0x79, + 0x6e, 0x68, 0x41, 0xc9, 0xe3, 0xf1, 0xc2, 0x3c, 0x5f, 0xb2, 0x7a, 0x91, 0x13, 0x1e, 0x2f, 0x70, + 0x6e, 0x47, 0x04, 0x69, 0x43, 0x25, 0x9c, 0xaa, 0xd9, 0x5e, 0x8f, 0x51, 0x77, 0x57, 0xb1, 0x67, + 0x53, 0xa4, 0xf1, 0x4b, 0x40, 0xa3, 0xc8, 0x03, 0x28, 0x87, 0x53, 0x3f, 0x10, 0x6a, 0xe2, 0xa8, + 0xeb, 0x7e, 0x9d, 0x87, 0x77, 0x03, 0x81, 0xf3, 0xbe, 0xc2, 0x10, 0x07, 0x2c, 0x11, 0x36, 0xab, + 0x0a, 0xd9, 0x58, 0xf3, 0x66, 0x78, 0xba, 0x41, 0x2d, 0x11, 0x76, 0x6a, 0x50, 0xd1, 0x7e, 0x75, + 0xfe, 0x58, 0x84, 0xed, 0x55, 0x2b, 0x31, 0x0f, 0x12, 0xe1, 0xa5, 0x79, 0x90, 0x08, 0x6f, 0x39, + 0x52, 0x59, 0xb9, 0x91, 0xca, 0x81, 0x32, 0xbf, 0x8e, 0xcc, 0x88, 0x98, 
0x4e, 0x36, 0x57, 0xfc, + 0x3a, 0xc2, 0xe1, 0x41, 0x8b, 0x96, 0xbd, 0x18, 0x6f, 0x59, 0x36, 0xbd, 0xf8, 0x23, 0xd8, 0x1a, + 0xf3, 0xd9, 0x8c, 0x5f, 0x0f, 0x17, 0xe1, 0x2c, 0x88, 0xa6, 0xea, 0x4e, 0x35, 0xba, 0xca, 0x24, + 0x2d, 0xb8, 0xe3, 0x07, 0x02, 0xcd, 0x39, 0xe1, 0x91, 0x64, 0x38, 0x13, 0x57, 0x15, 0x6e, 0x9d, + 0x4d, 0xbe, 0x82, 0x7d, 0x57, 0x4a, 0x16, 0xc6, 0xf2, 0x49, 0x14, 0xbb, 0xde, 0xb4, 0xcb, 0x3d, + 0xf5, 0x1e, 0xc3, 0xd8, 0x95, 0xc1, 0x28, 0x98, 0x05, 0x72, 0xd1, 0xac, 0xa9, 0xad, 0x6f, 0xc4, + 0x91, 0x8f, 0x61, 0xdb, 0x13, 0xcc, 0x95, 0xac, 0xcb, 0x12, 0x79, 0xe1, 0xca, 0xab, 0xa6, 0xad, + 0x76, 0xae, 0x71, 0xf1, 0x0e, 0x2e, 0x5a, 0xfb, 0x6d, 0x30, 0xf3, 0x3d, 0x57, 0xf8, 0xea, 0xbb, + 0xaa, 0x46, 0x57, 0x99, 0xa4, 0x0d, 0x44, 0x31, 0x7a, 0x61, 0x2c, 0x17, 0x4b, 0x68, 0x5d, 0x41, + 0x6f, 0x91, 0xe0, 0x47, 0x8e, 0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0x9b, 0x9b, 0x98, 0x6f, 0x34, + 0x63, 0x38, 0xdf, 0x15, 0xa0, 0xb1, 0x9e, 0x22, 0xe8, 0xe0, 0x18, 0xcd, 0x34, 0x8f, 0x0d, 0xe9, + 0xa5, 0xd3, 0xad, 0x9c, 0xd3, 0x31, 0x80, 0x58, 0x55, 0x30, 0x8b, 0x37, 0xa9, 0xa2, 0xdf, 0x2a, + 0x80, 0x2b, 0x26, 0x95, 0xd7, 0x4d, 0xfa, 0x7d, 0x01, 0xee, 0xac, 0xa5, 0xe1, 0x5b, 0x5b, 0xb4, + 0x0f, 0xf5, 0xd0, 0x9d, 0xb2, 0x0b, 0x57, 0xa8, 0xe0, 0x16, 0x75, 0x63, 0xcd, 0xb1, 0xfe, 0x0b, + 0xf6, 0x45, 0xb0, 0x99, 0xcf, 0xfd, 0x5b, 0x6d, 0x4b, 0x43, 0x79, 0xce, 0xe5, 0x63, 0x3e, 0x8f, + 0x7c, 0xd3, 0x8d, 0x56, 0x99, 0xaf, 0x07, 0xbc, 0x78, 0x4b, 0xc0, 0x9d, 0x73, 0xa8, 0xa5, 0x06, + 0x92, 0x3d, 0xf3, 0x01, 0x55, 0xc8, 0xbe, 0xcc, 0x9f, 0x24, 0x4c, 0xa0, 0xed, 0xfa, 0x6b, 0xea, + 0x03, 0x28, 0x4f, 0x04, 0x9f, 0xc7, 0xa6, 0xb6, 0xae, 0x20, 0xb4, 0xc4, 0x19, 0x42, 0xd5, 0x70, + 0xc8, 0x01, 0x54, 0x46, 0x8b, 0x73, 0x37, 0x64, 0x46, 0xa1, 0x7a, 0xd8, 0xb8, 0xf6, 0x0d, 0x02, + 0xab, 0x85, 0x46, 0x90, 0xbb, 0x50, 0x1a, 0x2d, 0xfa, 0x5d, 0x3d, 0x26, 0x63, 0xcd, 0xc1, 0x55, + 0xa7, 0xa2, 0x0d, 0x72, 0xbe, 0x81, 0xcd, 0xfc, 0x3e, 0x74, 0x4a, 0x94, 0xea, 0xb5, 0xa9, 0xa2, + 0xb3, 0xe2, 
0x6a, 0xbd, 0xa1, 0xb8, 0x1e, 0xb4, 0xa0, 0x6a, 0x3e, 0x3e, 0x89, 0x0d, 0xe5, 0x27, + 0xe7, 0xc3, 0xde, 0x65, 0x63, 0x83, 0xd4, 0xa0, 0x74, 0x3a, 0x18, 0x5e, 0x36, 0x0a, 0x48, 0x9d, + 0x0f, 0xce, 0x7b, 0x0d, 0xeb, 0xe0, 0xd7, 0x60, 0x2f, 0x3f, 0x92, 0x90, 0xdd, 0xe9, 0x9f, 0x77, + 0x1b, 0x1b, 0x04, 0xa0, 0x32, 0xec, 0x9d, 0xd0, 0x1e, 0x82, 0xab, 0x50, 0x1c, 0x0e, 0x4f, 0x1b, + 0x16, 0xaa, 0x3a, 0x39, 0x3e, 0x39, 0xed, 0x35, 0x8a, 0x48, 0x5e, 0x9e, 0x5d, 0x3c, 0x1e, 0x36, + 0x4a, 0x07, 0x9f, 0xc1, 0x9d, 0xb5, 0x8f, 0x14, 0xb5, 0xfb, 0xf4, 0x98, 0xf6, 0x50, 0x53, 0x1d, + 0xaa, 0x17, 0xb4, 0xff, 0xf4, 0xf8, 0xb2, 0xd7, 0x28, 0xa0, 0xe0, 0x9b, 0xc1, 0xc9, 0xd7, 0xbd, + 0x6e, 0xc3, 0xea, 0xdc, 0xff, 0xfe, 0xe5, 0x6e, 0xe1, 0x87, 0x97, 0xbb, 0x85, 0x1f, 0x5f, 0xee, + 0x16, 0xfe, 0xf9, 0x72, 0xb7, 0xf0, 0xdd, 0xab, 0xdd, 0x8d, 0x1f, 0x5e, 0xed, 0x6e, 0xfc, 0xf8, + 0x6a, 0x77, 0x63, 0x54, 0x51, 0x7f, 0xf0, 0x7c, 0xfa, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, + 0x15, 0x80, 0x49, 0x20, 0x12, 0x00, 0x00, } diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index 9051053d24f3..4946de2e9bbc 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -210,46 +210,74 @@ message FileAction { int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//-- int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; oneof action { + // FileActionCopy copies files from secondaryInput on top of input FileActionCopy copy = 4; + // FileActionMkFile creates a new file FileActionMkFile mkfile = 5; + // FileActionMkDir creates a new directory FileActionMkDir mkdir = 6; + // FileActionRm removes a file FileActionRm rm = 7; } } message FileActionCopy { + // src is the source path string src = 1; + // dest path string dest = 2; + // optional owner override ChownOpt owner = 4; + // optional permission bits override int32 mode = 5; + // followSymlink resolves symlinks in src bool followSymlink = 6; + // dirCopyContents 
only copies contents if src is a directory bool dirCopyContents = 7; - bool attemptUnpack = 8; + // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead + bool attemptUnpackDockerCompatibility = 8; + // createDestPath creates dest path directories if needed bool createDestPath = 9; + // allowWildcard allows filepath.Match wildcards in src path bool allowWildcard = 10; + // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files bool allowEmptyWildcard = 11; + // optional created time override int64 timestamp = 12; } message FileActionMkFile { + // path for the new file string path = 1; + // permission bits int32 mode = 2; + // data is the new file contents bytes data = 3; + // optional owner for the new file ChownOpt owner = 4; + // optional created time override int64 timestamp = 5; } message FileActionMkDir { + // path for the new directory string path = 1; + // permission bits int32 mode = 2; + // makeParents creates parent directories as well if needed bool makeParents = 3; + // optional owner for the new directory ChownOpt owner = 4; + // optional created time override int64 timestamp = 5; } message FileActionRm { + // path to remove string path = 1; + // allowNotFound doesn't fail the rm if file is not found bool allowNotFound = 2; + // allowWildcard allows filepath.Match wildcards in path bool allowWildcard = 3; } @@ -259,7 +287,13 @@ message ChownOpt { } message UserOpt { + oneof user { + NamedUserOpt byName = 1; + uint32 byID = 2; + } +} + +message NamedUserOpt { string name = 1; - int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // input that contains /etc/passwd if using a name - uint32 id = 3; + int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; } \ No newline at end of file From 9fb1f09a1e35aa9e152c170f052311e19df7acd7 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 7 Feb 2019 17:30:57 -0800 Subject: [PATCH 05/25] 
llbsolver: fileop base Signed-off-by: Tonis Tiigi --- solver/llbsolver/ops/file.go | 258 ++++++++++++ solver/llbsolver/ops/file_test.go | 468 ++++++++++++++++++++++ solver/llbsolver/ops/fileoptypes/types.go | 28 ++ 3 files changed, 754 insertions(+) create mode 100644 solver/llbsolver/ops/file.go create mode 100644 solver/llbsolver/ops/file_test.go create mode 100644 solver/llbsolver/ops/fileoptypes/types.go diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go new file mode 100644 index 000000000000..0579cac2aa7d --- /dev/null +++ b/solver/llbsolver/ops/file.go @@ -0,0 +1,258 @@ +package ops + +import ( + "context" + "fmt" + "sync" + + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/flightcontrol" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" +) + +func NewFileOpSolver(b fileoptypes.Backend, r fileoptypes.RefManager) *FileOpSolver { + return &FileOpSolver{ + b: b, + r: r, + outs: map[int]int{}, + ins: map[int]input{}, + } +} + +type FileOpSolver struct { + b fileoptypes.Backend + r fileoptypes.RefManager + + mu sync.Mutex + outs map[int]int + ins map[int]input + g flightcontrol.Group +} + +type input struct { + requiresCommit bool + mount fileoptypes.Mount + ref fileoptypes.Ref +} + +func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) { + for i, a := range actions { + if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) { + return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)) + } + if int(a.SecondaryInput) < -1 || int(a.SecondaryInput) >= len(inputs)+len(actions) { + return nil, errors.Errorf("invalid secondary input index %d, %d provided", a.Input, len(inputs)) + } + + inp, ok := s.ins[int(a.Input)] + if ok { + inp.requiresCommit = true + } + s.ins[int(a.Input)] = inp + + inp, ok = 
s.ins[int(a.SecondaryInput)] + if ok { + inp.requiresCommit = true + } + s.ins[int(a.SecondaryInput)] = inp + + if a.Output != -1 { + if _, ok := s.outs[int(a.Output)]; ok { + return nil, errors.Errorf("duplicate output %d", a.Output) + } + idx := len(inputs) + i + s.outs[int(a.Output)] = idx + s.ins[idx] = input{requiresCommit: true} + } + } + + if len(s.outs) == 0 { + return nil, errors.Errorf("no outputs specified") + } + + for i := 0; i < len(s.outs); i++ { + if _, ok := s.outs[i]; !ok { + return nil, errors.Errorf("missing output index %d", i) + } + } + + outs := make([]fileoptypes.Ref, len(s.outs)) + + eg, ctx := errgroup.WithContext(ctx) + for i, idx := range s.outs { + func(i, idx int) { + eg.Go(func() error { + if err := s.validate(idx, inputs, actions, nil); err != nil { + return err + } + inp, err := s.getInput(ctx, idx, inputs, actions) + if err != nil { + return err + } + outs[i] = inp.ref + return nil + }) + }(i, idx) + } + + if err := eg.Wait(); err != nil { + return nil, err + } + + return outs, nil +} + +func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction, loaded []int) error { + for _, check := range loaded { + if idx == check { + return errors.Errorf("loop from index %d", idx) + } + } + if idx < len(inputs) { + return nil + } + loaded = append(loaded, idx) + action := actions[idx-len(inputs)] + for _, inp := range []int{int(action.Input), int(action.SecondaryInput)} { + if err := s.validate(inp, inputs, actions, loaded); err != nil { + return err + } + } + return nil +} + +func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction) (input, error) { + inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (interface{}, error) { + s.mu.Lock() + inp := s.ins[idx] + s.mu.Unlock() + if inp.mount != nil || inp.ref != nil { + return inp, nil + } + + if idx < len(inputs) { + inp.ref = inputs[idx] + s.mu.Lock() + s.ins[idx] = inp + 
s.mu.Unlock() + return inp, nil + } + + var inpMount, inpMountSecondary fileoptypes.Mount + action := actions[idx-len(inputs)] + + loadInput := func(ctx context.Context) func() error { + return func() error { + inp, err := s.getInput(ctx, int(action.Input), inputs, actions) + if err != nil { + return err + } + if inp.ref != nil { + m, err := s.r.Prepare(ctx, inp.ref, false) + if err != nil { + return err + } + inpMount = m + return nil + } + inpMount = inp.mount + return nil + } + } + + loadSecondaryInput := func(ctx context.Context) func() error { + return func() error { + inp, err := s.getInput(ctx, int(action.SecondaryInput), inputs, actions) + if err != nil { + return err + } + if inp.ref != nil { + m, err := s.r.Prepare(ctx, inp.ref, true) + if err != nil { + return err + } + inpMountSecondary = m + return nil + } + inpMountSecondary = inp.mount + return nil + } + } + + if action.Input != -1 && action.SecondaryInput != -1 { + eg, ctx := errgroup.WithContext(ctx) + eg.Go(loadInput(ctx)) + eg.Go(loadSecondaryInput(ctx)) + if err := eg.Wait(); err != nil { + return nil, err + } + } else { + if action.Input != -1 { + if err := loadInput(ctx)(); err != nil { + return nil, err + } + } + if action.SecondaryInput != -1 { + if err := loadSecondaryInput(ctx)(); err != nil { + return nil, err + } + } + } + + if inpMount == nil { + m, err := s.r.Prepare(ctx, nil, false) + if err != nil { + return nil, err + } + inpMount = m + } + + switch a := action.Action.(type) { + case *pb.FileAction_Mkdir: + if err := s.b.Mkdir(ctx, inpMount, *a.Mkdir); err != nil { + return nil, err + } + case *pb.FileAction_Mkfile: + if err := s.b.Mkfile(ctx, inpMount, *a.Mkfile); err != nil { + return nil, err + } + case *pb.FileAction_Rm: + if err := s.b.Rm(ctx, inpMount, *a.Rm); err != nil { + return nil, err + } + case *pb.FileAction_Copy: + if inpMountSecondary == nil { + m, err := s.r.Prepare(ctx, nil, true) + if err != nil { + return nil, err + } + inpMountSecondary = m + } + if err := 
s.b.Copy(ctx, inpMountSecondary, inpMount, *a.Copy); err != nil { + return nil, err + } + default: + return nil, errors.Errorf("invalid action type %T", action.Action) + } + + if inp.requiresCommit { + ref, err := s.r.Commit(ctx, inpMount) + if err != nil { + return nil, err + } + inp.ref = ref + } else { + inp.mount = inpMount + } + s.mu.Lock() + s.ins[idx] = inp + s.mu.Unlock() + return inp, nil + }) + if err != nil { + return input{}, err + } + return inp.(input), err +} diff --git a/solver/llbsolver/ops/file_test.go b/solver/llbsolver/ops/file_test.go new file mode 100644 index 000000000000..5859499aee3d --- /dev/null +++ b/solver/llbsolver/ops/file_test.go @@ -0,0 +1,468 @@ +package ops + +import ( + "context" + "sync/atomic" + "testing" + + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" +) + +func TestMkdirMkfile(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + { + Input: 1, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + inp := newTestRef("ref1") + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + + o := outs[0].(*testFileRef) + require.Equal(t, "mount-ref1-mkdir-mkfile-commit", o.id) + require.Equal(t, 2, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir) + require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) +} + +func TestInvalidNoOutput(t *testing.T) { + fo := &pb.FileOp{ + 
Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + require.Error(t, err) + require.Contains(t, err.Error(), "no outputs specified") +} + +func TestInvalidDuplicateOutput(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + { + Input: 1, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + require.Error(t, err) + require.Contains(t, err.Error(), "duplicate output") +} + +func TestActionInvalidIndex(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + require.Error(t, err) + require.Contains(t, err.Error(), "loop from index") +} + +func TestActionLoop(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 1, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + { + Input: 0, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + _, err 
:= s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + require.Error(t, err) + require.Contains(t, err.Error(), "loop from index") +} + +func TestMultiOutput(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + { + Input: 1, + SecondaryInput: -1, + Output: 1, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + inp := newTestRef("ref1") + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 2) + + o := outs[0].(*testFileRef) + require.Equal(t, "mount-ref1-mkdir-commit", o.id) + require.Equal(t, 1, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir) + + o = outs[1].(*testFileRef) + require.Equal(t, "mount-ref1-mkdir-mkfile-commit", o.id) + require.Equal(t, 2, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir) + require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) +} + +func TestFileFromScratch(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: -1, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + { + Input: 0, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + }, + }, + }, + }, + } + + s := newTestFileSolver() + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + + o := outs[0].(*testFileRef) + + require.Equal(t, 
"scratch-mkdir-mkfile-commit", o.id) + require.Equal(t, 2, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir) + require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) +} + +func TestFileCopyInputRm(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + }, + }, + }, + { + Input: 1, + SecondaryInput: 2, + Output: -1, + Action: &pb.FileAction_Copy{ + Copy: &pb.FileActionCopy{ + Src: "/src", + Dest: "/dest", + }, + }, + }, + { + Input: 3, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Rm{ + Rm: &pb.FileActionRm{ + Path: "/foo/bar/baz", + }, + }, + }, + }, + } + + s := newTestFileSolver() + inp0 := newTestRef("srcref") + inp1 := newTestRef("destref") + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp0, inp1}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + + o := outs[0].(*testFileRef) + require.Equal(t, "mount-destref-copy(mount-srcref-mkdir)-rm-commit", o.id) + require.Equal(t, 2, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].copySrc[0].mkdir) + require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Copy).Copy, o.mount.chain[0].copy) + require.Equal(t, fo.Actions[2].Action.(*pb.FileAction_Rm).Rm, o.mount.chain[1].rm) +} + +func TestFileParallelActions(t *testing.T) { + // two mkdirs from scratch copied over each other. 
mkdirs should happen in parallel + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo", + }, + }, + }, + { + Input: 0, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/bar", + }, + }, + }, + { + Input: 2, + SecondaryInput: 1, + Output: 0, + Action: &pb.FileAction_Copy{ + Copy: &pb.FileActionCopy{ + Src: "/src", + Dest: "/dest", + }, + }, + }, + }, + } + + s := newTestFileSolver() + inp := newTestRef("inpref") + + ch := make(chan struct{}) + var sem int64 + inp.mount.callback = func() { + if atomic.AddInt64(&sem, 1) == 2 { + close(ch) + } + <-ch + } + + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + + require.Equal(t, int64(2), sem) +} + +func newTestFileSolver() *FileOpSolver { + return NewFileOpSolver(&testFileBackend{}, &testFileRefBackend{}) +} + +type testFileRef struct { + id string + mount testMount + released bool +} + +func (r *testFileRef) Release(context.Context) error { + if r.released { + return errors.Errorf("ref already released") + } + r.released = true + return nil +} + +func newTestRef(id string) *testFileRef { + return &testFileRef{mount: testMount{id: "mount-" + id}, id: id} +} + +type testMount struct { + id string + released bool + chain []mod + callback func() +} + +type mod struct { + mkdir *pb.FileActionMkDir + rm *pb.FileActionRm + mkfile *pb.FileActionMkFile + copy *pb.FileActionCopy + copySrc []mod +} + +func (m *testMount) Release(context.Context) error { + if m.released { + return errors.Errorf("already released") + } + m.released = true + return nil +} + +func (m *testMount) IsFileOpMount() {} + +type testFileBackend struct { +} + +func (b *testFileBackend) Mkdir(_ context.Context, m fileoptypes.Mount, a pb.FileActionMkDir) error { + mm := m.(*testMount) + if mm.callback 
!= nil { + mm.callback() + } + mm.id += "-mkdir" + mm.chain = append(mm.chain, mod{mkdir: &a}) + return nil +} + +func (b *testFileBackend) Mkfile(_ context.Context, m fileoptypes.Mount, a pb.FileActionMkFile) error { + mm := m.(*testMount) + mm.id += "-mkfile" + mm.chain = append(mm.chain, mod{mkfile: &a}) + return nil +} +func (b *testFileBackend) Rm(_ context.Context, m fileoptypes.Mount, a pb.FileActionRm) error { + mm := m.(*testMount) + mm.id += "-rm" + mm.chain = append(mm.chain, mod{rm: &a}) + return nil +} +func (b *testFileBackend) Copy(_ context.Context, m1 fileoptypes.Mount, m fileoptypes.Mount, a pb.FileActionCopy) error { + mm := m.(*testMount) + mm1 := m1.(*testMount) + mm.id += "-copy(" + mm1.id + ")" + mm.chain = append(mm.chain, mod{copy: &a, copySrc: mm1.chain}) + return nil +} + +type testFileRefBackend struct { +} + +func (b *testFileRefBackend) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) { + if ref == nil { + return &testMount{id: "scratch"}, nil + } + m := ref.(*testFileRef).mount + m.chain = append([]mod{}, m.chain...) 
+ return &m, nil +} +func (b *testFileRefBackend) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) { + m := *mount.(*testMount) + return &testFileRef{mount: m, id: m.id + "-commit"}, nil +} diff --git a/solver/llbsolver/ops/fileoptypes/types.go b/solver/llbsolver/ops/fileoptypes/types.go new file mode 100644 index 000000000000..870395200580 --- /dev/null +++ b/solver/llbsolver/ops/fileoptypes/types.go @@ -0,0 +1,28 @@ +package fileoptypes + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" +) + +type Ref interface { + Release(context.Context) error +} + +type Mount interface { + Release(context.Context) error + IsFileOpMount() +} + +type Backend interface { + Mkdir(context.Context, Mount, pb.FileActionMkDir) error + Mkfile(context.Context, Mount, pb.FileActionMkFile) error + Rm(context.Context, Mount, pb.FileActionRm) error + Copy(context.Context, Mount, Mount, pb.FileActionCopy) error +} + +type RefManager interface { + Prepare(ctx context.Context, ref Ref, readonly bool) (Mount, error) + Commit(ctx context.Context, mount Mount) (Ref, error) +} From b2b0e3dbfafc988c642b7933e48a92562ccb195c Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Tue, 19 Feb 2019 18:41:45 -0800 Subject: [PATCH 06/25] fileop: add release support and tests Signed-off-by: Tonis Tiigi --- solver/llbsolver/ops/file.go | 13 ++ solver/llbsolver/ops/file_test.go | 147 +++++++++++++++------- solver/llbsolver/ops/fileoptypes/types.go | 2 +- 3 files changed, 118 insertions(+), 44 deletions(-) diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index 0579cac2aa7d..12513daab5b9 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -78,6 +78,14 @@ func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, acti } } + defer func() { + for _, in := range s.ins { + if in.ref == nil && in.mount != nil { + in.mount.Release(context.TODO()) + } + } + }() + outs := make([]fileoptypes.Ref, 
len(s.outs)) eg, ctx := errgroup.WithContext(ctx) @@ -98,6 +106,11 @@ func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, acti } if err := eg.Wait(); err != nil { + for _, r := range outs { + if r != nil { + r.Release(context.TODO()) + } + } return nil, err } diff --git a/solver/llbsolver/ops/file_test.go b/solver/llbsolver/ops/file_test.go index 5859499aee3d..0baf3067b917 100644 --- a/solver/llbsolver/ops/file_test.go +++ b/solver/llbsolver/ops/file_test.go @@ -40,11 +40,12 @@ func TestMkdirMkfile(t *testing.T) { }, } - s := newTestFileSolver() - inp := newTestRef("ref1") + s, rb := newTestFileSolver() + inp := rb.NewRef("ref1") outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions) require.NoError(t, err) require.Equal(t, len(outs), 1) + rb.checkReleased(t, append(outs, inp)) o := outs[0].(*testFileRef) require.Equal(t, "mount-ref1-mkdir-mkfile-commit", o.id) @@ -71,8 +72,9 @@ func TestInvalidNoOutput(t *testing.T) { }, } - s := newTestFileSolver() - _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + s, rb := newTestFileSolver() + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) + rb.checkReleased(t, outs) require.Error(t, err) require.Contains(t, err.Error(), "no outputs specified") } @@ -106,10 +108,11 @@ func TestInvalidDuplicateOutput(t *testing.T) { }, } - s := newTestFileSolver() + s, rb := newTestFileSolver() _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) require.Error(t, err) require.Contains(t, err.Error(), "duplicate output") + rb.checkReleased(t, nil) } func TestActionInvalidIndex(t *testing.T) { @@ -130,10 +133,11 @@ func TestActionInvalidIndex(t *testing.T) { }, } - s := newTestFileSolver() + s, rb := newTestFileSolver() _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) require.Error(t, err) require.Contains(t, err.Error(), "loop from index") + rb.checkReleased(t, nil) } func TestActionLoop(t *testing.T) { @@ -165,10 +169,11 @@ func 
TestActionLoop(t *testing.T) { }, } - s := newTestFileSolver() + s, rb := newTestFileSolver() _, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) require.Error(t, err) require.Contains(t, err.Error(), "loop from index") + rb.checkReleased(t, nil) } func TestMultiOutput(t *testing.T) { @@ -200,11 +205,12 @@ func TestMultiOutput(t *testing.T) { }, } - s := newTestFileSolver() - inp := newTestRef("ref1") + s, rb := newTestFileSolver() + inp := rb.NewRef("ref1") outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp}, fo.Actions) require.NoError(t, err) require.Equal(t, len(outs), 2) + rb.checkReleased(t, append(outs, inp)) o := outs[0].(*testFileRef) require.Equal(t, "mount-ref1-mkdir-commit", o.id) @@ -247,14 +253,15 @@ func TestFileFromScratch(t *testing.T) { }, } - s := newTestFileSolver() + s, rb := newTestFileSolver() outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{}, fo.Actions) require.NoError(t, err) require.Equal(t, len(outs), 1) + rb.checkReleased(t, outs) o := outs[0].(*testFileRef) - require.Equal(t, "scratch-mkdir-mkfile-commit", o.id) + require.Equal(t, "mount-scratch-mkdir-mkfile-commit", o.id) require.Equal(t, 2, len(o.mount.chain)) require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir) require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) @@ -299,12 +306,13 @@ func TestFileCopyInputRm(t *testing.T) { }, } - s := newTestFileSolver() - inp0 := newTestRef("srcref") - inp1 := newTestRef("destref") + s, rb := newTestFileSolver() + inp0 := rb.NewRef("srcref") + inp1 := rb.NewRef("destref") outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp0, inp1}, fo.Actions) require.NoError(t, err) require.Equal(t, len(outs), 1) + rb.checkReleased(t, append(outs, inp0, inp1)) o := outs[0].(*testFileRef) require.Equal(t, "mount-destref-copy(mount-srcref-mkdir)-rm-commit", o.id) @@ -352,12 +360,12 @@ func TestFileParallelActions(t *testing.T) { }, } - s := 
newTestFileSolver() - inp := newTestRef("inpref") + s, rb := newTestFileSolver() + inp := rb.NewRef("inpref") ch := make(chan struct{}) var sem int64 - inp.mount.callback = func() { + inp.callback = func() { if atomic.AddInt64(&sem, 1) == 2 { close(ch) } @@ -371,33 +379,34 @@ func TestFileParallelActions(t *testing.T) { require.Equal(t, int64(2), sem) } -func newTestFileSolver() *FileOpSolver { - return NewFileOpSolver(&testFileBackend{}, &testFileRefBackend{}) +func newTestFileSolver() (*FileOpSolver, *testFileRefBackend) { + trb := &testFileRefBackend{refs: map[*testFileRef]struct{}{}, mounts: map[string]*testMount{}} + return NewFileOpSolver(&testFileBackend{}, trb), trb } type testFileRef struct { id string - mount testMount - released bool + mount *testMount + refcount int + callback func() } func (r *testFileRef) Release(context.Context) error { - if r.released { + if r.refcount == 0 { return errors.Errorf("ref already released") } - r.released = true + r.refcount-- return nil } -func newTestRef(id string) *testFileRef { - return &testFileRef{mount: testMount{id: "mount-" + id}, id: id} -} - type testMount struct { - id string - released bool - chain []mod - callback func() + b *testFileRefBackend + id string + initID string + chain []mod + callback func() + unmounted bool + active *testFileRef } type mod struct { @@ -408,16 +417,21 @@ type mod struct { copySrc []mod } -func (m *testMount) Release(context.Context) error { - if m.released { - return errors.Errorf("already released") +func (m *testMount) IsFileOpMount() {} +func (m *testMount) Release(ctx context.Context) error { + if m.initID != m.id { + return m.b.mounts[m.initID].Release(ctx) + } + if m.unmounted { + return errors.Errorf("already unmounted") + } + m.unmounted = true + if m.active != nil { + return m.active.Release(ctx) } - m.released = true return nil } -func (m *testMount) IsFileOpMount() {} - type testFileBackend struct { } @@ -452,17 +466,64 @@ func (b *testFileBackend) Copy(_ 
context.Context, m1 fileoptypes.Mount, m fileop } type testFileRefBackend struct { + refs map[*testFileRef]struct{} + mounts map[string]*testMount +} + +func (b *testFileRefBackend) NewRef(id string) *testFileRef { + r := &testFileRef{refcount: 1, id: id} + b.refs[r] = struct{}{} + return r } func (b *testFileRefBackend) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) { + var active *testFileRef if ref == nil { - return &testMount{id: "scratch"}, nil + active = b.NewRef("scratch") + ref = active + } + rr := ref.(*testFileRef) + m := rr.mount + if m == nil { + m = &testMount{b: b, id: "mount-" + rr.id, callback: rr.callback} } - m := ref.(*testFileRef).mount - m.chain = append([]mod{}, m.chain...) - return &m, nil + m.initID = m.id + m.active = active + b.mounts[m.initID] = m + m2 := *m + m2.chain = append([]mod{}, m2.chain...) + return &m2, nil } func (b *testFileRefBackend) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) { - m := *mount.(*testMount) - return &testFileRef{mount: m, id: m.id + "-commit"}, nil + m := mount.(*testMount) + if err := b.mounts[m.initID].Release(context.TODO()); err != nil { + return nil, err + } + m2 := *m + m2.unmounted = false + m2.callback = nil + r := b.NewRef(m2.id + "-commit") + r.mount = &m2 + return r, nil +} + +func (b *testFileRefBackend) checkReleased(t *testing.T, outs []fileoptypes.Ref) { +loop0: + for r := range b.refs { + for _, o := range outs { + if o.(*testFileRef) == r { + require.Equal(t, 1, r.refcount) + continue loop0 + } + } + require.Equal(t, 0, r.refcount, "%s not released", r.id) + } + for _, o := range outs { + _, ok := b.refs[o.(*testFileRef)] + require.True(t, ok) + } + + for _, m := range b.mounts { + require.True(t, m.unmounted, "%s still mounted", m.id) + } } diff --git a/solver/llbsolver/ops/fileoptypes/types.go b/solver/llbsolver/ops/fileoptypes/types.go index 870395200580..3243e3827ded 100644 --- 
a/solver/llbsolver/ops/fileoptypes/types.go +++ b/solver/llbsolver/ops/fileoptypes/types.go @@ -11,8 +11,8 @@ type Ref interface { } type Mount interface { - Release(context.Context) error IsFileOpMount() + Release(context.Context) error } type Backend interface { From 2be999ba52ec6c3e75d3514b4228b23f67bc34c7 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 21 Feb 2019 15:37:15 -0800 Subject: [PATCH 07/25] fileop: llbsolver implementation Signed-off-by: Tonis Tiigi --- solver/llbsolver/file/backend.go | 179 ++++++++++++++++++++++++++++ solver/llbsolver/file/refmanager.go | 67 +++++++++++ 2 files changed, 246 insertions(+) create mode 100644 solver/llbsolver/file/backend.go create mode 100644 solver/llbsolver/file/refmanager.go diff --git a/solver/llbsolver/file/backend.go b/solver/llbsolver/file/backend.go new file mode 100644 index 000000000000..02dec5f78c0e --- /dev/null +++ b/solver/llbsolver/file/backend.go @@ -0,0 +1,179 @@ +package file + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" + copy "github.com/tonistiigi/fsutil/copy" + "golang.org/x/sys/unix" +) + +func mkdir(ctx context.Context, d string, action pb.FileActionMkDir) error { + p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) + if err != nil { + return err + } + + if action.MakeParents { + if err := os.MkdirAll(p, os.FileMode(action.Mode&0777)); err != nil { + return err + } + } else { + if err := os.Mkdir(p, os.FileMode(action.Mode&0777)); err != nil { + return err + } + } + + if action.Timestamp != -1 { + st := unix.Timespec{Sec: action.Timestamp / 1e9, Nsec: action.Timestamp % 1e9} + timespec := []unix.Timespec{st, st} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err 
!= nil { + return errors.Wrapf(err, "failed to utime %s", p) + } + } + return nil +} + +func mkfile(ctx context.Context, d string, action pb.FileActionMkFile) error { + p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) + if err != nil { + return err + } + + if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)|0777); err != nil { + return err + } + + if action.Timestamp != -1 { + st := unix.Timespec{Sec: action.Timestamp / 1e9, Nsec: action.Timestamp % 1e9} + timespec := []unix.Timespec{st, st} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", p) + } + } + return nil +} + +func rm(ctx context.Context, d string, action pb.FileActionRm) error { + p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) + if err != nil { + return err + } + + if err := os.RemoveAll(p); err != nil { + if os.IsNotExist(errors.Cause(err)) && action.AllowNotFound { + return nil + } + return err + } + + return nil +} + +func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy) error { + // // src is the source path + // Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` + // // dest path + // Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` + // // optional owner override + // Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner" json:"owner,omitempty"` + // // optional permission bits override + // Mode int32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + // // followSymlink resolves symlinks in src + // FollowSymlink bool `protobuf:"varint,6,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` + // // dirCopyContents only copies contents if src is a directory + // DirCopyContents bool `protobuf:"varint,7,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` + // // attemptUnpackDockerCompatibility detects if src is an archive to unpack 
it instead + // AttemptUnpackDockerCompatibility bool `protobuf:"varint,8,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` + // // createDestPath creates dest path directories if needed + // CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` + // // allowWildcard allows filepath.Match wildcards in src path + // AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + // // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files + // AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` + // // optional created time override + // Timestamp int64 `protobuf:"varint,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + + srcp, err := fs.RootPath(src, filepath.Join(filepath.Join("/", action.Src))) + if err != nil { + return err + } + + destp, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest))) + if err != nil { + return err + } + + if err := copy.Copy(ctx, srcp, destp); err != nil { + return err + } + + return nil +} + +type FileBackend struct { +} + +func (fb *FileBackend) Mkdir(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkDir) error { + mnt, ok := m.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m) + } + + lm := snapshot.LocalMounter(mnt.m) + dir, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + return mkdir(ctx, dir, action) +} + +func (fb *FileBackend) Mkfile(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkFile) error { + mnt, ok := m.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m) + } + + _ = mnt + + return errors.Errorf("mkfile not implemented") +} +func (fb *FileBackend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error { + mnt, ok := m.(*Mount) + if !ok { + return 
errors.Errorf("invalid mount type %T", m) + } + _ = mnt + + return errors.Errorf("rm not implemented") +} +func (fb *FileBackend) Copy(ctx context.Context, m1 fileoptypes.Mount, m2 fileoptypes.Mount, action pb.FileActionCopy) error { + mnt1, ok := m1.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m1) + } + mnt2, ok := m2.(*Mount) + if !ok { + return errors.Errorf("invalid mount type %T", m2) + } + + _ = mnt1 + _ = mnt2 + return errors.Errorf("copy not implemented") +} + +// type Backend interface { +// Mkdir(context.Context, Mount, pb.FileActionMkDir) error +// Mkfile(context.Context, Mount, pb.FileActionMkFile) error +// Rm(context.Context, Mount, pb.FileActionRm) error +// Copy(context.Context, Mount, Mount, pb.FileActionCopy) error +// } diff --git a/solver/llbsolver/file/refmanager.go b/solver/llbsolver/file/refmanager.go new file mode 100644 index 000000000000..aeb0487564e3 --- /dev/null +++ b/solver/llbsolver/file/refmanager.go @@ -0,0 +1,67 @@ +package file + +import ( + "context" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/pkg/errors" +) + +type RefManager struct { + cm cache.Manager +} + +func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) (fileoptypes.Mount, error) { + ir, ok := ref.(cache.ImmutableRef) + if !ok { + return nil, errors.Errorf("invalid ref type: %T", ref) + } + + if ir != nil && readonly { + m, err := ir.Mount(ctx, readonly) + if err != nil { + return nil, err + } + return &Mount{m: m}, nil + } + + mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target")) + if err != nil { + return nil, err + } + m, err := mr.Mount(ctx, readonly) + if err != nil { + return nil, err + } + return &Mount{m: m, mr: mr}, nil +} + +func (rm *RefManager) Commit(ctx context.Context, mount fileoptypes.Mount) (fileoptypes.Ref, error) { + m, ok := mount.(*Mount) + 
if !ok { + return nil, errors.Errorf("invalid mount type %T", mount) + } + if err := m.Release(context.TODO()); err != nil { + return nil, err + } + if m.mr == nil { + return nil, errors.Errorf("invalid mount without active ref for commit") + } + return m.mr.Commit(ctx) +} + +type Mount struct { + m snapshot.Mountable + mr cache.MutableRef +} + +func (m *Mount) Release(ctx context.Context) error { + m.m.Release() + if m.mr != nil { + return m.mr.Release(ctx) + } + return nil +} +func (m *Mount) IsFileOpMount() {} From 81a5fa5a2e7b8bfc100379eff48c93a49e4d5ff4 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Wed, 27 Feb 2019 14:40:45 -0800 Subject: [PATCH 08/25] llbsolver: fileop implementation Signed-off-by: Tonis Tiigi --- cache/refs.go | 2 +- client/client_test.go | 99 +++++++++++++++ client/llb/fileop.go | 4 +- client/llb/fileop_test.go | 66 ++++++++-- solver/llbsolver/file/backend.go | 54 +++++--- solver/llbsolver/file/refmanager.go | 8 +- solver/llbsolver/ops/exec.go | 10 +- solver/llbsolver/ops/file.go | 185 +++++++++++++++++++++++++++- solver/llbsolver/result.go | 11 +- worker/base/worker.go | 2 + 10 files changed, 402 insertions(+), 39 deletions(-) diff --git a/cache/refs.go b/cache/refs.go index 7521e0aa9af4..4c8fa27d46dd 100644 --- a/cache/refs.go +++ b/cache/refs.go @@ -311,7 +311,7 @@ func (sr *mutableRef) updateLastUsed() bool { func (sr *mutableRef) commit(ctx context.Context) (ImmutableRef, error) { if !sr.mutable || len(sr.refs) == 0 { - return nil, errors.Wrapf(errInvalid, "invalid mutable ref") + return nil, errors.Wrapf(errInvalid, "invalid mutable ref %p", sr) } id := identity.NewID() diff --git a/client/client_test.go b/client/client_test.go index 568e1306ecae..14c03680b39b 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -51,6 +51,8 @@ func (nopWriteCloser) Close() error { return nil } func TestClientIntegration(t *testing.T) { integration.Run(t, []integration.Test{ testRelativeWorkDir, + testFileOpMkdirMkfile, + 
testFileOpCopyRm, testCallDiskUsage, testBuildMultiMount, testBuildHTTPSource, @@ -653,6 +655,103 @@ func testRelativeWorkDir(t *testing.T, sb integration.Sandbox) { require.Equal(t, []byte("/test1/test2\n"), dt) } +func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + st := llb.Scratch(). + File(llb.Mkdir("/foo", 0700).Mkfile("bar", 0600, []byte("contents"))) + + def, err := st.Marshal() + require.NoError(t, err) + + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = c.Solve(context.TODO(), def, SolveOpt{ + Exporter: ExporterLocal, + ExporterOutputDir: destDir, + }, nil) + require.NoError(t, err) + + fi, err := os.Stat(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, true, fi.IsDir()) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "bar")) + require.NoError(t, err) + require.Equal(t, []byte("contents"), dt) +} + +func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) { + requiresLinux(t) + c, err := New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + dir, err := tmpdir( + fstest.CreateFile("myfile", []byte("data0"), 0600), + fstest.CreateDir("sub", 0700), + fstest.CreateFile("sub/foo", []byte("foo0"), 0600), + fstest.CreateFile("sub/bar", []byte("bar0"), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + dir2, err := tmpdir( + fstest.CreateFile("file2", []byte("file2"), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + st := llb.Scratch(). + File( + llb.Copy(llb.Local("mylocal"), "myfile", "myfile2"). + Copy(llb.Local("mylocal"), "sub", "out"). + Rm("out/foo"). 
+ Copy(llb.Local("mylocal2"), "file2", "/")) + + def, err := st.Marshal() + require.NoError(t, err) + + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = c.Solve(context.TODO(), def, SolveOpt{ + Exporter: ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + "mylocal": dir, + "mylocal2": dir2, + }, + }, nil) + require.NoError(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "myfile2")) + require.NoError(t, err) + require.Equal(t, []byte("data0"), dt) + + fi, err := os.Stat(filepath.Join(destDir, "out")) + require.NoError(t, err) + require.Equal(t, true, fi.IsDir()) + + dt, err = ioutil.ReadFile(filepath.Join(destDir, "out/bar")) + require.NoError(t, err) + require.Equal(t, []byte("bar0"), dt) + + _, err = os.Stat(filepath.Join(destDir, "out/foo")) + require.Equal(t, true, os.IsNotExist(err)) + + dt, err = ioutil.ReadFile(filepath.Join(destDir, "file2")) + require.NoError(t, err) + require.Equal(t, []byte("file2"), dt) + +} + func testCallDiskUsage(t *testing.T, sb integration.Sandbox) { c, err := New(context.TODO(), sb.Address()) require.NoError(t, err) diff --git a/client/llb/fileop.go b/client/llb/fileop.go index 074fb35edc56..18d1d68e14aa 100644 --- a/client/llb/fileop.go +++ b/client/llb/fileop.go @@ -634,8 +634,8 @@ func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, } pfo.Actions = append(pfo.Actions, &pb.FileAction{ - Input: getIndex(st.input, len(state.actions), st.inputRelative), - SecondaryInput: getIndex(st.input2, len(state.actions), st.input2Relative), + Input: getIndex(st.input, len(state.inputs), st.inputRelative), + SecondaryInput: getIndex(st.input2, len(state.inputs), st.input2Relative), Output: output, Action: st.action.toProtoAction(parent, st.base), }) diff --git a/client/llb/fileop_test.go b/client/llb/fileop_test.go index 6576530437c5..b32928f35686 100644 --- a/client/llb/fileop_test.go +++ 
b/client/llb/fileop_test.go @@ -76,7 +76,7 @@ func TestFileMkdirChain(t *testing.T) { require.Nil(t, mkdir.Owner) action = f.Actions[1] - require.Equal(t, 3, int(action.Input)) + require.Equal(t, 1, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, -1, int(action.Output)) mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir @@ -86,7 +86,7 @@ func TestFileMkdirChain(t *testing.T) { require.Nil(t, mkdir.Owner) action = f.Actions[2] - require.Equal(t, 4, int(action.Input)) + require.Equal(t, 2, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, 0, int(action.Output)) mkdir = action.Action.(*pb.FileAction_Mkdir).Mkdir @@ -96,6 +96,52 @@ func TestFileMkdirChain(t *testing.T) { require.Nil(t, mkdir.Owner) } +func TestFileMkdirMkfile(t *testing.T) { + t.Parallel() + + st := Scratch().File(Mkdir("/foo", 0700).Mkfile("bar", 0700, []byte("data"))) + def, err := st.Marshal() + + require.NoError(t, err) + + m, arr := parseDef(t, def.Def) + require.Equal(t, 2, len(arr)) + + dgst, idx := last(t, arr) + require.Equal(t, 0, idx) + require.Equal(t, m[dgst], arr[0]) + + f := arr[0].Op.(*pb.Op_File).File + require.Equal(t, len(arr[1].Inputs), 1) + require.Equal(t, m[arr[1].Inputs[0].Digest], arr[0]) + require.Equal(t, 0, int(arr[1].Inputs[0].Index)) + + require.Equal(t, 2, len(f.Actions)) + + action := f.Actions[0] + require.Equal(t, -1, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, -1, int(action.Output)) + + mkdir := action.Action.(*pb.FileAction_Mkdir).Mkdir + + require.Equal(t, "/foo", mkdir.Path) + require.Equal(t, 0700, int(mkdir.Mode)) + require.Equal(t, int64(-1), mkdir.Timestamp) + + action = f.Actions[1] + require.Equal(t, 0, int(action.Input)) + require.Equal(t, -1, int(action.SecondaryInput)) + require.Equal(t, 0, int(action.Output)) + + mkfile := action.Action.(*pb.FileAction_Mkfile).Mkfile + + require.Equal(t, "/bar", mkfile.Path) + require.Equal(t, 0700, 
int(mkfile.Mode)) + require.Equal(t, "data", string(mkfile.Data)) + require.Equal(t, int64(-1), mkfile.Timestamp) +} + func TestFileMkfile(t *testing.T) { t.Parallel() @@ -202,7 +248,7 @@ func TestFileSimpleChains(t *testing.T) { require.Equal(t, "/tmp/sub/foo", rm.Path) action = f.Actions[1] - require.Equal(t, 2, int(action.Input)) + require.Equal(t, 1, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, 0, int(action.Output)) @@ -224,7 +270,7 @@ func TestFileSimpleChains(t *testing.T) { require.Equal(t, "/tmp/foo/bar", mkdir.Path) action = f.Actions[1] - require.Equal(t, 3, int(action.Input)) + require.Equal(t, 1, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, -1, int(action.Output)) @@ -232,7 +278,7 @@ func TestFileSimpleChains(t *testing.T) { require.Equal(t, "/tmp/abc", rm.Path) action = f.Actions[2] - require.Equal(t, 4, int(action.Input)) + require.Equal(t, 2, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, 0, int(action.Output)) @@ -314,7 +360,7 @@ func TestFileCopyFromAction(t *testing.T) { require.Equal(t, 0700, int(mkdir.Mode)) action = f.Actions[1] - require.Equal(t, 3, int(action.Input)) + require.Equal(t, 1, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, -1, int(action.Output)) @@ -326,7 +372,7 @@ func TestFileCopyFromAction(t *testing.T) { action = f.Actions[2] require.Equal(t, 0, int(action.Input)) - require.Equal(t, 4, int(action.SecondaryInput)) + require.Equal(t, 2, int(action.SecondaryInput)) require.Equal(t, 0, int(action.Output)) copy := action.Action.(*pb.FileAction_Copy).Copy @@ -420,7 +466,7 @@ func TestFilePipeline(t *testing.T) { require.Equal(t, 0700, int(mkdir.Mode)) action = f.Actions[1] - require.Equal(t, 4, int(action.Input)) + require.Equal(t, 2, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, -1, int(action.Output)) @@ -432,7 +478,7 @@ func 
TestFilePipeline(t *testing.T) { action = f.Actions[2] require.Equal(t, 0, int(action.Input)) - require.Equal(t, 5, int(action.SecondaryInput)) + require.Equal(t, 3, int(action.SecondaryInput)) require.Equal(t, -1, int(action.Output)) require.Equal(t, arr[4].Inputs[1].Digest, op.Inputs[0].Digest) @@ -442,7 +488,7 @@ func TestFilePipeline(t *testing.T) { require.Equal(t, "/out/baz", copy.Dest) action = f.Actions[3] - require.Equal(t, 6, int(action.Input)) + require.Equal(t, 4, int(action.Input)) require.Equal(t, -1, int(action.SecondaryInput)) require.Equal(t, 0, int(action.Output)) diff --git a/solver/llbsolver/file/backend.go b/solver/llbsolver/file/backend.go index 02dec5f78c0e..d212fa13d64b 100644 --- a/solver/llbsolver/file/backend.go +++ b/solver/llbsolver/file/backend.go @@ -118,10 +118,10 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy) err return nil } -type FileBackend struct { +type Backend struct { } -func (fb *FileBackend) Mkdir(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkDir) error { +func (fb *Backend) Mkdir(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkDir) error { mnt, ok := m.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m) @@ -137,26 +137,37 @@ func (fb *FileBackend) Mkdir(ctx context.Context, m fileoptypes.Mount, action pb return mkdir(ctx, dir, action) } -func (fb *FileBackend) Mkfile(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkFile) error { +func (fb *Backend) Mkfile(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkFile) error { mnt, ok := m.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m) } - _ = mnt + lm := snapshot.LocalMounter(mnt.m) + dir, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() - return errors.Errorf("mkfile not implemented") + return mkfile(ctx, dir, action) } -func (fb *FileBackend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error { +func 
(fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error { mnt, ok := m.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m) } - _ = mnt - return errors.Errorf("rm not implemented") + lm := snapshot.LocalMounter(mnt.m) + dir, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() + + return rm(ctx, dir, action) } -func (fb *FileBackend) Copy(ctx context.Context, m1 fileoptypes.Mount, m2 fileoptypes.Mount, action pb.FileActionCopy) error { +func (fb *Backend) Copy(ctx context.Context, m1 fileoptypes.Mount, m2 fileoptypes.Mount, action pb.FileActionCopy) error { mnt1, ok := m1.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m1) @@ -166,14 +177,19 @@ func (fb *FileBackend) Copy(ctx context.Context, m1 fileoptypes.Mount, m2 fileop return errors.Errorf("invalid mount type %T", m2) } - _ = mnt1 - _ = mnt2 - return errors.Errorf("copy not implemented") -} + lm := snapshot.LocalMounter(mnt1.m) + src, err := lm.Mount() + if err != nil { + return err + } + defer lm.Unmount() -// type Backend interface { -// Mkdir(context.Context, Mount, pb.FileActionMkDir) error -// Mkfile(context.Context, Mount, pb.FileActionMkFile) error -// Rm(context.Context, Mount, pb.FileActionRm) error -// Copy(context.Context, Mount, Mount, pb.FileActionCopy) error -// } + lm2 := snapshot.LocalMounter(mnt2.m) + dest, err := lm2.Mount() + if err != nil { + return err + } + defer lm2.Unmount() + + return docopy(ctx, src, dest, action) +} diff --git a/solver/llbsolver/file/refmanager.go b/solver/llbsolver/file/refmanager.go index aeb0487564e3..bfee9f85b9f6 100644 --- a/solver/llbsolver/file/refmanager.go +++ b/solver/llbsolver/file/refmanager.go @@ -9,13 +9,17 @@ import ( "github.com/pkg/errors" ) +func NewRefManager(cm cache.Manager) *RefManager { + return &RefManager{cm: cm} +} + type RefManager struct { cm cache.Manager } func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool) 
(fileoptypes.Mount, error) { ir, ok := ref.(cache.ImmutableRef) - if !ok { + if !ok && ref != nil { return nil, errors.Errorf("invalid ref type: %T", ref) } @@ -43,7 +47,7 @@ func (rm *RefManager) Commit(ctx context.Context, mount fileoptypes.Mount) (file if !ok { return nil, errors.Errorf("invalid mount type %T", mount) } - if err := m.Release(context.TODO()); err != nil { + if err := m.m.Release(); err != nil { return nil, err } if m.mr == nil { diff --git a/solver/llbsolver/ops/exec.go b/solver/llbsolver/ops/exec.go index bbc744703050..ca9b0b9ed341 100644 --- a/solver/llbsolver/ops/exec.go +++ b/solver/llbsolver/ops/exec.go @@ -149,7 +149,7 @@ func (e *execOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo cm.Deps[i].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) } if !dep.NoContentBasedHash { - cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupePaths(dep.Selectors)) + cm.Deps[i].ComputeDigestFunc = llbsolver.NewContentHashFunc(toSelectors(dedupePaths(dep.Selectors))) } } @@ -180,6 +180,14 @@ func dedupePaths(inp []string) []string { return paths } +func toSelectors(p []string) []llbsolver.Selector { + sel := make([]llbsolver.Selector, 0, len(p)) + for _, p := range p { + sel = append(sel, llbsolver.Selector{Path: p}) + } + return sel +} + type dep struct { Selectors []string NoContentBasedHash bool diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index 12513daab5b9..ada18391c16e 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -1,17 +1,199 @@ package ops import ( + "bytes" "context" + "encoding/json" "fmt" + "path" + "runtime" + "sort" "sync" + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/cache/metadata" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/llbsolver/file" "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" 
"github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) +const fileCacheType = "buildkit.exec.v0" + +type fileOp struct { + op *pb.FileOp + md *metadata.Store + w worker.Worker + solver *FileOpSolver + numInputs int +} + +func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.Store, w worker.Worker) (solver.Op, error) { + return &fileOp{ + op: op.File, + md: md, + numInputs: len(v.Inputs()), + w: w, + solver: NewFileOpSolver(&file.Backend{}, file.NewRefManager(cm)), + }, nil +} + +func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { + selectors := map[int]map[llbsolver.Selector]struct{}{} + + digester := digest.Canonical.Digester() + + for _, action := range f.op.Actions { + var dt []byte + var err error + switch a := action.Action.(type) { + case *pb.FileAction_Mkdir: + p := *a.Mkdir + p.Owner = nil + dt, err = json.Marshal(p) + if err != nil { + return nil, false, err + } + case *pb.FileAction_Mkfile: + p := *a.Mkfile + p.Owner = nil + dt, err = json.Marshal(p) + if err != nil { + return nil, false, err + } + case *pb.FileAction_Rm: + p := *a.Rm + dt, err = json.Marshal(p) + if err != nil { + return nil, false, err + } + case *pb.FileAction_Copy: + p := *a.Copy + p.Owner = nil + if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs { + p.Src = path.Base(p.Src) + addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard) + } + dt, err = json.Marshal(p) + if err != nil { + return nil, false, err + } + } + + if _, err = digester.Hash().Write(dt); err != nil { + return nil, false, err + } + } + + cm := &solver.CacheMap{ + Digest: digester.Digest(), + Deps: make([]struct { + Selector digest.Digest + ComputeDigestFunc 
solver.ResultBasedCacheFunc + }, f.numInputs), + } + + for idx, m := range selectors { + dgsts := make([][]byte, 0, len(m)) + for k := range m { + dgsts = append(dgsts, []byte(k.Path)) + } + sort.Slice(dgsts, func(i, j int) bool { + return bytes.Compare(dgsts[i], dgsts[j]) > 0 + }) + cm.Deps[idx].Selector = digest.FromBytes(bytes.Join(dgsts, []byte{0})) + + cm.Deps[idx].ComputeDigestFunc = llbsolver.NewContentHashFunc(dedupeSelectors(m)) + } + + return cm, true, nil +} + +func (f *fileOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) { + + inpRefs := make([]fileoptypes.Ref, 0, len(inputs)) + for i, inp := range inputs { + workerRef, ok := inp.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for exec %T", inp.Sys()) + } + inpRefs = append(inpRefs, workerRef.ImmutableRef) + logrus.Debugf("inp %d : %+v", i, workerRef.ImmutableRef) + } + + outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions) + if err != nil { + return nil, err + } + + outResults := make([]solver.Result, 0, len(outs)) + for _, out := range outs { + outResults = append(outResults, worker.NewWorkerRefResult(out.(cache.ImmutableRef), f.w)) + } + + return outResults, nil +} + +func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard bool) { + mm, ok := m[idx] + if !ok { + mm = map[llbsolver.Selector]struct{}{} + m[idx] = mm + } + if wildcard && containsWildcards(sel) { + mm[llbsolver.Selector{Path: sel, Wildcard: wildcard}] = struct{}{} + } else { + mm[llbsolver.Selector{Path: sel}] = struct{}{} + } +} + +func containsWildcards(name string) bool { + isWindows := runtime.GOOS == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + +func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector { + paths := make([]string, 0, len(m)) + for sel := range m { + if !sel.Wildcard { + paths = append(paths, sel.Path) + } + } + paths = dedupePaths(paths) + selectors := make([]llbsolver.Selector, 0, len(m)) + + for _, p := range paths { + selectors = append(selectors, llbsolver.Selector{Path: p}) + } + + for sel := range m { + if sel.Wildcard { + selectors = append(selectors, sel) + } + } + + sort.Slice(selectors, func(i, j int) bool { + return selectors[i].Path < selectors[j].Path + }) + + return selectors +} + func NewFileOpSolver(b fileoptypes.Backend, r fileoptypes.RefManager) *FileOpSolver { return &FileOpSolver{ b: b, @@ -39,8 +221,9 @@ type input struct { func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) { for i, a := range actions { + logrus.Debugf("action: %+v", a) if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) { - return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)) + return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)+len(actions)) } if int(a.SecondaryInput) < -1 || int(a.SecondaryInput) >= len(inputs)+len(actions) { return nil, errors.Errorf("invalid secondary input index %d, %d provided", a.Input, len(inputs)) diff --git a/solver/llbsolver/result.go b/solver/llbsolver/result.go index 19431fe0a97a..54fa6fc92465 100644 --- a/solver/llbsolver/result.go +++ b/solver/llbsolver/result.go @@ -13,7 +13,12 @@ import ( "golang.org/x/sync/errgroup" ) -func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc { +type Selector struct { + Path string + Wildcard bool +} + +func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { return func(ctx context.Context, res solver.Result) (digest.Digest, error) { ref, ok := 
res.Sys().(*worker.WorkerRef) if !ok { @@ -21,7 +26,7 @@ func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc { } if len(selectors) == 0 { - selectors = []string{""} + selectors = []Selector{Selector{}} } dgsts := make([][]byte, len(selectors)) @@ -32,7 +37,7 @@ func NewContentHashFunc(selectors []string) solver.ResultBasedCacheFunc { // FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild) // func(i int) { // eg.Go(func() error { - dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel), true) + dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), true) if err != nil { return "", err } diff --git a/worker/base/worker.go b/worker/base/worker.go index ee93d6e34bed..b051390d3d3a 100644 --- a/worker/base/worker.go +++ b/worker/base/worker.go @@ -198,6 +198,8 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, sm, w) case *pb.Op_Exec: return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager, sm, w.MetadataStore, w.Executor, w) + case *pb.Op_File: + return ops.NewFileOp(v, op, w.CacheManager, w.MetadataStore, w) case *pb.Op_Build: return ops.NewBuildOp(v, op, s, w) default: From 4ffd79735bbe150070c4f528bba05e349206a57f Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sat, 2 Mar 2019 16:45:45 -0800 Subject: [PATCH 09/25] fileop: connect with contenthash Signed-off-by: Tonis Tiigi --- cache/contenthash/checksum.go | 15 +++++++++ cmd/buildctl/main.go | 2 ++ cmd/buildkitd/main.go | 2 ++ solver/llbsolver/file/backend.go | 15 ++++++--- solver/llbsolver/file/refmanager.go | 2 +- solver/llbsolver/ops/exec.go | 2 +- solver/llbsolver/ops/file.go | 52 +++++++++++++++++++---------- solver/llbsolver/result.go | 23 +++++++++---- solver/llbsolver/vertex.go | 21 ++++++++++++ 9 files changed, 104 insertions(+), 30 deletions(-) diff --git 
a/cache/contenthash/checksum.go b/cache/contenthash/checksum.go index 3fe5c658a092..6cf5bb9f633b 100644 --- a/cache/contenthash/checksum.go +++ b/cache/contenthash/checksum.go @@ -47,6 +47,10 @@ func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, followLi return getDefaultManager().Checksum(ctx, ref, path, followLinks) } +func ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, path string, followLinks bool) (digest.Digest, error) { + return getDefaultManager().ChecksumWildcard(ctx, ref, path, followLinks) +} + func GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) { return getDefaultManager().GetCacheContext(ctx, md) } @@ -84,6 +88,14 @@ func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p return cc.Checksum(ctx, ref, p, followLinks) } +func (cm *cacheManager) ChecksumWildcard(ctx context.Context, ref cache.ImmutableRef, p string, followLinks bool) (digest.Digest, error) { + cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata())) + if err != nil { + return "", nil + } + return cc.ChecksumWildcard(ctx, ref, p, followLinks) +} + func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem) (CacheContext, error) { cm.locker.Lock(md.ID()) cm.lruMu.Lock() @@ -343,6 +355,9 @@ func (cc *cacheContext) ChecksumWildcard(ctx context.Context, mountable cache.Mo } } } + if len(wildcards) == 0 { + return digest.FromBytes([]byte{}), nil + } if len(wildcards) > 1 { digester := digest.Canonical.Digester() diff --git a/cmd/buildctl/main.go b/cmd/buildctl/main.go index 07edc812e8aa..19534497df5b 100644 --- a/cmd/buildctl/main.go +++ b/cmd/buildctl/main.go @@ -3,6 +3,7 @@ package main import ( "fmt" "os" + "syscall" bccommon "github.com/moby/buildkit/cmd/buildctl/common" "github.com/moby/buildkit/util/apicaps" @@ -15,6 +16,7 @@ import ( func init() { apicaps.ExportedProduct = "buildkit" + syscall.Umask(0) } func main() { diff --git 
a/cmd/buildkitd/main.go b/cmd/buildkitd/main.go index c9e425656fe3..c8355f104488 100644 --- a/cmd/buildkitd/main.go +++ b/cmd/buildkitd/main.go @@ -13,6 +13,7 @@ import ( "sort" "strconv" "strings" + "syscall" "time" "github.com/BurntSushi/toml" @@ -55,6 +56,7 @@ import ( func init() { apicaps.ExportedProduct = "buildkit" seed.WithTimeAndRand() + syscall.Umask(0) } type workerInitializerOpt struct { diff --git a/solver/llbsolver/file/backend.go b/solver/llbsolver/file/backend.go index d212fa13d64b..865bf26496a8 100644 --- a/solver/llbsolver/file/backend.go +++ b/solver/llbsolver/file/backend.go @@ -22,11 +22,11 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir) error { } if action.MakeParents { - if err := os.MkdirAll(p, os.FileMode(action.Mode&0777)); err != nil { + if err := os.MkdirAll(p, os.FileMode(action.Mode)&0777); err != nil { return err } } else { - if err := os.Mkdir(p, os.FileMode(action.Mode&0777)); err != nil { + if err := os.Mkdir(p, os.FileMode(action.Mode)&0777); err != nil { return err } } @@ -38,6 +38,7 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir) error { return errors.Wrapf(err, "failed to utime %s", p) } } + return nil } @@ -47,7 +48,7 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile) error { return err } - if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)|0777); err != nil { + if err := ioutil.WriteFile(p, action.Data, os.FileMode(action.Mode)&0777); err != nil { return err } @@ -111,7 +112,13 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy) err return err } - if err := copy.Copy(ctx, srcp, destp); err != nil { + var opt []copy.Opt + + if action.AllowWildcard { + opt = append(opt, copy.AllowWildcards) + } + + if err := copy.Copy(ctx, srcp, destp, opt...); err != nil { return err } diff --git a/solver/llbsolver/file/refmanager.go b/solver/llbsolver/file/refmanager.go index bfee9f85b9f6..01c30c57314a 100644 --- 
a/solver/llbsolver/file/refmanager.go +++ b/solver/llbsolver/file/refmanager.go @@ -31,7 +31,7 @@ func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly return &Mount{m: m}, nil } - mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target")) + mr, err := rm.cm.New(ctx, ir, cache.WithDescription("fileop target"), cache.CachePolicyRetain) if err != nil { return nil, err } diff --git a/solver/llbsolver/ops/exec.go b/solver/llbsolver/ops/exec.go index ca9b0b9ed341..2d09e39d03a3 100644 --- a/solver/llbsolver/ops/exec.go +++ b/solver/llbsolver/ops/exec.go @@ -183,7 +183,7 @@ func dedupePaths(inp []string) []string { func toSelectors(p []string) []llbsolver.Selector { sel := make([]llbsolver.Selector, 0, len(p)) for _, p := range p { - sel = append(sel, llbsolver.Selector{Path: p}) + sel = append(sel, llbsolver.Selector{Path: p, FollowLinks: true}) } return sel } diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index ada18391c16e..423e54ae8b45 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -21,11 +21,10 @@ import ( "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) -const fileCacheType = "buildkit.exec.v0" +const fileCacheType = "buildkit.file.v0" type fileOp struct { op *pb.FileOp @@ -48,7 +47,7 @@ func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.S func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { selectors := map[int]map[llbsolver.Selector]struct{}{} - digester := digest.Canonical.Digester() + actions := make([][]byte, 0, len(f.op.Actions)) for _, action := range f.op.Actions { var dt []byte @@ -79,7 +78,7 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo p.Owner = nil if action.SecondaryInput != -1 && int(action.SecondaryInput) < 
f.numInputs { p.Src = path.Base(p.Src) - addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard) + addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink) } dt, err = json.Marshal(p) if err != nil { @@ -87,13 +86,22 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo } } - if _, err = digester.Hash().Write(dt); err != nil { - return nil, false, err - } + actions = append(actions, dt) + } + + dt, err := json.Marshal(struct { + Type string + Actions [][]byte + }{ + Type: fileCacheType, + Actions: actions, + }) + if err != nil { + return nil, false, err } cm := &solver.CacheMap{ - Digest: digester.Digest(), + Digest: digest.FromBytes(dt), Deps: make([]struct { Selector digest.Digest ComputeDigestFunc solver.ResultBasedCacheFunc @@ -117,15 +125,13 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo } func (f *fileOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Result, error) { - inpRefs := make([]fileoptypes.Ref, 0, len(inputs)) - for i, inp := range inputs { + for _, inp := range inputs { workerRef, ok := inp.Sys().(*worker.WorkerRef) if !ok { return nil, errors.Errorf("invalid reference for exec %T", inp.Sys()) } inpRefs = append(inpRefs, workerRef.ImmutableRef) - logrus.Debugf("inp %d : %+v", i, workerRef.ImmutableRef) } outs, err := f.solver.Solve(ctx, inpRefs, f.op.Actions) @@ -141,17 +147,21 @@ func (f *fileOp) Exec(ctx context.Context, inputs []solver.Result) ([]solver.Res return outResults, nil } -func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard bool) { +func addSelector(m map[int]map[llbsolver.Selector]struct{}, idx int, sel string, wildcard, followLinks bool) { mm, ok := m[idx] if !ok { mm = map[llbsolver.Selector]struct{}{} m[idx] = mm } + s := llbsolver.Selector{Path: sel} + if wildcard && containsWildcards(sel) { - mm[llbsolver.Selector{Path: sel, Wildcard: wildcard}] = struct{}{} - 
} else { - mm[llbsolver.Selector{Path: sel}] = struct{}{} + s.Wildcard = true + } + if followLinks { + s.FollowLinks = true } + mm[s] = struct{}{} } func containsWildcards(name string) bool { @@ -169,17 +179,26 @@ func containsWildcards(name string) bool { func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector { paths := make([]string, 0, len(m)) + pathsFollow := make([]string, 0, len(m)) for sel := range m { if !sel.Wildcard { - paths = append(paths, sel.Path) + if sel.FollowLinks { + pathsFollow = append(pathsFollow, sel.Path) + } else { + paths = append(paths, sel.Path) + } } } paths = dedupePaths(paths) + pathsFollow = dedupePaths(pathsFollow) selectors := make([]llbsolver.Selector, 0, len(m)) for _, p := range paths { selectors = append(selectors, llbsolver.Selector{Path: p}) } + for _, p := range pathsFollow { + selectors = append(selectors, llbsolver.Selector{Path: p, FollowLinks: true}) + } for sel := range m { if sel.Wildcard { @@ -221,7 +240,6 @@ type input struct { func (s *FileOpSolver) Solve(ctx context.Context, inputs []fileoptypes.Ref, actions []*pb.FileAction) ([]fileoptypes.Ref, error) { for i, a := range actions { - logrus.Debugf("action: %+v", a) if int(a.Input) < -1 || int(a.Input) >= len(inputs)+len(actions) { return nil, errors.Errorf("invalid input index %d, %d provided", a.Input, len(inputs)+len(actions)) } diff --git a/solver/llbsolver/result.go b/solver/llbsolver/result.go index 54fa6fc92465..dc96e4d00f20 100644 --- a/solver/llbsolver/result.go +++ b/solver/llbsolver/result.go @@ -14,8 +14,9 @@ import ( ) type Selector struct { - Path string - Wildcard bool + Path string + Wildcard bool + FollowLinks bool } func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { @@ -26,7 +27,7 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { } if len(selectors) == 0 { - selectors = []Selector{Selector{}} + selectors = []Selector{{}} } dgsts := make([][]byte, len(selectors)) @@ -37,11 
+38,19 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { // FIXME(tonistiigi): enabling this parallelization seems to create wrong results for some big inputs(like gobuild) // func(i int) { // eg.Go(func() error { - dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), true) - if err != nil { - return "", err + if !sel.Wildcard { + dgst, err := contenthash.Checksum(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks) + if err != nil { + return "", err + } + dgsts[i] = []byte(dgst) + } else { + dgst, err := contenthash.ChecksumWildcard(ctx, ref.ImmutableRef, path.Join("/", sel.Path), sel.FollowLinks) + if err != nil { + return "", err + } + dgsts[i] = []byte(dgst) } - dgsts[i] = []byte(dgst) // return nil // }) // }(i) diff --git a/solver/llbsolver/vertex.go b/solver/llbsolver/vertex.go index a86c97869f6a..7a853f785a50 100644 --- a/solver/llbsolver/vertex.go +++ b/solver/llbsolver/vertex.go @@ -1,6 +1,7 @@ package llbsolver import ( + "fmt" "strings" "github.com/containerd/containerd/platforms" @@ -228,9 +229,29 @@ func llbOpName(op *pb.Op) string { return op.Source.Identifier case *pb.Op_Exec: return strings.Join(op.Exec.Meta.Args, " ") + case *pb.Op_File: + return fileOpName(op.File.Actions) case *pb.Op_Build: return "build" default: return "unknown" } } + +func fileOpName(actions []*pb.FileAction) string { + names := make([]string, 0, len(actions)) + for _, action := range actions { + switch a := action.Action.(type) { + case *pb.FileAction_Mkdir: + names = append(names, fmt.Sprintf("mkdir %s", a.Mkdir.Path)) + case *pb.FileAction_Mkfile: + names = append(names, fmt.Sprintf("mkfile %s", a.Mkfile.Path)) + case *pb.FileAction_Rm: + names = append(names, fmt.Sprintf("rm %s", a.Rm.Path)) + case *pb.FileAction_Copy: + names = append(names, fmt.Sprintf("copy %s %s", a.Copy.Src, a.Copy.Dest)) + } + } + + return strings.Join(names, ", ") +} From 171feaafeb414c2206af08bf5df13d165a07c6dd Mon Sep 
17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 15 Mar 2019 16:31:09 -0700 Subject: [PATCH 10/25] vendor: add fsutil copy package Signed-off-by: Tonis Tiigi --- go.mod | 2 +- go.sum | 4 +- .../github.com/tonistiigi/fsutil/copy/copy.go | 423 ++++++++++++++++++ .../tonistiigi/fsutil/copy/copy_linux.go | 97 ++++ .../tonistiigi/fsutil/copy/copy_nowindows.go | 28 ++ .../tonistiigi/fsutil/copy/copy_unix.go | 68 +++ .../tonistiigi/fsutil/copy/copy_windows.go | 33 ++ .../tonistiigi/fsutil/copy/hardlink.go | 27 ++ .../tonistiigi/fsutil/copy/hardlink_unix.go | 17 + .../fsutil/copy/hardlink_windows.go | 7 + .../tonistiigi/fsutil/copy/mkdir.go | 74 +++ .../tonistiigi/fsutil/copy/mkdir_unix.go | 32 ++ .../tonistiigi/fsutil/copy/mkdir_windows.go | 21 + vendor/modules.txt | 3 +- 14 files changed, 832 insertions(+), 4 deletions(-) create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/hardlink.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/mkdir.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go create mode 100644 vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go diff --git a/go.mod b/go.mod index a62360135b1b..e67959dc4d17 100644 --- a/go.mod +++ b/go.mod @@ -49,7 +49,7 @@ require ( github.com/sirupsen/logrus v1.0.3 github.com/stretchr/testify v1.3.0 github.com/syndtr/gocapability 
v0.0.0-20170704070218-db04d3cc01c8 // indirect - github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd + github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e github.com/uber/jaeger-lib v1.2.1 // indirect diff --git a/go.sum b/go.sum index 29c54a1b399a..c6fce10161d6 100644 --- a/go.sum +++ b/go.sum @@ -118,8 +118,8 @@ github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 h1:zLV6q4e8Jv9EHjNg/iHfzwDkCve6Ua5jCygptrtXHvI= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= -github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd h1:TT5pfgTCocfXTnVeEZRKSEsO8vAGM+OMZOSSXEO6ixw= -github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd/go.mod h1:pzh7kdwkDRh+Bx8J30uqaKJ1M4QrSH/um8fcIXeM8rc= +github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92 h1:+Njk7pGJkAqK0k007oRFmr9xSmZUA+VjV0SdW0ctqXs= +github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92/go.mod h1:pzh7kdwkDRh+Bx8J30uqaKJ1M4QrSH/um8fcIXeM8rc= github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe h1:pd7hrFSqUPxYS9IB+UMG1AB/8EXGXo17ssx0bSQ5L6Y= github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe/go.mod h1:/+MCh11CJf2oz0BXmlmqyopK/ad1rKkcOXPoYuPCJYU= github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0= diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy.go b/vendor/github.com/tonistiigi/fsutil/copy/copy.go new file mode 100644 index 
000000000000..a57f9dc8eaaf --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy.go @@ -0,0 +1,423 @@ +package fs + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" + + "github.com/containerd/continuity/fs" + "github.com/pkg/errors" +) + +var bufferPool = &sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 32*1024) + return &buffer + }, +} + +func rootPath(root, p string, followLinks bool) (string, error) { + p = filepath.Join("/", p) + if p == "/" { + return root, nil + } + if followLinks { + return fs.RootPath(root, p) + } + d, f := filepath.Split(p) + ppath, err := fs.RootPath(root, d) + if err != nil { + return "", err + } + return filepath.Join(ppath, f), nil +} + +func ResolveWildcards(root, src string, followLinks bool) ([]string, error) { + d1, d2 := splitWildcards(src) + if d2 != "" { + p, err := rootPath(root, d1, followLinks) + if err != nil { + return nil, err + } + matches, err := resolveWildcards(p, d2) + if err != nil { + return nil, err + } + for i, m := range matches { + p, err := rel(root, m) + if err != nil { + return nil, err + } + matches[i] = p + } + return matches, nil + } + return []string{d1}, nil +} + +// Copy copies files using `cp -a` semantics. +// Copy is likely unsafe to be used in non-containerized environments. +func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) error { + var ci CopyInfo + for _, o := range opts { + o(&ci) + } + ensureDstPath := dst + if d, f := filepath.Split(dst); f != "" && f != "." 
{ + ensureDstPath = d + } + if ensureDstPath != "" { + ensureDstPath, err := fs.RootPath(dstRoot, ensureDstPath) + if err != nil { + return err + } + if err := MkdirAll(ensureDstPath, 0755, ci.Chown, ci.Utime); err != nil { + return err + } + } + + dst, err := fs.RootPath(dstRoot, filepath.Clean(dst)) + if err != nil { + return err + } + + c := newCopier(ci.Chown, ci.Utime, ci.Mode, ci.XAttrErrorHandler) + srcs := []string{src} + + if ci.AllowWildcards { + matches, err := ResolveWildcards(srcRoot, src, ci.FollowLinks) + if err != nil { + return err + } + if len(matches) == 0 { + return errors.Errorf("no matches found: %s", src) + } + srcs = matches + } + + for _, src := range srcs { + srcFollowed, err := rootPath(srcRoot, src, ci.FollowLinks) + if err != nil { + return err + } + dst, err := c.prepareTargetDir(srcFollowed, src, dst, ci.CopyDirContents) + if err != nil { + return err + } + if err := c.copy(ctx, srcFollowed, dst, false); err != nil { + return err + } + } + + return nil +} + +func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirContents bool) (string, error) { + fiSrc, err := os.Lstat(srcFollowed) + if err != nil { + return "", err + } + + fiDest, err := os.Stat(destPath) + if err != nil { + if !os.IsNotExist(err) { + return "", errors.Wrap(err, "failed to lstat destination path") + } + } + + if (!copyDirContents && fiSrc.IsDir() && fiDest != nil) || (!fiSrc.IsDir() && fiDest != nil && fiDest.IsDir()) { + destPath = filepath.Join(destPath, filepath.Base(src)) + } + + target := filepath.Dir(destPath) + + if copyDirContents && fiSrc.IsDir() && fiDest == nil { + target = destPath + } + if err := MkdirAll(target, 0755, c.chown, c.utime); err != nil { + return "", err + } + + return destPath, nil +} + +type ChownOpt struct { + Uid, Gid int +} + +type XAttrErrorHandler func(dst, src, xattrKey string, err error) error + +type CopyInfo struct { + Chown *ChownOpt + Utime *time.Time + AllowWildcards bool + Mode *int + XAttrErrorHandler 
XAttrErrorHandler + CopyDirContents bool + FollowLinks bool +} + +type Opt func(*CopyInfo) + +func WithCopyInfo(ci CopyInfo) func(*CopyInfo) { + return func(c *CopyInfo) { + *c = ci + } +} + +func WithChown(uid, gid int) Opt { + return func(ci *CopyInfo) { + ci.Chown = &ChownOpt{Uid: uid, Gid: gid} + } +} + +func AllowWildcards(ci *CopyInfo) { + ci.AllowWildcards = true +} + +func WithXAttrErrorHandler(h XAttrErrorHandler) Opt { + return func(ci *CopyInfo) { + ci.XAttrErrorHandler = h + } +} + +func AllowXAttrErrors(ci *CopyInfo) { + h := func(string, string, string, error) error { + return nil + } + WithXAttrErrorHandler(h)(ci) +} + +type copier struct { + chown *ChownOpt + utime *time.Time + mode *int + inodes map[uint64]string + xattrErrorHandler XAttrErrorHandler +} + +func newCopier(chown *ChownOpt, tm *time.Time, mode *int, xeh XAttrErrorHandler) *copier { + if xeh == nil { + xeh = func(dst, src, key string, err error) error { + return err + } + } + return &copier{inodes: map[uint64]string{}, chown: chown, utime: tm, xattrErrorHandler: xeh, mode: mode} +} + +// dest is always clean +func (c *copier) copy(ctx context.Context, src, target string, overwriteTargetMetadata bool) error { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + fi, err := os.Lstat(src) + if err != nil { + return errors.Wrapf(err, "failed to stat %s", src) + } + + if !fi.IsDir() { + if err := ensureEmptyFileTarget(target); err != nil { + return err + } + } + + copyFileInfo := true + + switch { + case fi.IsDir(): + if created, err := c.copyDirectory(ctx, src, target, fi, overwriteTargetMetadata); err != nil { + return err + } else if !overwriteTargetMetadata { + copyFileInfo = created + } + case (fi.Mode() & os.ModeType) == 0: + link, err := getLinkSource(target, fi, c.inodes) + if err != nil { + return errors.Wrap(err, "failed to get hardlink") + } + if link != "" { + if err := os.Link(link, target); err != nil { + return errors.Wrap(err, "failed to create hard link") + } 
+ } else if err := copyFile(src, target); err != nil { + return errors.Wrap(err, "failed to copy files") + } + case (fi.Mode() & os.ModeSymlink) == os.ModeSymlink: + link, err := os.Readlink(src) + if err != nil { + return errors.Wrapf(err, "failed to read link: %s", src) + } + if err := os.Symlink(link, target); err != nil { + return errors.Wrapf(err, "failed to create symlink: %s", target) + } + case (fi.Mode() & os.ModeDevice) == os.ModeDevice: + if err := copyDevice(target, fi); err != nil { + return errors.Wrapf(err, "failed to create device") + } + default: + // TODO: Support pipes and sockets + return errors.Wrapf(err, "unsupported mode %s", fi.Mode()) + } + + if copyFileInfo { + if err := c.copyFileInfo(fi, target); err != nil { + return errors.Wrap(err, "failed to copy file info") + } + + if err := copyXAttrs(target, src, c.xattrErrorHandler); err != nil { + return errors.Wrap(err, "failed to copy xattrs") + } + } + return nil +} + +func (c *copier) copyDirectory(ctx context.Context, src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) { + if !stat.IsDir() { + return false, errors.Errorf("source is not directory") + } + + created := false + + if st, err := os.Lstat(dst); err != nil { + if !os.IsNotExist(err) { + return false, err + } + created = true + if err := os.Mkdir(dst, stat.Mode()); err != nil { + return created, errors.Wrapf(err, "failed to mkdir %s", dst) + } + } else if !st.IsDir() { + return false, errors.Errorf("cannot copy to non-directory: %s", dst) + } else if overwriteTargetMetadata { + if err := os.Chmod(dst, stat.Mode()); err != nil { + return false, errors.Wrapf(err, "failed to chmod on %s", dst) + } + } + + fis, err := ioutil.ReadDir(src) + if err != nil { + return false, errors.Wrapf(err, "failed to read %s", src) + } + + for _, fi := range fis { + if err := c.copy(ctx, filepath.Join(src, fi.Name()), filepath.Join(dst, fi.Name()), true); err != nil { + return false, err + } + } + + return created, nil +} + 
+func ensureEmptyFileTarget(dst string) error { + fi, err := os.Lstat(dst) + if err != nil { + if os.IsNotExist(err) { + return nil + } + return errors.Wrap(err, "failed to lstat file target") + } + if fi.IsDir() { + return errors.Errorf("cannot replace to directory %s with file", dst) + } + return os.Remove(dst) +} + +func copyFile(source, target string) error { + src, err := os.Open(source) + if err != nil { + return errors.Wrapf(err, "failed to open source %s", source) + } + defer src.Close() + tgt, err := os.Create(target) + if err != nil { + return errors.Wrapf(err, "failed to open target %s", target) + } + defer tgt.Close() + + return copyFileContent(tgt, src) +} + +func containsWildcards(name string) bool { + isWindows := runtime.GOOS == "windows" + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' && !isWindows { + i++ + } else if ch == '*' || ch == '?' || ch == '[' { + return true + } + } + return false +} + +func splitWildcards(p string) (d1, d2 string) { + parts := strings.Split(filepath.Join(p), string(filepath.Separator)) + var p1, p2 []string + var found bool + for _, p := range parts { + if !found && containsWildcards(p) { + found = true + } + if p == "" { + p = "/" + } + if !found { + p1 = append(p1, p) + } else { + p2 = append(p2, p) + } + } + return filepath.Join(p1...), filepath.Join(p2...) +} + +func resolveWildcards(basePath, comp string) ([]string, error) { + var out []string + err := filepath.Walk(basePath, func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + rel, err := rel(basePath, path) + if err != nil { + return err + } + if rel == "." { + return nil + } + if match, _ := filepath.Match(comp, rel); !match { + return nil + } + out = append(out, path) + if info.IsDir() { + return filepath.SkipDir + } + return nil + }) + if err != nil { + return nil, err + } + return out, nil +} + +// rel makes a path relative to base path. 
Same as `filepath.Rel` but can also +// handle UUID paths in windows. +func rel(basepath, targpath string) (string, error) { + // filepath.Rel can't handle UUID paths in windows + if runtime.GOOS == "windows" { + pfx := basepath + `\` + if strings.HasPrefix(targpath, pfx) { + p := strings.TrimPrefix(targpath, pfx) + if p == "" { + p = "." + } + return p, nil + } + } + return filepath.Rel(basepath, targpath) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go new file mode 100644 index 000000000000..94b023d1c54a --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go @@ -0,0 +1,97 @@ +package fs + +import ( + "io" + "math" + "os" + "syscall" + + "github.com/containerd/containerd/sys" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getUidGid(fi os.FileInfo) (uid, gid int) { + st := fi.Sys().(*syscall.Stat_t) + return int(st.Uid), int(st.Gid) +} + +func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + + chown := c.chown + if chown == nil { + uid, gid := getUidGid(fi) + chown = &ChownOpt{Uid: uid, Gid: gid} + } + if err := Chown(name, chown); err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + + m := fi.Mode() + if c.mode != nil { + m = (m & ^os.FileMode(0777)) | os.FileMode(*c.mode&0777) + } + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, m); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + if c.utime != nil { + if err := Utimes(name, c.utime); err != nil { + return err + } + } else { + timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + } + + return nil +} + +func copyFileContent(dst, 
src *os.File) error { + st, err := src.Stat() + if err != nil { + return errors.Wrap(err, "unable to stat source") + } + + var written int64 + size := st.Size() + first := true + + for written < size { + var desired int + if size-written > math.MaxInt32 { + desired = int(math.MaxInt32) + } else { + desired = int(size - written) + } + + n, err := unix.CopyFileRange(int(src.Fd()), nil, int(dst.Fd()), nil, desired, 0) + if err != nil { + if (err != unix.ENOSYS && err != unix.EXDEV && err != unix.EPERM) || !first { + return errors.Wrap(err, "copy file range failed") + } + + buf := bufferPool.Get().(*[]byte) + _, err = io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return errors.Wrap(err, "userspace copy failed") + } + + first = false + written += int64(n) + } + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go new file mode 100644 index 000000000000..cbd784e5f570 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_nowindows.go @@ -0,0 +1,28 @@ +// +build !windows + +package fs + +import ( + "github.com/pkg/errors" + + "github.com/containerd/continuity/sysx" +) + +// copyXAttrs requires xeh to be non-nil +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + xattrKeys, err := sysx.LListxattr(src) + if err != nil { + return xeh(dst, src, "", errors.Wrapf(err, "failed to list xattrs on %s", src)) + } + for _, xattr := range xattrKeys { + data, err := sysx.LGetxattr(src, xattr) + if err != nil { + return xeh(dst, src, xattr, errors.Wrapf(err, "failed to get xattr %q on %s", xattr, src)) + } + if err := sysx.LSetxattr(dst, xattr, data, 0); err != nil { + return xeh(dst, src, xattr, 
errors.Wrapf(err, "failed to set xattr %q on %s", xattr, dst)) + } + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go new file mode 100644 index 000000000000..f80b7dd807f2 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go @@ -0,0 +1,68 @@ +// +build solaris darwin freebsd + +package fs + +import ( + "io" + "os" + "syscall" + + "github.com/containerd/containerd/sys" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func getUidGid(fi os.FileInfo) (uid, gid int) { + st := fi.Sys().(*syscall.Stat_t) + return int(st.Uid), int(st.Gid) +} + +func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { + st := fi.Sys().(*syscall.Stat_t) + chown := c.chown + if chown == nil { + uid, gid := getUidGid(fi) + chown = &ChownOpt{Uid: uid, Gid: gid} + } + if err := Chown(name, chown); err != nil { + return errors.Wrapf(err, "failed to chown %s", name) + } + + m := fi.Mode() + if c.mode != nil { + m = (m & ^os.FileMode(0777)) | os.FileMode(*c.mode&0777) + } + if (fi.Mode() & os.ModeSymlink) != os.ModeSymlink { + if err := os.Chmod(name, m); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + } + + if c.utime != nil { + if err := Utimes(name, c.utime); err != nil { + return err + } + } else { + timespec := []unix.Timespec{unix.Timespec(sys.StatAtime(st)), unix.Timespec(sys.StatMtime(st))} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, name, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", name) + } + } + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + + return err +} + +func copyDevice(dst string, fi os.FileInfo) error { + st, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return errors.New("unsupported stat type") + } + return 
unix.Mknod(dst, uint32(fi.Mode()), int(st.Rdev)) +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go new file mode 100644 index 000000000000..1c6abb596ccf --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go @@ -0,0 +1,33 @@ +package fs + +import ( + "io" + "os" + + "github.com/pkg/errors" +) + +func (c *copier) copyFileInfo(fi os.FileInfo, name string) error { + if err := os.Chmod(name, fi.Mode()); err != nil { + return errors.Wrapf(err, "failed to chmod %s", name) + } + + // TODO: copy windows specific metadata + + return nil +} + +func copyFileContent(dst, src *os.File) error { + buf := bufferPool.Get().(*[]byte) + _, err := io.CopyBuffer(dst, src, *buf) + bufferPool.Put(buf) + return err +} + +func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { + return nil +} + +func copyDevice(dst string, fi os.FileInfo) error { + return errors.New("device copy not supported") +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go new file mode 100644 index 000000000000..38da93813ce8 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink.go @@ -0,0 +1,27 @@ +package fs + +import "os" + +// GetLinkInfo returns an identifier representing the node a hardlink is pointing +// to. If the file is not hard linked then 0 will be returned. +func GetLinkInfo(fi os.FileInfo) (uint64, bool) { + return getLinkInfo(fi) +} + +// getLinkSource returns a path for the given name and +// file info to its link source in the provided inode +// map. If the given file name is not in the map and +// has other links, it is added to the inode map +// to be a source for other link locations. 
+func getLinkSource(name string, fi os.FileInfo, inodes map[uint64]string) (string, error) { + inode, isHardlink := getLinkInfo(fi) + if !isHardlink { + return "", nil + } + + path, ok := inodes[inode] + if !ok { + inodes[inode] = name + } + return path, nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go new file mode 100644 index 000000000000..3b825c940bf7 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_unix.go @@ -0,0 +1,17 @@ +// +build !windows + +package fs + +import ( + "os" + "syscall" +) + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + s, ok := fi.Sys().(*syscall.Stat_t) + if !ok { + return 0, false + } + + return uint64(s.Ino), !fi.IsDir() && s.Nlink > 1 +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go new file mode 100644 index 000000000000..ad8845a7fb2c --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go @@ -0,0 +1,7 @@ +package fs + +import "os" + +func getLinkInfo(fi os.FileInfo) (uint64, bool) { + return 0, false +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go new file mode 100644 index 000000000000..649551f1a41b --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir.go @@ -0,0 +1,74 @@ +package fs + +import ( + "os" + "syscall" + "time" +) + +func Chown(p string, user *ChownOpt) error { + if user != nil { + if err := os.Lchown(p, user.Uid, user.Gid); err != nil { + return err + } + } + return nil +} + +// MkdirAll is forked os.MkdirAll +func MkdirAll(path string, perm os.FileMode, user *ChownOpt, tm *time.Time) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
+ dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent. + err = MkdirAll(fixRootDirectory(path[:j-1]), perm, user, tm) + if err != nil { + return err + } + } + + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + + if err := Chown(path, user); err != nil { + return err + } + + if err := Utimes(path, tm); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go new file mode 100644 index 000000000000..8fb0f6bc604b --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_unix.go @@ -0,0 +1,32 @@ +// +build !windows + +package fs + +import ( + "time" + + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +func fixRootDirectory(p string) string { + return p +} + +func Utimes(p string, tm *time.Time) error { + if tm == nil { + return nil + } + + ts, err := unix.TimeToTimespec(*tm) + if err != nil { + return err + } + + timespec := []unix.Timespec{ts, ts} + if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return errors.Wrapf(err, "failed to utime %s", p) + } + + return nil +} diff --git 
a/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go new file mode 100644 index 000000000000..6bd17e813358 --- /dev/null +++ b/vendor/github.com/tonistiigi/fsutil/copy/mkdir_windows.go @@ -0,0 +1,21 @@ +// +build windows + +package fs + +import ( + "os" + "time" +) + +func fixRootDirectory(p string) string { + if len(p) == len(`\\?\c:`) { + if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' && os.IsPathSeparator(p[3]) && p[5] == ':' { + return p + `\` + } + } + return p +} + +func Utimes(p string, tm *time.Time) error { + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 422203d2e643..74eb3f4006ea 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -210,9 +210,10 @@ github.com/stretchr/testify/require github.com/stretchr/testify/assert # github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8 github.com/syndtr/gocapability/capability -# github.com/tonistiigi/fsutil v0.0.0-20190314220245-1ec1983587cd +# github.com/tonistiigi/fsutil v0.0.0-20190316003333-2a10686c7e92 github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/types +github.com/tonistiigi/fsutil/copy # github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea github.com/tonistiigi/units # github.com/uber/jaeger-client-go v0.0.0-20180103221425-e02c85f9069e From 7210bf6806999432856be71eccea0b28c4131069 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 15 Mar 2019 16:29:14 -0700 Subject: [PATCH 11/25] fileop: add chown support Signed-off-by: Tonis Tiigi --- cmd/buildctl/main.go | 2 - cmd/buildctl/main_unix.go | 11 ++ cmd/buildkitd/main.go | 2 - cmd/buildkitd/main_unix.go | 11 ++ solver/llbsolver/file/backend.go | 68 ++++++-- solver/llbsolver/file/chown.go | 62 ++++++++ solver/llbsolver/file/chown_unix.go | 7 + solver/llbsolver/file/chown_windows.go | 14 
++ solver/llbsolver/file/user.go | 122 ++++++++++++++ solver/llbsolver/ops/file.go | 98 +++++++++++- solver/llbsolver/ops/file_test.go | 184 +++++++++++++++++++++- solver/llbsolver/ops/fileoptypes/types.go | 6 +- 12 files changed, 561 insertions(+), 26 deletions(-) create mode 100644 cmd/buildctl/main_unix.go create mode 100644 cmd/buildkitd/main_unix.go create mode 100644 solver/llbsolver/file/chown.go create mode 100644 solver/llbsolver/file/chown_unix.go create mode 100644 solver/llbsolver/file/chown_windows.go create mode 100644 solver/llbsolver/file/user.go diff --git a/cmd/buildctl/main.go b/cmd/buildctl/main.go index 19534497df5b..07edc812e8aa 100644 --- a/cmd/buildctl/main.go +++ b/cmd/buildctl/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" "os" - "syscall" bccommon "github.com/moby/buildkit/cmd/buildctl/common" "github.com/moby/buildkit/util/apicaps" @@ -16,7 +15,6 @@ import ( func init() { apicaps.ExportedProduct = "buildkit" - syscall.Umask(0) } func main() { diff --git a/cmd/buildctl/main_unix.go b/cmd/buildctl/main_unix.go new file mode 100644 index 000000000000..93533adb1fdf --- /dev/null +++ b/cmd/buildctl/main_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package main + +import ( + "syscall" +) + +func init() { + syscall.Umask(0) +} diff --git a/cmd/buildkitd/main.go b/cmd/buildkitd/main.go index c8355f104488..c9e425656fe3 100644 --- a/cmd/buildkitd/main.go +++ b/cmd/buildkitd/main.go @@ -13,7 +13,6 @@ import ( "sort" "strconv" "strings" - "syscall" "time" "github.com/BurntSushi/toml" @@ -56,7 +55,6 @@ import ( func init() { apicaps.ExportedProduct = "buildkit" seed.WithTimeAndRand() - syscall.Umask(0) } type workerInitializerOpt struct { diff --git a/cmd/buildkitd/main_unix.go b/cmd/buildkitd/main_unix.go new file mode 100644 index 000000000000..93533adb1fdf --- /dev/null +++ b/cmd/buildkitd/main_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package main + +import ( + "syscall" +) + +func init() { + syscall.Umask(0) 
+} diff --git a/solver/llbsolver/file/backend.go b/solver/llbsolver/file/backend.go index 865bf26496a8..554d869b6d75 100644 --- a/solver/llbsolver/file/backend.go +++ b/solver/llbsolver/file/backend.go @@ -3,6 +3,7 @@ package file import ( "context" "io/ioutil" + "log" "os" "path/filepath" @@ -11,24 +12,34 @@ import ( "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" "github.com/moby/buildkit/solver/pb" "github.com/pkg/errors" + "github.com/sirupsen/logrus" copy "github.com/tonistiigi/fsutil/copy" "golang.org/x/sys/unix" ) -func mkdir(ctx context.Context, d string, action pb.FileActionMkDir) error { +func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *uidgid) error { p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) if err != nil { return err } if action.MakeParents { - if err := os.MkdirAll(p, os.FileMode(action.Mode)&0777); err != nil { + if err := mkdirAll(p, os.FileMode(action.Mode)&0777, user); err != nil { return err } } else { if err := os.Mkdir(p, os.FileMode(action.Mode)&0777); err != nil { + if os.IsExist(err) { + return nil + } return err } + if user != nil { + if err := os.Chown(p, user.uid, user.gid); err != nil { + return err + } + } + } if action.Timestamp != -1 { @@ -42,7 +53,7 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir) error { return nil } -func mkfile(ctx context.Context, d string, action pb.FileActionMkFile) error { +func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *uidgid) error { p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) if err != nil { return err @@ -52,6 +63,12 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile) error { return err } + if user != nil { + if err := os.Chown(p, user.uid, user.gid); err != nil { + return err + } + } + if action.Timestamp != -1 { st := unix.Timespec{Sec: action.Timestamp / 1e9, Nsec: action.Timestamp % 1e9} 
timespec := []unix.Timespec{st, st} @@ -59,6 +76,7 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile) error { return errors.Wrapf(err, "failed to utime %s", p) } } + return nil } @@ -78,7 +96,7 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error { return nil } -func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy) error { +func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *uidgid) error { // // src is the source path // Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` // // dest path @@ -118,6 +136,19 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy) err opt = append(opt, copy.AllowWildcards) } + if u != nil { + opt = append(opt, func(ci *copy.CopyInfo) { + ci.Chown = ©.ChownOpt{Uid: u.uid, Gid: u.gid} + }) + } + + xattrErrorHandler := func(dst, src, key string, err error) error { + log.Println(err) + return nil + } + + opt = append(opt, copy.WithXAttrErrorHandler(xattrErrorHandler)) + if err := copy.Copy(ctx, srcp, destp, opt...); err != nil { return err } @@ -128,7 +159,7 @@ func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy) err type Backend struct { } -func (fb *Backend) Mkdir(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkDir) error { +func (fb *Backend) Mkdir(ctx context.Context, m, user, group fileoptypes.Mount, action pb.FileActionMkDir) error { mnt, ok := m.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m) @@ -141,10 +172,15 @@ func (fb *Backend) Mkdir(ctx context.Context, m fileoptypes.Mount, action pb.Fil } defer lm.Unmount() - return mkdir(ctx, dir, action) + u, err := readUser(action.Owner, user, group) + if err != nil { + return err + } + + return mkdir(ctx, dir, action, u) } -func (fb *Backend) Mkfile(ctx context.Context, m fileoptypes.Mount, action pb.FileActionMkFile) error { +func (fb *Backend) Mkfile(ctx context.Context, m, user, group 
fileoptypes.Mount, action pb.FileActionMkFile) error { mnt, ok := m.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m) @@ -157,7 +193,12 @@ func (fb *Backend) Mkfile(ctx context.Context, m fileoptypes.Mount, action pb.Fi } defer lm.Unmount() - return mkfile(ctx, dir, action) + u, err := readUser(action.Owner, user, group) + if err != nil { + return err + } + + return mkfile(ctx, dir, action, u) } func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileActionRm) error { mnt, ok := m.(*Mount) @@ -174,7 +215,7 @@ func (fb *Backend) Rm(ctx context.Context, m fileoptypes.Mount, action pb.FileAc return rm(ctx, dir, action) } -func (fb *Backend) Copy(ctx context.Context, m1 fileoptypes.Mount, m2 fileoptypes.Mount, action pb.FileActionCopy) error { +func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mount, action pb.FileActionCopy) error { mnt1, ok := m1.(*Mount) if !ok { return errors.Errorf("invalid mount type %T", m1) @@ -198,5 +239,12 @@ func (fb *Backend) Copy(ctx context.Context, m1 fileoptypes.Mount, m2 fileoptype } defer lm2.Unmount() - return docopy(ctx, src, dest, action) + u, err := readUser(action.Owner, user, group) + if err != nil { + return err + } + + logrus.Debugf("copy %+v %+v %+v", action.Owner, user, group) + + return docopy(ctx, src, dest, action, u) } diff --git a/solver/llbsolver/file/chown.go b/solver/llbsolver/file/chown.go new file mode 100644 index 000000000000..4c305cf883d8 --- /dev/null +++ b/solver/llbsolver/file/chown.go @@ -0,0 +1,62 @@ +package file + +import ( + "os" + "syscall" +) + +// mkdirAll is forked os.MkdirAll +func mkdirAll(path string, perm os.FileMode, user *uidgid) error { + // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
+ dir, err := os.Stat(path) + if err == nil { + if dir.IsDir() { + return nil + } + return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} + } + + // Slow path: make sure parent exists and then call Mkdir for path. + i := len(path) + for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. + i-- + } + + j := i + for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. + j-- + } + + if j > 1 { + // Create parent. + err = mkdirAll(fixRootDirectory(path[:j-1]), perm, user) + if err != nil { + return err + } + } + + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + + // Parent now exists; invoke Mkdir and use its result. + err = os.Mkdir(path, perm) + if err != nil { + // Handle arguments like "foo/." by + // double-checking that directory doesn't exist. + dir, err1 := os.Lstat(path) + if err1 == nil && dir.IsDir() { + return nil + } + return err + } + + if user != nil { + if err := os.Chown(path, user.uid, user.gid); err != nil { + return err + } + } + + return nil +} diff --git a/solver/llbsolver/file/chown_unix.go b/solver/llbsolver/file/chown_unix.go new file mode 100644 index 000000000000..4796302ba989 --- /dev/null +++ b/solver/llbsolver/file/chown_unix.go @@ -0,0 +1,7 @@ +// +build !windows + +package file + +func fixRootDirectory(p string) string { + return p +} diff --git a/solver/llbsolver/file/chown_windows.go b/solver/llbsolver/file/chown_windows.go new file mode 100644 index 000000000000..b08746736ae0 --- /dev/null +++ b/solver/llbsolver/file/chown_windows.go @@ -0,0 +1,14 @@ +// +build windows + +package file + +import "os" + +func fixRootDirectory(p string) string { + if len(p) == len(`\\?\c:`) { + if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' 
&& os.IsPathSeparator(p[3]) && p[5] == ':' { + return p + `\` + } + } + return p +} diff --git a/solver/llbsolver/file/user.go b/solver/llbsolver/file/user.go new file mode 100644 index 000000000000..665319b7db80 --- /dev/null +++ b/solver/llbsolver/file/user.go @@ -0,0 +1,122 @@ +package file + +import ( + "os" + + "github.com/containerd/continuity/fs" + "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" + "github.com/moby/buildkit/solver/pb" + "github.com/opencontainers/runc/libcontainer/user" + "github.com/pkg/errors" +) + +type uidgid struct { + uid, gid int +} + +func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*uidgid, error) { + if chopt == nil { + return nil, nil + } + var us uidgid + if chopt.User != nil { + switch u := chopt.User.User.(type) { + case *pb.UserOpt_ByName: + if mu == nil { + return nil, errors.Errorf("invalid missing user mount") + } + mmu, ok := mu.(*Mount) + if !ok { + return nil, errors.Errorf("invalid mount type %T", mu) + } + lm := snapshot.LocalMounter(mmu.m) + dir, err := lm.Mount() + if err != nil { + return nil, err + } + defer lm.Unmount() + + passwdPath, err := user.GetPasswdPath() + if err != nil { + return nil, err + } + + passwdPath, err = fs.RootPath(dir, passwdPath) + if err != nil { + return nil, err + } + + ufile, err := os.Open(passwdPath) + if err != nil { + return nil, err + } + defer ufile.Close() + + users, err := user.ParsePasswdFilter(ufile, func(uu user.User) bool { + return uu.Name == u.ByName.Name + }) + if err != nil { + return nil, err + } + + if len(users) > 0 { + us.uid = users[0].Uid + us.gid = users[0].Gid + } + case *pb.UserOpt_ByID: + us.uid = int(u.ByID) + us.gid = int(u.ByID) + } + } + + if chopt.Group != nil { + switch u := chopt.Group.User.(type) { + case *pb.UserOpt_ByName: + if mg == nil { + return nil, errors.Errorf("invalid missing group mount") + } + mmg, ok := mg.(*Mount) + if !ok { + return 
nil, errors.Errorf("invalid mount type %T", mg) + } + lm := snapshot.LocalMounter(mmg.m) + dir, err := lm.Mount() + if err != nil { + return nil, err + } + defer lm.Unmount() + + groupPath, err := user.GetGroupPath() + if err != nil { + return nil, err + } + + groupPath, err = fs.RootPath(dir, groupPath) + if err != nil { + return nil, err + } + + gfile, err := os.Open(groupPath) + if err != nil { + return nil, err + } + defer gfile.Close() + + groups, err := user.ParseGroupFilter(gfile, func(g user.Group) bool { + return g.Name == u.ByName.Name + }) + if err != nil { + return nil, err + } + + if len(groups) > 0 { + us.gid = groups[0].Gid + } + case *pb.UserOpt_ByID: + us.gid = int(u.ByID) + } + } + + return &us, nil +} diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index 423e54ae8b45..013778fe0b0c 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -55,7 +55,7 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo switch a := action.Action.(type) { case *pb.FileAction_Mkdir: p := *a.Mkdir - p.Owner = nil + processOwner(p.Owner, selectors) dt, err = json.Marshal(p) if err != nil { return nil, false, err @@ -63,6 +63,7 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo case *pb.FileAction_Mkfile: p := *a.Mkfile p.Owner = nil + processOwner(p.Owner, selectors) dt, err = json.Marshal(p) if err != nil { return nil, false, err @@ -75,6 +76,7 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo } case *pb.FileAction_Copy: p := *a.Copy + processOwner(p.Owner, selectors) p.Owner = nil if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs { p.Src = path.Base(p.Src) @@ -213,6 +215,29 @@ func dedupeSelectors(m map[llbsolver.Selector]struct{}) []llbsolver.Selector { return selectors } +func processOwner(chopt *pb.ChownOpt, selectors map[int]map[llbsolver.Selector]struct{}) error { + if chopt == nil { + return 
nil + } + if chopt.User != nil { + if u, ok := chopt.User.User.(*pb.UserOpt_ByName); ok { + if u.ByName.Input < 0 { + return errors.Errorf("invalid user index %d", u.ByName.Input) + } + addSelector(selectors, int(u.ByName.Input), "/etc/passwd", false, true) + } + } + if chopt.Group != nil { + if u, ok := chopt.Group.User.(*pb.UserOpt_ByName); ok { + if u.ByName.Input < 0 { + return errors.Errorf("invalid user index %d", u.ByName.Input) + } + addSelector(selectors, int(u.ByName.Input), "/etc/group", false, true) + } + } + return nil +} + func NewFileOpSolver(b fileoptypes.Backend, r fileoptypes.RefManager) *FileOpSolver { return &FileOpSolver{ b: b, @@ -354,6 +379,13 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp return inp, nil } + var toRelease []fileoptypes.Mount + defer func() { + for _, m := range toRelease { + m.Release(context.TODO()) + } + }() + var inpMount, inpMountSecondary fileoptypes.Mount action := actions[idx-len(inputs)] @@ -388,6 +420,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp return err } inpMountSecondary = m + toRelease = append(toRelease, m) return nil } inpMountSecondary = inp.mount @@ -395,6 +428,51 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp } } + loadUser := func(ctx context.Context, uopt *pb.UserOpt) (fileoptypes.Mount, error) { + if uopt == nil { + return nil, nil + } + switch u := uopt.User.(type) { + case *pb.UserOpt_ByName: + var m fileoptypes.Mount + if u.ByName.Input < 0 { + return nil, errors.Errorf("invalid user index: %d", u.ByName.Input) + } + inp, err := s.getInput(ctx, int(u.ByName.Input), inputs, actions) + if err != nil { + return nil, err + } + if inp.ref != nil { + mm, err := s.r.Prepare(ctx, inp.ref, true) + if err != nil { + return nil, err + } + toRelease = append(toRelease, mm) + m = mm + } else { + m = inp.mount + } + return m, nil + default: + return nil, nil + } + } + + loadOwner := func(ctx 
context.Context, chopt *pb.ChownOpt) (fileoptypes.Mount, fileoptypes.Mount, error) { + if chopt == nil { + return nil, nil, nil + } + um, err := loadUser(ctx, chopt.User) + if err != nil { + return nil, nil, err + } + gm, err := loadUser(ctx, chopt.Group) + if err != nil { + return nil, nil, err + } + return um, gm, nil + } + if action.Input != -1 && action.SecondaryInput != -1 { eg, ctx := errgroup.WithContext(ctx) eg.Go(loadInput(ctx)) @@ -425,11 +503,19 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp switch a := action.Action.(type) { case *pb.FileAction_Mkdir: - if err := s.b.Mkdir(ctx, inpMount, *a.Mkdir); err != nil { + user, group, err := loadOwner(ctx, a.Mkdir.Owner) + if err != nil { + return nil, err + } + if err := s.b.Mkdir(ctx, inpMount, user, group, *a.Mkdir); err != nil { return nil, err } case *pb.FileAction_Mkfile: - if err := s.b.Mkfile(ctx, inpMount, *a.Mkfile); err != nil { + user, group, err := loadOwner(ctx, a.Mkfile.Owner) + if err != nil { + return nil, err + } + if err := s.b.Mkfile(ctx, inpMount, user, group, *a.Mkfile); err != nil { return nil, err } case *pb.FileAction_Rm: @@ -444,7 +530,11 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp } inpMountSecondary = m } - if err := s.b.Copy(ctx, inpMountSecondary, inpMount, *a.Copy); err != nil { + user, group, err := loadOwner(ctx, a.Copy.Owner) + if err != nil { + return nil, err + } + if err := s.b.Copy(ctx, inpMountSecondary, inpMount, user, group, *a.Copy); err != nil { return nil, err } default: diff --git a/solver/llbsolver/ops/file_test.go b/solver/llbsolver/ops/file_test.go index 0baf3067b917..1890d4fedf6e 100644 --- a/solver/llbsolver/ops/file_test.go +++ b/solver/llbsolver/ops/file_test.go @@ -54,6 +54,135 @@ func TestMkdirMkfile(t *testing.T) { require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) } +func TestChownOpt(t *testing.T) { + fo := &pb.FileOp{ + Actions: 
[]*pb.FileAction{ + { + Input: 0, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkdir{ + Mkdir: &pb.FileActionMkDir{ + Path: "/foo/bar", + MakeParents: true, + Mode: 0700, + Owner: &pb.ChownOpt{ + User: &pb.UserOpt{ + User: &pb.UserOpt_ByName{ + ByName: &pb.NamedUserOpt{ + Input: 1, + Name: "myuser", + }, + }, + }, + Group: &pb.UserOpt{ + User: &pb.UserOpt_ByName{ + ByName: &pb.NamedUserOpt{ + Input: 1, + Name: "myuser", + }, + }, + }, + }, + }, + }, + }, + { + Input: 2, + SecondaryInput: -1, + Output: 0, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + Owner: &pb.ChownOpt{ + User: &pb.UserOpt{ + User: &pb.UserOpt_ByID{ + ByID: 100, + }, + }, + }, + }, + }, + }, + }, + } + + s, rb := newTestFileSolver() + inp := rb.NewRef("ref1") + inp2 := rb.NewRef("usermount") + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp, inp2}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + rb.checkReleased(t, append(outs, inp, inp2)) + + o := outs[0].(*testFileRef) + require.Equal(t, "mount-ref1-mkdir#u(mount-usermount)#g(mount-usermount)-mkfile-commit", o.id) + require.Equal(t, 2, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Mkdir).Mkdir, o.mount.chain[0].mkdir) + require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) +} + +func TestChownCopy(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: -1, + SecondaryInput: -1, + Output: -1, + Action: &pb.FileAction_Mkfile{ + Mkfile: &pb.FileActionMkFile{ + Path: "/foo/bar/baz", + Mode: 0700, + }, + }, + }, + { + Input: 1, + SecondaryInput: 0, + Output: 0, + Action: &pb.FileAction_Copy{ + Copy: &pb.FileActionCopy{ + Src: "/src", + Dest: "/dest", + Owner: &pb.ChownOpt{ + User: &pb.UserOpt{ + User: &pb.UserOpt_ByName{ + ByName: &pb.NamedUserOpt{ + Input: 1, + Name: "myuser", + }, + }, + }, + Group: &pb.UserOpt{ + User: &pb.UserOpt_ByName{ + ByName: 
&pb.NamedUserOpt{ + Input: 2, + Name: "mygroup", + }, + }, + }, + }, + }, + }, + }, + }, + } + + s, rb := newTestFileSolver() + inpSrc := rb.NewRef("src") + inpDest := rb.NewRef("dest") + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inpSrc, inpDest}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + rb.checkReleased(t, append(outs, inpSrc, inpDest)) + + o := outs[0].(*testFileRef) + require.Equal(t, "mount-dest-copy(mount-src)#u(mount-dest)#g(mount-scratch-mkfile)-commit", o.id) + require.Equal(t, 1, len(o.mount.chain)) + require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Copy).Copy, o.mount.chain[0].copy) +} + func TestInvalidNoOutput(t *testing.T) { fo := &pb.FileOp{ Actions: []*pb.FileAction{ @@ -267,6 +396,37 @@ func TestFileFromScratch(t *testing.T) { require.Equal(t, fo.Actions[1].Action.(*pb.FileAction_Mkfile).Mkfile, o.mount.chain[1].mkfile) } +func TestFileCopyInputSrc(t *testing.T) { + fo := &pb.FileOp{ + Actions: []*pb.FileAction{ + { + Input: 1, + SecondaryInput: 0, + Output: 0, + Action: &pb.FileAction_Copy{ + Copy: &pb.FileActionCopy{ + Src: "/src", + Dest: "/dest", + }, + }, + }, + }, + } + + s, rb := newTestFileSolver() + inp0 := rb.NewRef("srcref") + inp1 := rb.NewRef("destref") + outs, err := s.Solve(context.TODO(), []fileoptypes.Ref{inp0, inp1}, fo.Actions) + require.NoError(t, err) + require.Equal(t, len(outs), 1) + rb.checkReleased(t, append(outs, inp0, inp1)) + + o := outs[0].(*testFileRef) + require.Equal(t, "mount-destref-copy(mount-srcref)-commit", o.id) + require.Equal(t, 1, len(o.mount.chain)) + require.Equal(t, fo.Actions[0].Action.(*pb.FileAction_Copy).Copy, o.mount.chain[0].copy) +} + func TestFileCopyInputRm(t *testing.T) { fo := &pb.FileOp{ Actions: []*pb.FileAction{ @@ -409,6 +569,17 @@ type testMount struct { active *testFileRef } +func (tm *testMount) addUser(user, group fileoptypes.Mount) { + if user != nil { + um := user.(*testMount) + tm.id += "#u(" + um.id + ")" + } + if group != nil { + gm 
:= group.(*testMount) + tm.id += "#g(" + gm.id + ")" + } +} + type mod struct { mkdir *pb.FileActionMkDir rm *pb.FileActionRm @@ -419,7 +590,7 @@ type mod struct { func (m *testMount) IsFileOpMount() {} func (m *testMount) Release(ctx context.Context) error { - if m.initID != m.id { + if m.b.mounts[m.initID] != m { return m.b.mounts[m.initID].Release(ctx) } if m.unmounted { @@ -435,19 +606,21 @@ func (m *testMount) Release(ctx context.Context) error { type testFileBackend struct { } -func (b *testFileBackend) Mkdir(_ context.Context, m fileoptypes.Mount, a pb.FileActionMkDir) error { +func (b *testFileBackend) Mkdir(_ context.Context, m, user, group fileoptypes.Mount, a pb.FileActionMkDir) error { mm := m.(*testMount) if mm.callback != nil { mm.callback() } mm.id += "-mkdir" + mm.addUser(user, group) mm.chain = append(mm.chain, mod{mkdir: &a}) return nil } -func (b *testFileBackend) Mkfile(_ context.Context, m fileoptypes.Mount, a pb.FileActionMkFile) error { +func (b *testFileBackend) Mkfile(_ context.Context, m, user, group fileoptypes.Mount, a pb.FileActionMkFile) error { mm := m.(*testMount) mm.id += "-mkfile" + mm.addUser(user, group) mm.chain = append(mm.chain, mod{mkfile: &a}) return nil } @@ -457,10 +630,11 @@ func (b *testFileBackend) Rm(_ context.Context, m fileoptypes.Mount, a pb.FileAc mm.chain = append(mm.chain, mod{rm: &a}) return nil } -func (b *testFileBackend) Copy(_ context.Context, m1 fileoptypes.Mount, m fileoptypes.Mount, a pb.FileActionCopy) error { +func (b *testFileBackend) Copy(_ context.Context, m1, m, user, group fileoptypes.Mount, a pb.FileActionCopy) error { mm := m.(*testMount) mm1 := m1.(*testMount) mm.id += "-copy(" + mm1.id + ")" + mm.addUser(user, group) mm.chain = append(mm.chain, mod{copy: &a, copySrc: mm1.chain}) return nil } @@ -524,6 +698,6 @@ loop0: } for _, m := range b.mounts { - require.True(t, m.unmounted, "%s still mounted", m.id) + require.True(t, m.unmounted, "%s %p still mounted", m.id, m) } } diff --git 
a/solver/llbsolver/ops/fileoptypes/types.go b/solver/llbsolver/ops/fileoptypes/types.go index 3243e3827ded..67aab0267754 100644 --- a/solver/llbsolver/ops/fileoptypes/types.go +++ b/solver/llbsolver/ops/fileoptypes/types.go @@ -16,10 +16,10 @@ type Mount interface { } type Backend interface { - Mkdir(context.Context, Mount, pb.FileActionMkDir) error - Mkfile(context.Context, Mount, pb.FileActionMkFile) error + Mkdir(context.Context, Mount, Mount, Mount, pb.FileActionMkDir) error + Mkfile(context.Context, Mount, Mount, Mount, pb.FileActionMkFile) error Rm(context.Context, Mount, pb.FileActionRm) error - Copy(context.Context, Mount, Mount, pb.FileActionCopy) error + Copy(context.Context, Mount, Mount, Mount, Mount, pb.FileActionCopy) error } type RefManager interface { From 8a4674bab439b5b2e502ed2f9ceb6e6385ca1b58 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Thu, 7 Mar 2019 18:40:44 -0800 Subject: [PATCH 12/25] fileop: add dockerfile support Signed-off-by: Tonis Tiigi --- client/llb/fileop.go | 46 ++++++- client/llb/state.go | 9 +- examples/dockerfile2llb/main.go | 4 + frontend/dockerfile/dockerfile2llb/convert.go | 123 +++++++++++++++++- .../dockerfile2llb/convert_fileop.go | 5 + .../dockerfile2llb/convert_nofileop.go | 5 + solver/llbsolver/ops/file.go | 16 ++- solver/pb/caps.go | 8 ++ 8 files changed, 202 insertions(+), 14 deletions(-) create mode 100644 frontend/dockerfile/dockerfile2llb/convert_fileop.go create mode 100644 frontend/dockerfile/dockerfile2llb/convert_nofileop.go diff --git a/client/llb/fileop.go b/client/llb/fileop.go index 18d1d68e14aa..731aa61996d6 100644 --- a/client/llb/fileop.go +++ b/client/llb/fileop.go @@ -4,6 +4,8 @@ import ( _ "crypto/sha256" "os" "path" + "strconv" + "strings" "time" "github.com/moby/buildkit/solver/pb" @@ -29,11 +31,12 @@ import ( // filestate = state.File(c) // filestate.GetOutput(id).Exec() -func NewFileOp(s State, action *FileAction) *FileOp { +func NewFileOp(s State, action *FileAction, c Constraints) 
*FileOp { action = action.bind(s) f := &FileOp{ - action: action, + action: action, + constraints: c, } f.output = &output{vertex: f, getIndex: func() (pb.OutputIndex, error) { @@ -190,9 +193,40 @@ func (mi *MkdirInfo) SetMkdirOption(mi2 *MkdirInfo) { } func WithUser(name string) ChownOption { - return ChownOpt{ - User: &UserOpt{Name: name}, + opt := ChownOpt{} + + parts := strings.SplitN(name, ":", 2) + for i, v := range parts { + switch i { + case 0: + uid, err := parseUID(v) + if err != nil { + opt.User = &UserOpt{Name: v} + } else { + opt.User = &UserOpt{UID: uid} + } + case 1: + gid, err := parseUID(v) + if err != nil { + opt.Group = &UserOpt{Name: v} + } else { + opt.Group = &UserOpt{UID: gid} + } + } + } + + return opt +} + +func parseUID(str string) (int, error) { + if str == "root" { + return 0, nil + } + uid, err := strconv.ParseInt(str, 10, 32) + if err != nil { + return 0, err } + return int(uid), nil } func WithUIDGID(uid, gid int) ChownOption { @@ -479,7 +513,7 @@ type FileOp struct { action *FileAction output Output - // constraints Constraints + constraints Constraints isValidated bool } @@ -610,7 +644,7 @@ func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, pfo := &pb.FileOp{} - pop, md := MarshalConstraints(c, &Constraints{}) + pop, md := MarshalConstraints(c, &f.constraints) pop.Op = &pb.Op_File{ File: pfo, } diff --git a/client/llb/state.go b/client/llb/state.go index 5929e19a506e..0336b7fd1a9e 100644 --- a/client/llb/state.go +++ b/client/llb/state.go @@ -229,8 +229,13 @@ func (s State) Run(ro ...RunOption) ExecState { } } -func (s State) File(a *FileAction) State { - return s.WithOutput(NewFileOp(s, a).Output()) +func (s State) File(a *FileAction, opts ...ConstraintsOpt) State { + var c Constraints + for _, o := range opts { + o.SetConstraintsOption(&c) + } + + return s.WithOutput(NewFileOp(s, a, c).Output()) } func (s State) AddEnv(key, value string) State { diff --git a/examples/dockerfile2llb/main.go 
b/examples/dockerfile2llb/main.go index 9b31ea5d3656..41d2d4d2baed 100644 --- a/examples/dockerfile2llb/main.go +++ b/examples/dockerfile2llb/main.go @@ -9,6 +9,7 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb/imagemetaresolver" "github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb" + "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/appcontext" ) @@ -26,9 +27,12 @@ func main() { panic(err) } + caps := pb.Caps.CapSet(pb.Caps.All()) + state, img, err := dockerfile2llb.Dockerfile2LLB(appcontext.Context(), df, dockerfile2llb.ConvertOpt{ MetaResolver: imagemetaresolver.Default(), Target: opt.target, + LLBCaps: &caps, }) if err != nil { log.Printf("err: %+v", err) diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index 42ea06146456..cb9b1e7af811 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -151,6 +151,10 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, switch cmd.(type) { case *instructions.AddCommand, *instructions.CopyCommand, *instructions.RunCommand: total++ + case *instructions.WorkdirCommand: + if useFileOp(opt.BuildArgs, opt.LLBCaps) { + total++ + } } } ds.cmdTotal = total @@ -307,7 +311,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, d.state = d.state.AddEnv(k, v) } if d.image.Config.WorkingDir != "" { - if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false); err != nil { + if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { return nil, nil, err } } @@ -468,7 +472,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { case *instructions.RunCommand: err = dispatchRun(d, c, opt.proxyEnv, cmd.sources, opt) case *instructions.WorkdirCommand: 
- err = dispatchWorkdir(d, c, true) + err = dispatchWorkdir(d, c, true, &opt) case *instructions.AddCommand: err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "", opt) if err == nil { @@ -648,7 +652,7 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs), true, &d.state) } -func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool) error { +func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bool, opt *dispatchOpt) error { d.state = d.state.Dir(c.Path) wd := c.Path if !path.IsAbs(c.Path) { @@ -656,13 +660,114 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo } d.image.Config.WorkingDir = wd if commit { + if opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { + mkdirOpt := []llb.MkdirOption{llb.WithParents(true)} + if user := d.image.Config.User; user != "" { + mkdirOpt = append(mkdirOpt, llb.WithUser(user)) + } + platform := opt.targetPlatform + if d.platform != nil { + platform = *d.platform + } + d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), d.state.Env())), d.prefixPlatform, &platform))) + } + return commitToHistory(&d.image, "WORKDIR "+wd, false, nil) } return nil } +func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { + dest := path.Join("/", pathRelativeToWorkingDir(d.state, c.Dest())) + if c.Dest() == "." 
|| c.Dest() == "" || c.Dest()[len(c.Dest())-1] == filepath.Separator { + dest += string(filepath.Separator) + } + + var copyOpt []llb.CopyOption + + if chown != "" { + copyOpt = append(copyOpt, llb.WithUser(chown)) + } + + commitMessage := bytes.NewBufferString("") + if isAddCommand { + commitMessage.WriteString("ADD") + } else { + commitMessage.WriteString("COPY") + } + + var a *llb.FileAction + + for _, src := range c.Sources() { + commitMessage.WriteString(" " + src) + if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { + if !isAddCommand { + return errors.New("source can't be a URL for COPY") + } + + // Resources from remote URLs are not decompressed. + // https://docs.docker.com/engine/reference/builder/#add + // + // Note: mixing up remote archives and local archives in a single ADD instruction + // would result in undefined behavior: https://github.com/moby/buildkit/pull/387#discussion_r189494717 + u, err := url.Parse(src) + f := "__unnamed__" + if err == nil { + if base := path.Base(u.Path); base != "." && base != "/" { + f = base + } + } + + st := llb.HTTP(src, llb.Filename(f), dfCmd(c)) + + opts := append([]llb.CopyOption{&llb.CopyInfo{ + CreateDestPath: true, + }}, copyOpt...) + + if a == nil { + a = llb.Copy(st, f, dest, opts...) + } else { + a = a.Copy(st, f, dest, opts...) + } + } else { + opts := append([]llb.CopyOption{&llb.CopyInfo{ + FollowSymlinks: true, + CopyDirContentsOnly: true, + AttemptUnpack: isAddCommand, + CreateDestPath: true, + AllowWildcard: true, + AllowEmptyWildcard: true, + }}, copyOpt...) + + if a == nil { + a = llb.Copy(sourceState, src, dest, opts...) + } else { + a = a.Copy(sourceState, src, dest, opts...) 
+ } + } + } + + commitMessage.WriteString(" " + c.Dest()) + + platform := opt.targetPlatform + if d.platform != nil { + platform = *d.platform + } + + fileOpt := []llb.ConstraintsOpt{llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), d.state.Env())), d.prefixPlatform, &platform))} + if d.ignoreCache { + fileOpt = append(fileOpt, llb.IgnoreCache) + } + + d.state = d.state.File(a, fileOpt...) + return commitToHistory(&d.image, commitMessage.String(), true, &d.state) +} + func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, opt dispatchOpt) error { - // TODO: this should use CopyOp instead. Current implementation is inefficient + if useFileOp(opt.buildArgValues, opt.llbCaps) { + return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, opt) + } + img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) dest := path.Join(".", pathRelativeToWorkingDir(d.state, c.Dest())) @@ -1176,3 +1281,13 @@ func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform out += fmt.Sprintf("%d/%d] ", ds.cmdIndex, ds.cmdTotal) return out + str } + +func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { + enabled := fileOpEnabled + if v, ok := args["BUILDKIT_USE_FILEOP"]; ok { + if b, err := strconv.ParseBool(v); err != nil { + enabled = b + } + } + return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil +} diff --git a/frontend/dockerfile/dockerfile2llb/convert_fileop.go b/frontend/dockerfile/dockerfile2llb/convert_fileop.go new file mode 100644 index 000000000000..9c8abc7c323a --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/convert_fileop.go @@ -0,0 +1,5 @@ +// +build fileop + +package dockerfile2llb + +const fileOpEnabled = true diff --git 
a/frontend/dockerfile/dockerfile2llb/convert_nofileop.go b/frontend/dockerfile/dockerfile2llb/convert_nofileop.go new file mode 100644 index 000000000000..f8f7582b045f --- /dev/null +++ b/frontend/dockerfile/dockerfile2llb/convert_nofileop.go @@ -0,0 +1,5 @@ +// +build !fileop + +package dockerfile2llb + +const fileOpEnabled = false diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index 013778fe0b0c..379d1df9a454 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -46,15 +46,23 @@ func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, md *metadata.S func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, bool, error) { selectors := map[int]map[llbsolver.Selector]struct{}{} + invalidSelectors := map[int]struct{}{} actions := make([][]byte, 0, len(f.op.Actions)) + markInvalid := func(idx pb.InputIndex) { + if idx != -1 { + invalidSelectors[int(idx)] = struct{}{} + } + } + for _, action := range f.op.Actions { var dt []byte var err error switch a := action.Action.(type) { case *pb.FileAction_Mkdir: p := *a.Mkdir + markInvalid(action.Input) processOwner(p.Owner, selectors) dt, err = json.Marshal(p) if err != nil { @@ -62,7 +70,7 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo } case *pb.FileAction_Mkfile: p := *a.Mkfile - p.Owner = nil + markInvalid(action.Input) processOwner(p.Owner, selectors) dt, err = json.Marshal(p) if err != nil { @@ -70,14 +78,15 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo } case *pb.FileAction_Rm: p := *a.Rm + markInvalid(action.Input) dt, err = json.Marshal(p) if err != nil { return nil, false, err } case *pb.FileAction_Copy: p := *a.Copy + markInvalid(action.Input) processOwner(p.Owner, selectors) - p.Owner = nil if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs { p.Src = path.Base(p.Src) addSelector(selectors, int(action.SecondaryInput), p.Src, 
p.AllowWildcard, p.FollowSymlink) @@ -111,6 +120,9 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo } for idx, m := range selectors { + if _, ok := invalidSelectors[idx]; ok { + continue + } dgsts := make([][]byte, 0, len(m)) for k := range m { dgsts = append(dgsts, []byte(k.Path)) diff --git a/solver/pb/caps.go b/solver/pb/caps.go index b81ec62c419a..add5c17ed7e2 100644 --- a/solver/pb/caps.go +++ b/solver/pb/caps.go @@ -43,6 +43,8 @@ const ( CapExecMountSSH apicaps.CapID = "exec.mount.ssh" CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" + CapFileBase apicaps.CapID = "file.base" + CapConstraints apicaps.CapID = "constraints" CapPlatform apicaps.CapID = "platform" @@ -226,6 +228,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapFileBase, + Enabled: true, + Status: apicaps.CapStatusPrerelease, + }) + Caps.Init(apicaps.Cap{ ID: CapConstraints, Enabled: true, From 0d17ac323e26c1183e8ae1e975497ee8cf6613e3 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 8 Mar 2019 16:15:42 -0800 Subject: [PATCH 13/25] fileop: updates with new fsutil copy pkg Signed-off-by: Tonis Tiigi --- cache/contenthash/checksum.go | 3 +- client/client_test.go | 16 +- client/llb/fileop.go | 21 ++- cmd/buildkitd/main.go | 2 + frontend/dockerfile/dockerfile2llb/convert.go | 11 +- frontend/dockerfile/dockerfile_test.go | 76 +++++++- solver/llbsolver/file/backend.go | 162 ++++++++++-------- solver/llbsolver/file/chown.go | 62 ------- solver/llbsolver/file/chown_unix.go | 7 - solver/llbsolver/file/chown_windows.go | 14 -- solver/llbsolver/file/unpack.go | 61 +++++++ solver/llbsolver/file/user.go | 21 +-- solver/llbsolver/ops/file.go | 12 +- 13 files changed, 281 insertions(+), 187 deletions(-) delete mode 100644 solver/llbsolver/file/chown.go delete mode 100644 solver/llbsolver/file/chown_unix.go delete mode 100644 solver/llbsolver/file/chown_windows.go create mode 100644 solver/llbsolver/file/unpack.go diff --git 
a/cache/contenthash/checksum.go b/cache/contenthash/checksum.go index 6cf5bb9f633b..0b878215f475 100644 --- a/cache/contenthash/checksum.go +++ b/cache/contenthash/checksum.go @@ -558,12 +558,13 @@ func (cc *cacheContext) lazyChecksum(ctx context.Context, m *mount, p string) (* } func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *iradix.Txn, m *mount, k []byte, follow bool) (*CacheRecord, bool, error) { + origk := k k, cr, err := getFollowLinks(root, k, follow) if err != nil { return nil, false, err } if cr == nil { - return nil, false, errors.Wrapf(errNotFound, "%s not found", convertKeyToPath(k)) + return nil, false, errors.Wrapf(errNotFound, "%q not found", convertKeyToPath(origk)) } if cr.Digest != "" { return cr, false, nil diff --git a/client/client_test.go b/client/client_test.go index 14c03680b39b..cf41ac208816 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -672,8 +672,12 @@ func testFileOpMkdirMkfile(t *testing.T, sb integration.Sandbox) { defer os.RemoveAll(destDir) _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, }, nil) require.NoError(t, err) @@ -722,8 +726,12 @@ func testFileOpCopyRm(t *testing.T, sb integration.Sandbox) { defer os.RemoveAll(destDir) _, err = c.Solve(context.TODO(), def, SolveOpt{ - Exporter: ExporterLocal, - ExporterOutputDir: destDir, + Exports: []ExportEntry{ + { + Type: ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ "mylocal": dir, "mylocal2": dir2, diff --git a/client/llb/fileop.go b/client/llb/fileop.go index 731aa61996d6..21dd90619469 100644 --- a/client/llb/fileop.go +++ b/client/llb/fileop.go @@ -149,7 +149,7 @@ type fileActionMkdir struct { func (a *fileActionMkdir) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { return &pb.FileAction_Mkdir{ Mkdir: &pb.FileActionMkDir{ - Path: 
normalizePath(parent, a.file), + Path: normalizePath(parent, a.file, false), Mode: int32(a.mode & 0777), MakeParents: a.info.MakeParents, Owner: a.info.ChownOpt.marshal(base), @@ -318,7 +318,7 @@ type fileActionMkfile struct { func (a *fileActionMkfile) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { return &pb.FileAction_Mkfile{ Mkfile: &pb.FileActionMkFile{ - Path: normalizePath(parent, a.file), + Path: normalizePath(parent, a.file, false), Mode: int32(a.mode & 0777), Data: a.dt, Owner: a.info.ChownOpt.marshal(base), @@ -382,7 +382,7 @@ type fileActionRm struct { func (a *fileActionRm) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { return &pb.FileAction_Rm{ Rm: &pb.FileActionRm{ - Path: normalizePath(parent, a.file), + Path: normalizePath(parent, a.file, false), AllowNotFound: a.info.AllowNotFound, AllowWildcard: a.info.AllowWildcard, }, @@ -451,7 +451,7 @@ type fileActionCopy struct { func (a *fileActionCopy) toProtoAction(parent string, base pb.InputIndex) pb.IsFileAction { c := &pb.FileActionCopy{ Src: a.sourcePath(), - Dest: normalizePath(parent, a.dest), + Dest: normalizePath(parent, a.dest, true), Owner: a.info.ChownOpt.marshal(base), AllowWildcard: a.info.AllowWildcard, AllowEmptyWildcard: a.info.AllowEmptyWildcard, @@ -683,11 +683,22 @@ func (f *FileOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata, return f.Load() } -func normalizePath(parent, p string) string { +func normalizePath(parent, p string, keepSlash bool) string { + origPath := p p = path.Clean(p) if !path.IsAbs(p) { p = path.Join("/", parent, p) } + if keepSlash { + if strings.HasSuffix(origPath, "/") && !strings.HasSuffix(p, "/") { + p += "/" + } else if strings.HasSuffix(origPath, "/.") { + if p != "/" { + p += "/" + } + p += "." 
+ } + } return p } diff --git a/cmd/buildkitd/main.go b/cmd/buildkitd/main.go index c9e425656fe3..507f70915b7c 100644 --- a/cmd/buildkitd/main.go +++ b/cmd/buildkitd/main.go @@ -19,6 +19,7 @@ import ( "github.com/containerd/containerd/pkg/seed" "github.com/containerd/containerd/platforms" "github.com/containerd/containerd/sys" + "github.com/docker/docker/pkg/reexec" "github.com/docker/go-connections/sockets" "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" "github.com/moby/buildkit/cache/remotecache" @@ -55,6 +56,7 @@ import ( func init() { apicaps.ExportedProduct = "buildkit" seed.WithTimeAndRand() + reexec.Init() } type workerInitializerOpt struct { diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index cb9b1e7af811..419796b4e6fe 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -474,7 +474,7 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { case *instructions.WorkdirCommand: err = dispatchWorkdir(d, c, true, &opt) case *instructions.AddCommand: - err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, "", opt) + err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, opt) if err == nil { for _, src := range c.Sources() { if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") { @@ -660,7 +660,8 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo } d.image.Config.WorkingDir = wd if commit { - if opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { + withLayer := false + if wd != "/" && opt != nil && useFileOp(opt.buildArgValues, opt.llbCaps) { mkdirOpt := []llb.MkdirOption{llb.WithParents(true)} if user := d.image.Config.User; user != "" { mkdirOpt = append(mkdirOpt, llb.WithUser(user)) @@ -670,9 +671,9 @@ func dispatchWorkdir(d *dispatchState, c 
*instructions.WorkdirCommand, commit bo platform = *d.platform } d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), d.state.Env())), d.prefixPlatform, &platform))) + withLayer = true } - - return commitToHistory(&d.image, "WORKDIR "+wd, false, nil) + return commitToHistory(&d.image, "WORKDIR "+wd, withLayer, nil) } return nil } @@ -1285,7 +1286,7 @@ func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { enabled := fileOpEnabled if v, ok := args["BUILDKIT_USE_FILEOP"]; ok { - if b, err := strconv.ParseBool(v); err != nil { + if b, err := strconv.ParseBool(v); err == nil { enabled = b } } diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index 34c3892912f5..c0f44e8d0cdd 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -81,6 +81,7 @@ var allTests = []integration.Test{ testCopyChownCreateDest, testEmptyDestDir, testSymlinkedDockerfile, + testDockerfileAddArchiveWildcard, } var opts []integration.TestOpt @@ -1309,6 +1310,79 @@ ADD %s /newname.tar.gz require.Equal(t, buf2.Bytes(), dt) } +func testDockerfileAddArchiveWildcard(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + buf := bytes.NewBuffer(nil) + tw := tar.NewWriter(buf) + expectedContent := []byte("content0") + err := tw.WriteHeader(&tar.Header{ + Name: "foo", + Typeflag: tar.TypeReg, + Size: int64(len(expectedContent)), + Mode: 0644, + }) + require.NoError(t, err) + _, err = tw.Write(expectedContent) + require.NoError(t, err) + err = tw.Close() + require.NoError(t, err) + + buf2 := bytes.NewBuffer(nil) + tw = tar.NewWriter(buf2) + expectedContent = []byte("content1") + err = tw.WriteHeader(&tar.Header{ + Name: "bar", + Typeflag: tar.TypeReg, + Size: int64(len(expectedContent)), + Mode: 0644, + }) + require.NoError(t, 
err) + _, err = tw.Write(expectedContent) + require.NoError(t, err) + err = tw.Close() + require.NoError(t, err) + + dockerfile := []byte(` +FROM scratch +ADD *.tar /dest +`) + + dir, err := tmpdir( + fstest.CreateFile("Dockerfile", dockerfile, 0600), + fstest.CreateFile("t.tar", buf.Bytes(), 0600), + fstest.CreateFile("b.tar", buf2.Bytes(), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + c, err := client.New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exporter: client.ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "dest/foo")) + require.NoError(t, err) + require.Equal(t, "content0", string(dt)) + + dt, err = ioutil.ReadFile(filepath.Join(destDir, "dest/bar")) + require.NoError(t, err) + require.Equal(t, "content1", string(dt)) +} + func testSymlinkDestination(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) f.RequiresBuildctl(t) @@ -2061,7 +2135,7 @@ COPY sub/dir1 subdest6 require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir1/dir2/foo")) + dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir2/foo")) require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) diff --git a/solver/llbsolver/file/backend.go b/solver/llbsolver/file/backend.go index 554d869b6d75..05e201c4b533 100644 --- a/solver/llbsolver/file/backend.go +++ b/solver/llbsolver/file/backend.go @@ -6,25 +6,33 @@ import ( "log" "os" "path/filepath" + "strings" + "time" "github.com/containerd/continuity/fs" "github.com/moby/buildkit/snapshot" 
"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" "github.com/moby/buildkit/solver/pb" "github.com/pkg/errors" - "github.com/sirupsen/logrus" copy "github.com/tonistiigi/fsutil/copy" - "golang.org/x/sys/unix" ) -func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *uidgid) error { +func timestampToTime(ts int64) *time.Time { + if ts == -1 { + return nil + } + tm := time.Unix(ts/1e9, ts%1e9) + return &tm +} + +func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *copy.ChownOpt) error { p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) if err != nil { return err } if action.MakeParents { - if err := mkdirAll(p, os.FileMode(action.Mode)&0777, user); err != nil { + if err := copy.MkdirAll(p, os.FileMode(action.Mode)&0777, user, timestampToTime(action.Timestamp)); err != nil { return err } } else { @@ -34,26 +42,18 @@ func mkdir(ctx context.Context, d string, action pb.FileActionMkDir, user *uidgi } return err } - if user != nil { - if err := os.Chown(p, user.uid, user.gid); err != nil { - return err - } + if err := copy.Chown(p, user); err != nil { + return err } - - } - - if action.Timestamp != -1 { - st := unix.Timespec{Sec: action.Timestamp / 1e9, Nsec: action.Timestamp % 1e9} - timespec := []unix.Timespec{st, st} - if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", p) + if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil { + return err } } return nil } -func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *uidgid) error { +func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *copy.ChownOpt) error { p, err := fs.RootPath(d, filepath.Join(filepath.Join("/", action.Path))) if err != nil { return err @@ -63,18 +63,12 @@ func mkfile(ctx context.Context, d string, action pb.FileActionMkFile, user *uid 
return err } - if user != nil { - if err := os.Chown(p, user.uid, user.gid); err != nil { - return err - } + if err := copy.Chown(p, user); err != nil { + return err } - if action.Timestamp != -1 { - st := unix.Timespec{Sec: action.Timestamp / 1e9, Nsec: action.Timestamp % 1e9} - timespec := []unix.Timespec{st, st} - if err := unix.UtimesNanoAt(unix.AT_FDCWD, p, timespec, unix.AT_SYMLINK_NOFOLLOW); err != nil { - return errors.Wrapf(err, "failed to utime %s", p) - } + if err := copy.Utimes(p, timestampToTime(action.Timestamp)); err != nil { + return err } return nil @@ -96,66 +90,90 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error { return nil } -func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *uidgid) error { - // // src is the source path - // Src string `protobuf:"bytes,1,opt,name=src,proto3" json:"src,omitempty"` - // // dest path - // Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` - // // optional owner override - // Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner" json:"owner,omitempty"` - // // optional permission bits override - // Mode int32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` - // // followSymlink resolves symlinks in src - // FollowSymlink bool `protobuf:"varint,6,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` - // // dirCopyContents only copies contents if src is a directory - // DirCopyContents bool `protobuf:"varint,7,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` - // // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead - // AttemptUnpackDockerCompatibility bool `protobuf:"varint,8,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` - // // createDestPath creates dest path directories if needed - // CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` - // // 
allowWildcard allows filepath.Match wildcards in src path - // AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` - // // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files - // AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` - // // optional created time override - // Timestamp int64 `protobuf:"varint,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - - srcp, err := fs.RootPath(src, filepath.Join(filepath.Join("/", action.Src))) - if err != nil { - return err - } +func docopy(ctx context.Context, src, dest string, action pb.FileActionCopy, u *copy.ChownOpt) error { + srcPath := cleanPath(action.Src) + destPath := cleanPath(action.Dest) - destp, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest))) - if err != nil { - return err + if !action.CreateDestPath { + p, err := fs.RootPath(dest, filepath.Join(filepath.Join("/", action.Dest))) + if err != nil { + return err + } + if _, err := os.Lstat(filepath.Dir(p)); err != nil { + return errors.Wrapf(err, "failed to stat %s", action.Dest) + } } - var opt []copy.Opt + xattrErrorHandler := func(dst, src, key string, err error) error { + log.Println(err) + return nil + } - if action.AllowWildcard { - opt = append(opt, copy.AllowWildcards) + opt := []copy.Opt{ + func(ci *copy.CopyInfo) { + ci.Chown = u + ci.Utime = timestampToTime(action.Timestamp) + if m := int(action.Mode); m != -1 { + ci.Mode = &m + } + ci.CopyDirContents = action.DirCopyContents + ci.FollowLinks = action.FollowSymlink + }, + copy.WithXAttrErrorHandler(xattrErrorHandler), } - if u != nil { - opt = append(opt, func(ci *copy.CopyInfo) { - ci.Chown = ©.ChownOpt{Uid: u.uid, Gid: u.gid} - }) + if !action.AllowWildcard { + if action.AttemptUnpackDockerCompatibility { + if ok, err := unpack(ctx, src, srcPath, dest, destPath, u, timestampToTime(action.Timestamp)); err != nil { + 
return err + } else if ok { + return nil + } + } + return copy.Copy(ctx, src, srcPath, dest, destPath, opt...) } - xattrErrorHandler := func(dst, src, key string, err error) error { - log.Println(err) - return nil + m, err := copy.ResolveWildcards(src, srcPath, action.FollowSymlink) + if err != nil { + return err } - opt = append(opt, copy.WithXAttrErrorHandler(xattrErrorHandler)) + if len(m) == 0 { + if action.AllowEmptyWildcard { + return nil + } + return errors.Errorf("%s not found", srcPath) + } - if err := copy.Copy(ctx, srcp, destp, opt...); err != nil { - return err + for _, s := range m { + if action.AttemptUnpackDockerCompatibility { + if ok, err := unpack(ctx, src, s, dest, destPath, u, timestampToTime(action.Timestamp)); err != nil { + return err + } else if ok { + continue + } + } + if err := copy.Copy(ctx, src, s, dest, destPath, opt...); err != nil { + return err + } } return nil } +func cleanPath(s string) string { + s2 := filepath.Join("/", s) + if strings.HasSuffix(s, "/.") { + if s2 != "/" { + s2 += "/" + } + s2 += "." + } else if strings.HasSuffix(s, "/") && s2 != "/" { + s2 += "/" + } + return s2 +} + type Backend struct { } @@ -244,7 +262,5 @@ func (fb *Backend) Copy(ctx context.Context, m1, m2, user, group fileoptypes.Mou return err } - logrus.Debugf("copy %+v %+v %+v", action.Owner, user, group) - return docopy(ctx, src, dest, action, u) } diff --git a/solver/llbsolver/file/chown.go b/solver/llbsolver/file/chown.go deleted file mode 100644 index 4c305cf883d8..000000000000 --- a/solver/llbsolver/file/chown.go +++ /dev/null @@ -1,62 +0,0 @@ -package file - -import ( - "os" - "syscall" -) - -// mkdirAll is forked os.MkdirAll -func mkdirAll(path string, perm os.FileMode, user *uidgid) error { - // Fast path: if we can tell whether path is a directory or file, stop with success or error. 
- dir, err := os.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR} - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent. - err = mkdirAll(fixRootDirectory(path[:j-1]), perm, user) - if err != nil { - return err - } - } - - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - - // Parent now exists; invoke Mkdir and use its result. - err = os.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := os.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - - if user != nil { - if err := os.Chown(path, user.uid, user.gid); err != nil { - return err - } - } - - return nil -} diff --git a/solver/llbsolver/file/chown_unix.go b/solver/llbsolver/file/chown_unix.go deleted file mode 100644 index 4796302ba989..000000000000 --- a/solver/llbsolver/file/chown_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package file - -func fixRootDirectory(p string) string { - return p -} diff --git a/solver/llbsolver/file/chown_windows.go b/solver/llbsolver/file/chown_windows.go deleted file mode 100644 index b08746736ae0..000000000000 --- a/solver/llbsolver/file/chown_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build windows - -package file - -import "os" - -func fixRootDirectory(p string) string { - if len(p) == len(`\\?\c:`) { - if os.IsPathSeparator(p[0]) && os.IsPathSeparator(p[1]) && p[2] == '?' 
&& os.IsPathSeparator(p[3]) && p[5] == ':' { - return p + `\` - } - } - return p -} diff --git a/solver/llbsolver/file/unpack.go b/solver/llbsolver/file/unpack.go new file mode 100644 index 000000000000..395b1379424b --- /dev/null +++ b/solver/llbsolver/file/unpack.go @@ -0,0 +1,61 @@ +package file + +import ( + "archive/tar" + "context" + "os" + "time" + + "github.com/containerd/continuity/fs" + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/chrootarchive" + copy "github.com/tonistiigi/fsutil/copy" +) + +func unpack(ctx context.Context, srcRoot string, src string, destRoot string, dest string, user *copy.ChownOpt, tm *time.Time) (bool, error) { + src, err := fs.RootPath(srcRoot, src) + if err != nil { + return false, err + } + if !isArchivePath(src) { + return false, nil + } + + dest, err = fs.RootPath(destRoot, dest) + if err != nil { + return false, err + } + if err := copy.MkdirAll(dest, 0755, user, tm); err != nil { + return false, err + } + + file, err := os.Open(src) + if err != nil { + return false, err + } + defer file.Close() + + return true, chrootarchive.Untar(file, dest, nil) +} + +func isArchivePath(path string) bool { + fi, err := os.Lstat(path) + if err != nil { + return false + } + if fi.Mode()&os.ModeType != 0 { + return false + } + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := archive.DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} diff --git a/solver/llbsolver/file/user.go b/solver/llbsolver/file/user.go index 665319b7db80..3cc748522aff 100644 --- a/solver/llbsolver/file/user.go +++ b/solver/llbsolver/file/user.go @@ -9,17 +9,14 @@ import ( "github.com/moby/buildkit/solver/pb" "github.com/opencontainers/runc/libcontainer/user" "github.com/pkg/errors" + copy "github.com/tonistiigi/fsutil/copy" ) -type uidgid struct { - uid, gid 
int -} - -func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*uidgid, error) { +func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.ChownOpt, error) { if chopt == nil { return nil, nil } - var us uidgid + var us copy.ChownOpt if chopt.User != nil { switch u := chopt.User.User.(type) { case *pb.UserOpt_ByName: @@ -61,12 +58,12 @@ func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*uidgid, error) { } if len(users) > 0 { - us.uid = users[0].Uid - us.gid = users[0].Gid + us.Uid = users[0].Uid + us.Gid = users[0].Gid } case *pb.UserOpt_ByID: - us.uid = int(u.ByID) - us.gid = int(u.ByID) + us.Uid = int(u.ByID) + us.Gid = int(u.ByID) } } @@ -111,10 +108,10 @@ func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*uidgid, error) { } if len(groups) > 0 { - us.gid = groups[0].Gid + us.Gid = groups[0].Gid } case *pb.UserOpt_ByID: - us.gid = int(u.ByID) + us.Gid = int(u.ByID) } } diff --git a/solver/llbsolver/ops/file.go b/solver/llbsolver/ops/file.go index 379d1df9a454..23bcad4d6c4a 100644 --- a/solver/llbsolver/ops/file.go +++ b/solver/llbsolver/ops/file.go @@ -88,8 +88,8 @@ func (f *fileOp) CacheMap(ctx context.Context, index int) (*solver.CacheMap, boo markInvalid(action.Input) processOwner(p.Owner, selectors) if action.SecondaryInput != -1 && int(action.SecondaryInput) < f.numInputs { - p.Src = path.Base(p.Src) addSelector(selectors, int(action.SecondaryInput), p.Src, p.AllowWildcard, p.FollowSymlink) + p.Src = path.Base(p.Src) } dt, err = json.Marshal(p) if err != nil { @@ -375,7 +375,7 @@ func (s *FileOpSolver) validate(idx int, inputs []fileoptypes.Ref, actions []*pb } func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptypes.Ref, actions []*pb.FileAction) (input, error) { - inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (interface{}, error) { + inp, err := s.g.Do(ctx, fmt.Sprintf("inp-%d", idx), func(ctx context.Context) (_ interface{}, err error) { s.mu.Lock() inp := 
s.ins[idx] s.mu.Unlock() @@ -391,14 +391,18 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp return inp, nil } + var inpMount, inpMountSecondary fileoptypes.Mount var toRelease []fileoptypes.Mount + var inpMountPrepared bool defer func() { for _, m := range toRelease { m.Release(context.TODO()) } + if err != nil && inpMount != nil && inpMountPrepared { + inpMount.Release(context.TODO()) + } }() - var inpMount, inpMountSecondary fileoptypes.Mount action := actions[idx-len(inputs)] loadInput := func(ctx context.Context) func() error { @@ -413,6 +417,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp return err } inpMount = m + inpMountPrepared = true return nil } inpMount = inp.mount @@ -511,6 +516,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp return nil, err } inpMount = m + inpMountPrepared = true } switch a := action.Action.(type) { From 33955c9b9e7c9900483129ffe7d57a33c385477d Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 19:22:05 -0700 Subject: [PATCH 14/25] vendor: revendor new packages Signed-off-by: Tonis Tiigi --- .../docker/docker/pkg/archive/README.md | 1 + .../docker/docker/pkg/archive/archive.go | 1290 +++++++++++++++++ .../docker/pkg/archive/archive_linux.go | 92 ++ .../docker/pkg/archive/archive_other.go | 7 + .../docker/docker/pkg/archive/archive_unix.go | 114 ++ .../docker/pkg/archive/archive_windows.go | 77 + .../docker/docker/pkg/archive/changes.go | 441 ++++++ .../docker/pkg/archive/changes_linux.go | 313 ++++ .../docker/pkg/archive/changes_other.go | 97 ++ .../docker/docker/pkg/archive/changes_unix.go | 37 + .../docker/pkg/archive/changes_windows.go | 30 + .../docker/docker/pkg/archive/copy.go | 472 ++++++ .../docker/docker/pkg/archive/copy_unix.go | 11 + .../docker/docker/pkg/archive/copy_windows.go | 9 + .../docker/docker/pkg/archive/diff.go | 256 ++++ .../docker/pkg/archive/example_changes.go | 97 ++ 
.../docker/docker/pkg/archive/time_linux.go | 16 + .../docker/pkg/archive/time_unsupported.go | 16 + .../docker/docker/pkg/archive/whiteouts.go | 23 + .../docker/docker/pkg/archive/wrap.go | 59 + .../docker/pkg/chrootarchive/archive.go | 73 + .../docker/pkg/chrootarchive/archive_unix.go | 88 ++ .../pkg/chrootarchive/archive_windows.go | 22 + .../docker/pkg/chrootarchive/chroot_linux.go | 113 ++ .../docker/pkg/chrootarchive/chroot_unix.go | 12 + .../docker/docker/pkg/chrootarchive/diff.go | 23 + .../docker/pkg/chrootarchive/diff_unix.go | 130 ++ .../docker/pkg/chrootarchive/diff_windows.go | 45 + .../docker/pkg/chrootarchive/init_unix.go | 28 + .../docker/pkg/chrootarchive/init_windows.go | 4 + .../docker/docker/pkg/pools/pools.go | 137 ++ .../docker/docker/pkg/reexec/README.md | 5 + .../docker/docker/pkg/reexec/command_linux.go | 28 + .../docker/docker/pkg/reexec/command_unix.go | 23 + .../docker/pkg/reexec/command_unsupported.go | 12 + .../docker/pkg/reexec/command_windows.go | 21 + .../docker/docker/pkg/reexec/reexec.go | 47 + vendor/modules.txt | 6 +- 38 files changed, 4274 insertions(+), 1 deletion(-) create mode 100644 vendor/github.com/docker/docker/pkg/archive/README.md create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_other.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/changes_unix.go create mode 
100644 vendor/github.com/docker/docker/pkg/archive/changes_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/copy_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/example_changes.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/time_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/whiteouts.go create mode 100644 vendor/github.com/docker/docker/pkg/archive/wrap.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/pools/pools.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/README.md create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_linux.go 
create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unix.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/command_windows.go create mode 100644 vendor/github.com/docker/docker/pkg/reexec/reexec.go diff --git a/vendor/github.com/docker/docker/pkg/archive/README.md b/vendor/github.com/docker/docker/pkg/archive/README.md new file mode 100644 index 000000000000..7307d9694f66 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/README.md @@ -0,0 +1 @@ +This code provides helper functions for dealing with archive files. diff --git a/vendor/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/docker/docker/pkg/archive/archive.go new file mode 100644 index 000000000000..5fb3995e9b03 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive.go @@ -0,0 +1,1290 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bufio" + "bytes" + "compress/bzip2" + "compress/gzip" + "context" + "fmt" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/fileutils" + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +var unpigzPath string + +func init() { + if path, err := exec.LookPath("unpigz"); err != nil { + logrus.Debug("unpigz binary not found in PATH, falling back to go gzip library") + } else { + logrus.Debugf("Using unpigz binary found at path %s", path) + unpigzPath = path + } +} + +type ( + // Compression is the state represents if compressed or not. 
+ Compression int + // WhiteoutFormat is the format of whiteouts unpacked + WhiteoutFormat int + + // TarOptions wraps the tar options. + TarOptions struct { + IncludeFiles []string + ExcludePatterns []string + Compression Compression + NoLchown bool + UIDMaps []idtools.IDMap + GIDMaps []idtools.IDMap + ChownOpts *idtools.IDPair + IncludeSourceDir bool + // WhiteoutFormat is the expected on disk format for whiteout files. + // This format will be converted to the standard format on pack + // and from the standard format on unpack. + WhiteoutFormat WhiteoutFormat + // When unpacking, specifies whether overwriting a directory with a + // non-directory is allowed and vice versa. + NoOverwriteDirNonDir bool + // For each include when creating an archive, the included name will be + // replaced with the matching name from this map. + RebaseNames map[string]string + InUserNS bool + } +) + +// Archiver implements the Archiver interface and allows the reuse of most utility functions of +// this package with a pluggable Untar function. Also, to facilitate the passing of specific id +// mappings for untar, an Archiver can be created with maps which will then be passed to Untar operations. +type Archiver struct { + Untar func(io.Reader, string, *TarOptions) error + IDMappingsVar *idtools.IDMappings +} + +// NewDefaultArchiver returns a new Archiver without any IDMappings +func NewDefaultArchiver() *Archiver { + return &Archiver{Untar: Untar, IDMappingsVar: &idtools.IDMappings{}} +} + +// breakoutError is used to differentiate errors related to breaking out +// When testing archive breakout in the unit tests, this error is expected +// in order for the test to pass. +type breakoutError error + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Bzip2 is bzip2 compression algorithm. + Bzip2 + // Gzip is gzip compression algorithm. + Gzip + // Xz is xz compression algorithm. 
+ Xz +) + +const ( + // AUFSWhiteoutFormat is the default format for whiteouts + AUFSWhiteoutFormat WhiteoutFormat = iota + // OverlayWhiteoutFormat formats whiteout according to the overlay + // standard. + OverlayWhiteoutFormat +) + +const ( + modeISDIR = 040000 // Directory + modeISFIFO = 010000 // FIFO + modeISREG = 0100000 // Regular file + modeISLNK = 0120000 // Symbolic link + modeISBLK = 060000 // Block special file + modeISCHR = 020000 // Character special file + modeISSOCK = 0140000 // Socket +) + +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. +func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Bzip2: {0x42, 0x5A, 0x68}, + Gzip: {0x1F, 0x8B, 0x08}, + Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, + } { + if len(source) < len(m) { + logrus.Debug("Len too short") + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +func xzDecompress(ctx context.Context, archive io.Reader) (io.ReadCloser, error) { + args := []string{"xz", "-d", "-c", "-q"} + + return cmdStream(exec.CommandContext(ctx, args[0], args[1:]...), archive) +} + +func gzDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + disablePigzEnv := os.Getenv("MOBY_DISABLE_PIGZ") + if disablePigzEnv != "" { + if disablePigz, err := strconv.ParseBool(disablePigzEnv); err != nil { + return nil, err + } else if disablePigz { + return gzip.NewReader(buf) + } + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", 
"-c"), buf) +} + +func wrapReadCloser(readBuf io.ReadCloser, cancel context.CancelFunc) io.ReadCloser { + return ioutils.NewReadCloserWrapper(readBuf, func() error { + cancel() + return readBuf.Close() + }) +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (io.ReadCloser, error) { + p := pools.BufioReader32KPool + buf := p.Get(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 + return nil, err + } + + compression := DetectCompression(bs) + switch compression { + case Uncompressed: + readBufWrapper := p.NewReadCloserWrapper(buf, buf) + return readBufWrapper, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + + gzReader, err := gzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + case Bzip2: + bz2Reader := bzip2.NewReader(buf) + readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) + return readBufWrapper, nil + case Xz: + ctx, cancel := context.WithCancel(context.Background()) + + xzReader, err := xzDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) + return wrapReadCloser(readBufWrapper, cancel), nil + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. 
+func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + p := pools.BufioWriter32KPool + buf := p.Get(dest) + switch compression { + case Uncompressed: + writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) + return writeBufWrapper, nil + case Gzip: + gzWriter := gzip.NewWriter(dest) + writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) + return writeBufWrapper, nil + case Bzip2, Xz: + // archive/bzip2 does not support writing, and there is no xz support at all + // However, this is not a problem as docker only currently generates gzipped tars + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + default: + return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) + } +} + +// TarModifierFunc is a function that can be passed to ReplaceFileTarWrapper to +// modify the contents or header of an entry in the archive. If the file already +// exists in the archive the TarModifierFunc will be called with the Header and +// a reader which will return the files content. If the file does not exist both +// header and content will be nil. +type TarModifierFunc func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) + +// ReplaceFileTarWrapper converts inputTarStream to a new tar stream. Files in the +// tar stream are modified if they match any of the keys in mods. 
+func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModifierFunc) io.ReadCloser { + pipeReader, pipeWriter := io.Pipe() + + go func() { + tarReader := tar.NewReader(inputTarStream) + tarWriter := tar.NewWriter(pipeWriter) + defer inputTarStream.Close() + defer tarWriter.Close() + + modify := func(name string, original *tar.Header, modifier TarModifierFunc, tarReader io.Reader) error { + header, data, err := modifier(name, original, tarReader) + switch { + case err != nil: + return err + case header == nil: + return nil + } + + header.Name = name + header.Size = int64(len(data)) + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + if len(data) != 0 { + if _, err := tarWriter.Write(data); err != nil { + return err + } + } + return nil + } + + var err error + var originalHeader *tar.Header + for { + originalHeader, err = tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + pipeWriter.CloseWithError(err) + return + } + + modifier, ok := mods[originalHeader.Name] + if !ok { + // No modifiers for this file, copy the header and data + if err := tarWriter.WriteHeader(originalHeader); err != nil { + pipeWriter.CloseWithError(err) + return + } + if _, err := pools.Copy(tarWriter, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + continue + } + delete(mods, originalHeader.Name) + + if err := modify(originalHeader.Name, originalHeader, modifier, tarReader); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + // Apply the modifiers that haven't matched any files in the archive + for name, modifier := range mods { + if err := modify(name, nil, modifier, nil); err != nil { + pipeWriter.CloseWithError(err) + return + } + } + + pipeWriter.Close() + + }() + return pipeReader +} + +// Extension returns the extension of a file that uses the specified compression algorithm. 
+func (compression *Compression) Extension() string { + switch *compression { + case Uncompressed: + return "tar" + case Bzip2: + return "tar.bz2" + case Gzip: + return "tar.gz" + case Xz: + return "tar.xz" + } + return "" +} + +// FileInfoHeader creates a populated Header from fi. +// Compared to archive pkg this function fills in more information. +// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR), +// which have been deleted since Go 1.9 archive/tar. +func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) { + hdr, err := tar.FileInfoHeader(fi, link) + if err != nil { + return nil, err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi) + name, err = canonicalTarName(name, fi.IsDir()) + if err != nil { + return nil, fmt.Errorf("tar: cannot canonicalize path: %v", err) + } + hdr.Name = name + if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil { + return nil, err + } + return hdr, nil +} + +// fillGo18FileTypeBits fills type bits which have been removed on Go 1.9 archive/tar +// https://github.com/golang/go/commit/66b5a2f +func fillGo18FileTypeBits(mode int64, fi os.FileInfo) int64 { + fm := fi.Mode() + switch { + case fm.IsRegular(): + mode |= modeISREG + case fi.IsDir(): + mode |= modeISDIR + case fm&os.ModeSymlink != 0: + mode |= modeISLNK + case fm&os.ModeDevice != 0: + if fm&os.ModeCharDevice != 0 { + mode |= modeISCHR + } else { + mode |= modeISBLK + } + case fm&os.ModeNamedPipe != 0: + mode |= modeISFIFO + case fm&os.ModeSocket != 0: + mode |= modeISSOCK + } + return mode +} + +// ReadSecurityXattrToTarHeader reads security.capability xattr from filesystem +// to a tar header +func ReadSecurityXattrToTarHeader(path string, hdr *tar.Header) error { + capability, _ := 
system.Lgetxattr(path, "security.capability") + if capability != nil { + hdr.Xattrs = make(map[string]string) + hdr.Xattrs["security.capability"] = string(capability) + } + return nil +} + +type tarWhiteoutConverter interface { + ConvertWrite(*tar.Header, string, os.FileInfo) (*tar.Header, error) + ConvertRead(*tar.Header, string) (bool, error) +} + +type tarAppender struct { + TarWriter *tar.Writer + Buffer *bufio.Writer + + // for hardlink mapping + SeenFiles map[uint64]string + IDMappings *idtools.IDMappings + ChownOpts *idtools.IDPair + + // For packing and unpacking whiteout files in the + // non standard format. The whiteout files defined + // by the AUFS standard are used as the tar whiteout + // standard. + WhiteoutConverter tarWhiteoutConverter +} + +func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer, chownOpts *idtools.IDPair) *tarAppender { + return &tarAppender{ + SeenFiles: make(map[uint64]string), + TarWriter: tar.NewWriter(writer), + Buffer: pools.BufioWriter32KPool.Get(nil), + IDMappings: idMapping, + ChownOpts: chownOpts, + } +} + +// canonicalTarName provides a platform-independent and consistent posix-style +//path for files and directories to be archived regardless of the platform. 
+func canonicalTarName(name string, isDir bool) (string, error) { + name, err := CanonicalTarNameForPath(name) + if err != nil { + return "", err + } + + // suffix with '/' for directories + if isDir && !strings.HasSuffix(name, "/") { + name += "/" + } + return name, nil +} + +// addTarFile adds to the tar archive a file from `path` as `name` +func (ta *tarAppender) addTarFile(path, name string) error { + fi, err := os.Lstat(path) + if err != nil { + return err + } + + var link string + if fi.Mode()&os.ModeSymlink != 0 { + var err error + link, err = os.Readlink(path) + if err != nil { + return err + } + } + + hdr, err := FileInfoHeader(name, fi, link) + if err != nil { + return err + } + if err := ReadSecurityXattrToTarHeader(path, hdr); err != nil { + return err + } + + // if it's not a directory and has more than 1 link, + // it's hard linked, so set the type flag accordingly + if !fi.IsDir() && hasHardlinks(fi) { + inode, err := getInodeFromStat(fi.Sys()) + if err != nil { + return err + } + // a link should have a name that it links too + // and that linked name should be first in the tar archive + if oldpath, ok := ta.SeenFiles[inode]; ok { + hdr.Typeflag = tar.TypeLink + hdr.Linkname = oldpath + hdr.Size = 0 // This Must be here for the writer math to add up! + } else { + ta.SeenFiles[inode] = name + } + } + + //check whether the file is overlayfs whiteout + //if yes, skip re-mapping container ID mappings. + isOverlayWhiteout := fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 + + //handle re-mapping container ID mappings back to host ID mappings before + //writing tar headers/files. 
We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !isOverlayWhiteout && + !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && + !ta.IDMappings.Empty() { + fileIDPair, err := getFileUIDGID(fi.Sys()) + if err != nil { + return err + } + hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair) + if err != nil { + return err + } + } + + // explicitly override with ChownOpts + if ta.ChownOpts != nil { + hdr.Uid = ta.ChownOpts.UID + hdr.Gid = ta.ChownOpts.GID + } + + if ta.WhiteoutConverter != nil { + wo, err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi) + if err != nil { + return err + } + + // If a new whiteout file exists, write original hdr, then + // replace hdr with wo to be written after. Whiteouts should + // always be written after the original. Note the original + // hdr may have been updated to be a whiteout with returning + // a whiteout header + if wo != nil { + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + return fmt.Errorf("tar: cannot use whiteout for non-empty file") + } + hdr = wo + } + } + + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + return err + } + + if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { + // We use system.OpenSequential to ensure we use sequential file + // access on Windows to avoid depleting the standby list. + // On Linux, this equates to a regular os.Open. 
+ file, err := system.OpenSequential(path) + if err != nil { + return err + } + + ta.Buffer.Reset(ta.TarWriter) + defer ta.Buffer.Reset(nil) + _, err = io.Copy(ta.Buffer, file) + file.Close() + if err != nil { + return err + } + err = ta.Buffer.Flush() + if err != nil { + return err + } + } + + return nil +} + +func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error { + // hdr.Mode is in linux format, which we can use for sycalls, + // but for os.Foo() calls we need the mode converted to os.FileMode, + // so use hdrInfo.Mode() (they differ for e.g. setuid bits) + hdrInfo := hdr.FileInfo() + + switch hdr.Typeflag { + case tar.TypeDir: + // Create directory unless it exists as a directory already. + // In that case we just want to merge the two + if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { + if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { + return err + } + } + + case tar.TypeReg, tar.TypeRegA: + // Source is regular file. We use system.OpenFileSequential to use sequential + // file access to avoid depleting the standby list on Windows. 
+ // On Linux, this equates to a regular os.OpenFile + file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) + if err != nil { + return err + } + if _, err := io.Copy(file, reader); err != nil { + file.Close() + return err + } + file.Close() + + case tar.TypeBlock, tar.TypeChar: + if inUserns { // cannot create devices in a userns + return nil + } + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeFifo: + // Handle this is an OS-specific way + if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { + return err + } + + case tar.TypeLink: + targetPath := filepath.Join(extractDir, hdr.Linkname) + // check for hardlink breakout + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) + } + if err := os.Link(targetPath, path); err != nil { + return err + } + + case tar.TypeSymlink: + // path -> hdr.Linkname = targetPath + // e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file + targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) + + // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because + // that symlink would first have to be created, which would be caught earlier, at this very check: + if !strings.HasPrefix(targetPath, extractDir) { + return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) + } + if err := os.Symlink(hdr.Linkname, path); err != nil { + return err + } + + case tar.TypeXGlobalHeader: + logrus.Debug("PAX Global Extended Headers found and ignored") + return nil + + default: + return fmt.Errorf("unhandled tar header type %d", hdr.Typeflag) + } + + // Lchown is not supported on Windows. 
+ if Lchown && runtime.GOOS != "windows" { + if chownOpts == nil { + chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid} + } + if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { + return err + } + } + + var errors []string + for key, value := range hdr.Xattrs { + if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { + if err == syscall.ENOTSUP { + // We ignore errors here because not all graphdrivers support + // xattrs *cough* old versions of AUFS *cough*. However only + // ENOTSUP should be emitted in that case, otherwise we still + // bail. + errors = append(errors, err.Error()) + continue + } + return err + } + + } + + if len(errors) > 0 { + logrus.WithFields(logrus.Fields{ + "errors": errors, + }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") + } + + // There is no LChmod, so ignore mode for symlink. Also, this + // must happen after chown, as that can modify the file mode + if err := handleLChmod(hdr, path, hdrInfo); err != nil { + return err + } + + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. + aTime = hdr.ModTime + } + + // system.Chtimes doesn't support a NOFOLLOW flag atm + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { + return err + } + } else { + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} + if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { + return err + } + } + return nil +} + +// Tar creates an archive from the directory at `path`, and returns it as a +// stream of bytes. 
+func Tar(path string, compression Compression) (io.ReadCloser, error) { + return TarWithOptions(path, &TarOptions{Compression: compression}) +} + +// TarWithOptions creates an archive from the directory at `path`, only including files whose relative +// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. +func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { + + // Fix the source path to work with long path names. This is a no-op + // on platforms other than Windows. + srcPath = fixVolumePathPrefix(srcPath) + + pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns) + if err != nil { + return nil, err + } + + pipeReader, pipeWriter := io.Pipe() + + compressWriter, err := CompressStream(pipeWriter, options.Compression) + if err != nil { + return nil, err + } + + go func() { + ta := newTarAppender( + idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps), + compressWriter, + options.ChownOpts, + ) + ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat) + + defer func() { + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Errorf("Can't close tar writer: %s", err) + } + if err := compressWriter.Close(); err != nil { + logrus.Errorf("Can't close compress writer: %s", err) + } + if err := pipeWriter.Close(); err != nil { + logrus.Errorf("Can't close pipe writer: %s", err) + } + }() + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + // In general we log errors here but ignore them because + // during e.g. a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + + stat, err := os.Lstat(srcPath) + if err != nil { + return + } + + if !stat.IsDir() { + // We can't later join a non-dir with any includes because the + // 'walk' will error if "file/." is stat-ed and "file" is not a + // directory. 
So, we must split the source path and use the + // basename as the include. + if len(options.IncludeFiles) > 0 { + logrus.Warn("Tar: Can't archive a file with includes") + } + + dir, base := SplitPathDirEntry(srcPath) + srcPath = dir + options.IncludeFiles = []string{base} + } + + if len(options.IncludeFiles) == 0 { + options.IncludeFiles = []string{"."} + } + + seen := make(map[string]bool) + + for _, include := range options.IncludeFiles { + rebaseName := options.RebaseNames[include] + + walkRoot := getWalkRoot(srcPath, include) + filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { + if err != nil { + logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) + return nil + } + + relFilePath, err := filepath.Rel(srcPath, filePath) + if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { + // Error getting relative path OR we are looking + // at the source directory path. Skip in both situations. + return nil + } + + if options.IncludeSourceDir && include == "." && relFilePath != "." { + relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) + } + + skip := false + + // If "include" is an exact match for the current file + // then even if there's an "excludePatterns" pattern that + // matches it, don't skip it. IOW, assume an explicit 'include' + // is asking for that file no matter what - which is true + // for some files, like .dockerignore and Dockerfile (sometimes) + if include != relFilePath { + skip, err = pm.Matches(relFilePath) + if err != nil { + logrus.Errorf("Error matching %s: %v", relFilePath, err) + return err + } + } + + if skip { + // If we want to skip this file and its a directory + // then we should first check to see if there's an + // excludes pattern (e.g. !dir/file) that starts with this + // dir. If so then we can't skip this dir. + + // Its not a dir then so we can just return/skip. + if !f.IsDir() { + return nil + } + + // No exceptions (!...) 
in patterns so just skip dir + if !pm.Exclusions() { + return filepath.SkipDir + } + + dirSlash := relFilePath + string(filepath.Separator) + + for _, pat := range pm.Patterns() { + if !pat.Exclusion() { + continue + } + if strings.HasPrefix(pat.String()+string(filepath.Separator), dirSlash) { + // found a match - so can't skip this dir + return nil + } + } + + // No matching exclusion dir so just skip dir + return filepath.SkipDir + } + + if seen[relFilePath] { + return nil + } + seen[relFilePath] = true + + // Rename the base resource. + if rebaseName != "" { + var replacement string + if rebaseName != string(filepath.Separator) { + // Special case the root directory to replace with an + // empty string instead so that we don't end up with + // double slashes in the paths. + replacement = rebaseName + } + + relFilePath = strings.Replace(relFilePath, include, replacement, 1) + } + + if err := ta.addTarFile(filePath, relFilePath); err != nil { + logrus.Errorf("Can't add file %s to tar: %s", filePath, err) + // if pipe is broken, stop writing tar stream to it + if err == io.ErrClosedPipe { + return err + } + } + return nil + }) + } + }() + + return pipeReader, nil +} + +// Unpack unpacks the decompressedArchive to dest with options. +func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { + tr := tar.NewReader(decompressedArchive) + trBuf := pools.BufioReader32KPool.Get(nil) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) + + // Iterate through the files in the archive. +loop: + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return err + } + + // Normalize name, for safety and for a simple is-root check + // This keeps "../" as-is, but normalizes "/../" to "/". 
Or Windows: + // This keeps "..\" as-is, but normalizes "\..\" to "\". + hdr.Name = filepath.Clean(hdr.Name) + + for _, exclude := range options.ExcludePatterns { + if strings.HasPrefix(hdr.Name, exclude) { + continue loop + } + } + + // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in + // the filepath format for the OS on which the daemon is running. Hence + // the check for a slash-suffix MUST be done in an OS-agnostic way. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs) + if err != nil { + return err + } + } + } + + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return err + } + if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + + // If path exits we almost always just want to remove and replace it + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). + if fi, err := os.Lstat(path); err == nil { + if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing directory with a non-directory from the archive. + return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) + } + + if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { + // If NoOverwriteDirNonDir is true then we cannot replace + // an existing non-directory with a directory from the archive. 
+ return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) + } + + if fi.IsDir() && hdr.Name == "." { + continue + } + + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return err + } + } + } + trBuf.Reset(tr) + + if err := remapIDs(idMappings, hdr); err != nil { + return err + } + + if whiteoutConverter != nil { + writeFile, err := whiteoutConverter.ConvertRead(hdr, path) + if err != nil { + return err + } + if !writeFile { + continue + } + } + + if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts, options.InUserNS); err != nil { + return err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return err + } + } + return nil +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. +// FIXME: specify behavior when target path exists vs. doesn't exist. +func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. 
+func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + dest = filepath.Clean(dest) + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + r := tarArchive + if decompress { + decompressedArchive, err := DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return Unpack(r, dest, options) +} + +// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. +// If either Tar or Untar fails, TarUntar aborts and returns the error. +func (archiver *Archiver) TarUntar(src, dst string) error { + logrus.Debugf("TarUntar(%s %s)", src, dst) + archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// UntarPath untar a file from path to a destination, src is the source tar file path. +func (archiver *Archiver) UntarPath(src, dst string) error { + archive, err := os.Open(src) + if err != nil { + return err + } + defer archive.Close() + options := &TarOptions{ + UIDMaps: archiver.IDMappingsVar.UIDs(), + GIDMaps: archiver.IDMappingsVar.GIDs(), + } + return archiver.Untar(archive, dst, options) +} + +// CopyWithTar creates a tar archive of filesystem path `src`, and +// unpacks it at filesystem path `dst`. +// The archive is streamed directly with fixed buffering and no +// intermediary disk IO. 
+func (archiver *Archiver) CopyWithTar(src, dst string) error { + srcSt, err := os.Stat(src) + if err != nil { + return err + } + if !srcSt.IsDir() { + return archiver.CopyFileWithTar(src, dst) + } + + // if this Archiver is set up with ID mapping we need to create + // the new destination directory with the remapped root UID/GID pair + // as owner + rootIDs := archiver.IDMappingsVar.RootPair() + // Create dst, copy src's content into it + logrus.Debugf("Creating dest directory: %s", dst) + if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil { + return err + } + logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) + return archiver.TarUntar(src, dst) +} + +// CopyFileWithTar emulates the behavior of the 'cp' command-line +// for a single file. It copies a regular file from path `src` to +// path `dst`, and preserves all its metadata. +func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { + logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) + srcSt, err := os.Stat(src) + if err != nil { + return err + } + + if srcSt.IsDir() { + return fmt.Errorf("Can't copy a directory") + } + + // Clean up the trailing slash. This must be done in an operating + // system specific manner. 
+ if dst[len(dst)-1] == os.PathSeparator { + dst = filepath.Join(dst, filepath.Base(src)) + } + // Create the holding directory if necessary + if err := system.MkdirAll(filepath.Dir(dst), 0700, ""); err != nil { + return err + } + + r, w := io.Pipe() + errC := make(chan error, 1) + + go func() { + defer close(errC) + + errC <- func() error { + defer w.Close() + + srcF, err := os.Open(src) + if err != nil { + return err + } + defer srcF.Close() + + hdr, err := tar.FileInfoHeader(srcSt, "") + if err != nil { + return err + } + hdr.Format = tar.FormatPAX + hdr.ModTime = hdr.ModTime.Truncate(time.Second) + hdr.AccessTime = time.Time{} + hdr.ChangeTime = time.Time{} + hdr.Name = filepath.Base(dst) + hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) + + if err := remapIDs(archiver.IDMappingsVar, hdr); err != nil { + return err + } + + tw := tar.NewWriter(w) + defer tw.Close() + if err := tw.WriteHeader(hdr); err != nil { + return err + } + if _, err := io.Copy(tw, srcF); err != nil { + return err + } + return nil + }() + }() + defer func() { + if er := <-errC; err == nil && er != nil { + err = er + } + }() + + err = archiver.Untar(r, filepath.Dir(dst), nil) + if err != nil { + r.CloseWithError(err) + } + return err +} + +// IDMappings returns the IDMappings of the archiver. +func (archiver *Archiver) IDMappings() *idtools.IDMappings { + return archiver.IDMappingsVar +} + +func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error { + ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}) + hdr.Uid, hdr.Gid = ids.UID, ids.GID + return err +} + +// cmdStream executes a command, and returns its stdout as a stream. +// If the command fails to run or doesn't complete successfully, an error +// will be returned, including anything written on stderr. 
+func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, error) { + cmd.Stdin = input + pipeR, pipeW := io.Pipe() + cmd.Stdout = pipeW + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + // Run the command and return the pipe + if err := cmd.Start(); err != nil { + return nil, err + } + + // Copy stdout to the returned pipe + go func() { + if err := cmd.Wait(); err != nil { + pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + pipeW.Close() + } + }() + + return pipeR, nil +} + +// NewTempArchive reads the content of src into a temporary file, and returns the contents +// of that file as an archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +func NewTempArchive(src io.Reader, dir string) (*TempArchive, error) { + f, err := ioutil.TempFile(dir, "") + if err != nil { + return nil, err + } + if _, err := io.Copy(f, src); err != nil { + return nil, err + } + if _, err := f.Seek(0, 0); err != nil { + return nil, err + } + st, err := f.Stat() + if err != nil { + return nil, err + } + size := st.Size() + return &TempArchive{File: f, Size: size}, nil +} + +// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, +// the file will be deleted. +type TempArchive struct { + *os.File + Size int64 // Pre-computed from Stat().Size() as a convenience + read int64 + closed bool +} + +// Close closes the underlying file if it's still open, or does a no-op +// to allow callers to try to close the TempArchive multiple times safely. 
+func (archive *TempArchive) Close() error { + if archive.closed { + return nil + } + + archive.closed = true + + return archive.File.Close() +} + +func (archive *TempArchive) Read(data []byte) (int, error) { + n, err := archive.File.Read(data) + archive.read += int64(n) + if err != nil || archive.read == archive.Size { + archive.Close() + os.Remove(archive.File.Name()) + } + return n, err +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_linux.go b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go new file mode 100644 index 000000000000..970d4d06800d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_linux.go @@ -0,0 +1,92 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + if format == OverlayWhiteoutFormat { + return overlayWhiteoutConverter{} + } + return nil +} + +type overlayWhiteoutConverter struct{} + +func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) { + // convert whiteouts to AUFS format + if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { + // we just rename the file and make it normal + dir, filename := filepath.Split(hdr.Name) + hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) + hdr.Mode = 0600 + hdr.Typeflag = tar.TypeReg + hdr.Size = 0 + } + + if fi.Mode()&os.ModeDir != 0 { + // convert opaque dirs to AUFS format by writing an empty file with the prefix + opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") + if err != nil { + return nil, err + } + if len(opaque) == 1 && opaque[0] == 'y' { + if hdr.Xattrs != nil { + delete(hdr.Xattrs, "trusted.overlay.opaque") + } + + // create a header for the whiteout file + // it should 
inherit some properties from the parent, but be a regular file + wo = &tar.Header{ + Typeflag: tar.TypeReg, + Mode: hdr.Mode & int64(os.ModePerm), + Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir), + Size: 0, + Uid: hdr.Uid, + Uname: hdr.Uname, + Gid: hdr.Gid, + Gname: hdr.Gname, + AccessTime: hdr.AccessTime, + ChangeTime: hdr.ChangeTime, + } + } + } + + return +} + +func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { + base := filepath.Base(path) + dir := filepath.Dir(path) + + // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay + if base == WhiteoutOpaqueDir { + err := unix.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0) + // don't write the file itself + return false, err + } + + // if a file was deleted and we are using overlay, we need to create a character device + if strings.HasPrefix(base, WhiteoutPrefix) { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + + if err := unix.Mknod(originalPath, unix.S_IFCHR, 0); err != nil { + return false, err + } + if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { + return false, err + } + + // don't write the file itself + return false, nil + } + + return true, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_other.go b/vendor/github.com/docker/docker/pkg/archive/archive_other.go new file mode 100644 index 000000000000..462dfc632325 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_other.go @@ -0,0 +1,7 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go new file mode 100644 index 000000000000..e81076c17010 --- 
/dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_unix.go @@ -0,0 +1,114 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "os" + "path/filepath" + "syscall" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return srcPath +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. On Linux, we +// can't use filepath.Join(srcPath,include) because this will clean away +// a trailing "." or "/" which may be important. +func getWalkRoot(srcPath string, include string) string { + return srcPath + string(filepath.Separator) + include +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + return p, nil // already unix-style +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. 
+ +func chmodTarEntry(perm os.FileMode) os.FileMode { + return perm // noop for unix as golang APIs provide perm bits correctly +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + // Currently go does not fill in the major/minors + if s.Mode&unix.S_IFBLK != 0 || + s.Mode&unix.S_IFCHR != 0 { + hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) // nolint: unconvert + hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) // nolint: unconvert + } + } + + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + s, ok := stat.(*syscall.Stat_t) + + if ok { + inode = s.Ino + } + + return +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + s, ok := stat.(*syscall.Stat_t) + + if !ok { + return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t") + } + return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + if rsystem.RunningInUserNS() { + // cannot create a device if running in user namespace + return nil + } + + mode := uint32(hdr.Mode & 07777) + switch hdr.Typeflag { + case tar.TypeBlock: + mode |= unix.S_IFBLK + case tar.TypeChar: + mode |= unix.S_IFCHR + case tar.TypeFifo: + mode |= unix.S_IFIFO + } + + return system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))) +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + if hdr.Typeflag == tar.TypeLink { + if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + } else if hdr.Typeflag != tar.TypeSymlink { + if err := os.Chmod(path, hdrInfo.Mode()); err != nil { + return err + } + } + 
return nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go new file mode 100644 index 000000000000..69aadd823c8a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/archive_windows.go @@ -0,0 +1,77 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/longpath" +) + +// fixVolumePathPrefix does platform specific processing to ensure that if +// the path being passed in is not in a volume path format, convert it to one. +func fixVolumePathPrefix(srcPath string) string { + return longpath.AddPrefix(srcPath) +} + +// getWalkRoot calculates the root path when performing a TarWithOptions. +// We use a separate function as this is platform specific. +func getWalkRoot(srcPath string, include string) string { + return filepath.Join(srcPath, include) +} + +// CanonicalTarNameForPath returns platform-specific filepath +// to canonical posix-style path for tar archival. p is relative +// path. +func CanonicalTarNameForPath(p string) (string, error) { + // windows: convert windows style relative path with backslashes + // into forward slashes. Since windows does not allow '/' or '\' + // in file names, it is mostly safe to replace however we must + // check just in case + if strings.Contains(p, "/") { + return "", fmt.Errorf("Windows path contains forward slash: %s", p) + } + return strings.Replace(p, string(os.PathSeparator), "/", -1), nil + +} + +// chmodTarEntry is used to adjust the file permissions used in tar header based +// on the platform the archival is done. +func chmodTarEntry(perm os.FileMode) os.FileMode { + //perm &= 0755 // this 0-ed out tar flags (like link, regular file, directory marker etc.) 
+ permPart := perm & os.ModePerm + noPermPart := perm &^ os.ModePerm + // Add the x bit: make everything +x from windows + permPart |= 0111 + permPart &= 0755 + + return noPermPart | permPart +} + +func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) { + // do nothing. no notion of Rdev, Nlink in stat on Windows + return +} + +func getInodeFromStat(stat interface{}) (inode uint64, err error) { + // do nothing. no notion of Inode in stat on Windows + return +} + +// handleTarTypeBlockCharFifo is an OS-specific helper function used by +// createTarFile to handle the following types of header: Block; Char; Fifo +func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { + return nil +} + +func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { + return nil +} + +func getFileUIDGID(stat interface{}) (idtools.IDPair, error) { + // no notion of file ownership mapping yet on Windows + return idtools.IDPair{UID: 0, GID: 0}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/docker/docker/pkg/archive/changes.go new file mode 100644 index 000000000000..43734db5b117 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes.go @@ -0,0 +1,441 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strings" + "syscall" + "time" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// ChangeType represents the change type. +type ChangeType int + +const ( + // ChangeModify represents the modify operation. + ChangeModify = iota + // ChangeAdd represents the add operation. + ChangeAdd + // ChangeDelete represents the delete operation. 
+ ChangeDelete +) + +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + +// Change represents a change, it wraps the change type and path. +// It describes changes of the files in the path respect to the +// parent layers. The change could be modify, add, delete. +// This is used for layer diff. +type Change struct { + Path string + Kind ChangeType +} + +func (change *Change) String() string { + return fmt.Sprintf("%s %s", change.Kind, change.Path) +} + +// for sort.Sort +type changesByPath []Change + +func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } +func (c changesByPath) Len() int { return len(c) } +func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } + +// Gnu tar and the go tar writer don't have sub-second mtime +// precision, which is problematic when we apply changes via tar +// files, we handle this by comparing for exact times, *or* same +// second count and either a or b having exactly 0 nanoseconds +func sameFsTime(a, b time.Time) bool { + return a == b || + (a.Unix() == b.Unix() && + (a.Nanosecond() == 0 || b.Nanosecond() == 0)) +} + +func sameFsTimeSpec(a, b syscall.Timespec) bool { + return a.Sec == b.Sec && + (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) +} + +// Changes walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func Changes(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) +} + +func aufsMetadataSkip(path string) (skip bool, err error) { + skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) + if err != nil { + skip = true + } + return +} + +func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { + f := filepath.Base(path) + + // If there is a whiteout, then the file was removed + if strings.HasPrefix(f, 
WhiteoutPrefix) { + originalFile := f[len(WhiteoutPrefix):] + return filepath.Join(filepath.Dir(path), originalFile), nil + } + + return "", nil +} + +type skipChange func(string) (bool, error) +type deleteChange func(string, string, os.FileInfo) (string, error) + +func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { + var ( + changes []Change + changedDirs = make(map[string]struct{}) + ) + + err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + path, err = filepath.Rel(rw, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + path = filepath.Join(string(os.PathSeparator), path) + + // Skip root + if path == string(os.PathSeparator) { + return nil + } + + if sc != nil { + if skip, err := sc(path); skip { + return err + } + } + + change := Change{ + Path: path, + } + + deletedFile, err := dc(rw, path, f) + if err != nil { + return err + } + + // Find out what kind of modification happened + if deletedFile != "" { + change.Path = deletedFile + change.Kind = ChangeDelete + } else { + // Otherwise, the file was added + change.Kind = ChangeAdd + + // ...Unless it already existed in a top layer, in which case, it's a modification + for _, layer := range layers { + stat, err := os.Stat(filepath.Join(layer, path)) + if err != nil && !os.IsNotExist(err) { + return err + } + if err == nil { + // The file existed in the top layer, so that's a modification + + // However, if it's a directory, maybe it wasn't actually modified. 
+ // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar + if stat.IsDir() && f.IsDir() { + if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { + // Both directories are the same, don't record the change + return nil + } + } + change.Kind = ChangeModify + break + } + } + } + + // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. + // This block is here to ensure the change is recorded even if the + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. + // Check https://github.com/docker/docker/pull/13590 for details. + if f.IsDir() { + changedDirs[path] = struct{}{} + } + if change.Kind == ChangeAdd || change.Kind == ChangeDelete { + parent := filepath.Dir(path) + if _, ok := changedDirs[parent]; !ok && parent != "/" { + changes = append(changes, Change{Path: parent, Kind: ChangeModify}) + changedDirs[parent] = struct{}{} + } + } + + // Record change + changes = append(changes, change) + return nil + }) + if err != nil && !os.IsNotExist(err) { + return nil, err + } + return changes, nil +} + +// FileInfo describes the information of a file. +type FileInfo struct { + parent *FileInfo + name string + stat *system.StatT + children map[string]*FileInfo + capability []byte + added bool +} + +// LookUp looks up the file information of a file. +func (info *FileInfo) LookUp(path string) *FileInfo { + // As this runs on the daemon side, file paths are OS specific. 
+ parent := info + if path == string(os.PathSeparator) { + return info + } + + pathElements := strings.Split(path, string(os.PathSeparator)) + for _, elem := range pathElements { + if elem != "" { + child := parent.children[elem] + if child == nil { + return nil + } + parent = child + } + } + return parent +} + +func (info *FileInfo) path() string { + if info.parent == nil { + // As this runs on the daemon side, file paths are OS specific. + return string(os.PathSeparator) + } + return filepath.Join(info.parent.path(), info.name) +} + +func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { + + sizeAtEntry := len(*changes) + + if oldInfo == nil { + // add + change := Change{ + Path: info.path(), + Kind: ChangeAdd, + } + *changes = append(*changes, change) + info.added = true + } + + // We make a copy so we can modify it to detect additions + // also, we only recurse on the old dir if the new info is a directory + // otherwise any previous delete/change is considered recursive + oldChildren := make(map[string]*FileInfo) + if oldInfo != nil && info.isDir() { + for k, v := range oldInfo.children { + oldChildren[k] = v + } + } + + for name, newChild := range info.children { + oldChild := oldChildren[name] + if oldChild != nil { + // change? + oldStat := oldChild.stat + newStat := newChild.stat + // Note: We can't compare inode or ctime or blocksize here, because these change + // when copying a file into a container. However, that is not generally a problem + // because any content change will change mtime, and any status change should + // be visible when actually comparing the stat fields. 
The only time this + // breaks down is if some code intentionally hides a change by setting + // back mtime + if statDifferent(oldStat, newStat) || + !bytes.Equal(oldChild.capability, newChild.capability) { + change := Change{ + Path: newChild.path(), + Kind: ChangeModify, + } + *changes = append(*changes, change) + newChild.added = true + } + + // Remove from copy so we can detect deletions + delete(oldChildren, name) + } + + newChild.addChanges(oldChild, changes) + } + for _, oldChild := range oldChildren { + // delete + change := Change{ + Path: oldChild.path(), + Kind: ChangeDelete, + } + *changes = append(*changes, change) + } + + // If there were changes inside this directory, we need to add it, even if the directory + // itself wasn't changed. This is needed to properly save and restore filesystem permissions. + // As this runs on the daemon side, file paths are OS specific. + if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { + change := Change{ + Path: info.path(), + Kind: ChangeModify, + } + // Let's insert the directory entry before the recently added entries located inside this dir + *changes = append(*changes, change) // just to resize the slice, will be overwritten + copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) + (*changes)[sizeAtEntry] = change + } + +} + +// Changes add changes to file information. +func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { + var changes []Change + + info.addChanges(oldInfo, &changes) + + return changes +} + +func newRootFileInfo() *FileInfo { + // As this runs on the daemon side, file paths are OS specific. + root := &FileInfo{ + name: string(os.PathSeparator), + children: make(map[string]*FileInfo), + } + return root +} + +// ChangesDirs compares two directories and generates an array of Change objects describing the changes. +// If oldDir is "", then all files in newDir will be Add-Changes. 
+func ChangesDirs(newDir, oldDir string) ([]Change, error) { + var ( + oldRoot, newRoot *FileInfo + ) + if oldDir == "" { + emptyDir, err := ioutil.TempDir("", "empty") + if err != nil { + return nil, err + } + defer os.Remove(emptyDir) + oldDir = emptyDir + } + oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) + if err != nil { + return nil, err + } + + return newRoot.Changes(oldRoot), nil +} + +// ChangesSize calculates the size in bytes of the provided changes, based on newDir. +func ChangesSize(newDir string, changes []Change) int64 { + var ( + size int64 + sf = make(map[uint64]struct{}) + ) + for _, change := range changes { + if change.Kind == ChangeModify || change.Kind == ChangeAdd { + file := filepath.Join(newDir, change.Path) + fileInfo, err := os.Lstat(file) + if err != nil { + logrus.Errorf("Can not stat %q: %s", file, err) + continue + } + + if fileInfo != nil && !fileInfo.IsDir() { + if hasHardlinks(fileInfo) { + inode := getIno(fileInfo) + if _, ok := sf[inode]; !ok { + size += fileInfo.Size() + sf[inode] = struct{}{} + } + } else { + size += fileInfo.Size() + } + } + } + } + return size +} + +// ExportChanges produces an Archive from the provided changes, relative to dir. +func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) { + reader, writer := io.Pipe() + go func() { + ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer, nil) + + // this buffer is needed for the duration of this piped stream + defer pools.BufioWriter32KPool.Put(ta.Buffer) + + sort.Sort(changesByPath(changes)) + + // In general we log errors here but ignore them because + // during e.g. 
a diff operation the container can continue + // mutating the filesystem and we can see transient errors + // from this + for _, change := range changes { + if change.Kind == ChangeDelete { + whiteOutDir := filepath.Dir(change.Path) + whiteOutBase := filepath.Base(change.Path) + whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) + timestamp := time.Now() + hdr := &tar.Header{ + Name: whiteOut[1:], + Size: 0, + ModTime: timestamp, + AccessTime: timestamp, + ChangeTime: timestamp, + } + if err := ta.TarWriter.WriteHeader(hdr); err != nil { + logrus.Debugf("Can't write whiteout header: %s", err) + } + } else { + path := filepath.Join(dir, change.Path) + if err := ta.addTarFile(path, change.Path[1:]); err != nil { + logrus.Debugf("Can't add file %s to tar: %s", path, err) + } + } + } + + // Make sure to check the error on Close. + if err := ta.TarWriter.Close(); err != nil { + logrus.Debugf("Can't close layer: %s", err) + } + if err := writer.Close(); err != nil { + logrus.Debugf("failed close Changes writer: %s", err) + } + }() + return reader, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_linux.go b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go new file mode 100644 index 000000000000..78a5393c8ea5 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_linux.go @@ -0,0 +1,313 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "sort" + "syscall" + "unsafe" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +// walker is used to implement collectFileInfoForChanges on linux. Where this +// method in general returns the entire contents of two directory trees, we +// optimize some FS calls out on linux. 
In particular, we take advantage of the
+// fact that getdents(2) returns the inode of each file in the directory being
+// walked, which, when walking two trees in parallel to generate a list of
+// changes, can be used to prune subtrees without ever having to lstat(2) them
+// directly. Eliminating stat calls in this way can save up to seconds on large
+// images.
+type walker struct {
+	dir1  string
+	dir2  string
+	root1 *FileInfo
+	root2 *FileInfo
+}
+
+// collectFileInfoForChanges returns a complete representation of the trees
+// rooted at dir1 and dir2, with one important exception: any subtree or
+// leaf where the inode and device numbers are an exact match between dir1
+// and dir2 will be pruned from the results. This method is *only* to be used
+// for generating a list of changes between the two directories, as it does not
+// reflect the full contents.
+func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
+	w := &walker{
+		dir1:  dir1,
+		dir2:  dir2,
+		root1: newRootFileInfo(),
+		root2: newRootFileInfo(),
+	}
+
+	i1, err := os.Lstat(w.dir1)
+	if err != nil {
+		return nil, nil, err
+	}
+	i2, err := os.Lstat(w.dir2)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	if err := w.walk("/", i1, i2); err != nil {
+		return nil, nil, err
+	}
+
+	return w.root1, w.root2, nil
+}
+
+// Given a FileInfo, its path info, and a reference to the root of the tree
+// being constructed, register this file with the tree.
+func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { + if fi == nil { + return nil + } + parent := root.LookUp(filepath.Dir(path)) + if parent == nil { + return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path) + } + info := &FileInfo{ + name: filepath.Base(path), + children: make(map[string]*FileInfo), + parent: parent, + } + cpath := filepath.Join(dir, path) + stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) + if err != nil { + return err + } + info.stat = stat + info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access + parent.children[info.name] = info + return nil +} + +// Walk a subtree rooted at the same path in both trees being iterated. For +// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d +func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { + // Register these nodes with the return trees, unless we're still at the + // (already-created) roots: + if path != "/" { + if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { + return err + } + if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { + return err + } + } + + is1Dir := i1 != nil && i1.IsDir() + is2Dir := i2 != nil && i2.IsDir() + + sameDevice := false + if i1 != nil && i2 != nil { + si1 := i1.Sys().(*syscall.Stat_t) + si2 := i2.Sys().(*syscall.Stat_t) + if si1.Dev == si2.Dev { + sameDevice = true + } + } + + // If these files are both non-existent, or leaves (non-dirs), we are done. 
+ if !is1Dir && !is2Dir { + return nil + } + + // Fetch the names of all the files contained in both directories being walked: + var names1, names2 []nameIno + if is1Dir { + names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access + if err != nil { + return err + } + } + if is2Dir { + names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access + if err != nil { + return err + } + } + + // We have lists of the files contained in both parallel directories, sorted + // in the same order. Walk them in parallel, generating a unique merged list + // of all items present in either or both directories. + var names []string + ix1 := 0 + ix2 := 0 + + for { + if ix1 >= len(names1) { + break + } + if ix2 >= len(names2) { + break + } + + ni1 := names1[ix1] + ni2 := names2[ix2] + + switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { + case -1: // ni1 < ni2 -- advance ni1 + // we will not encounter ni1 in names2 + names = append(names, ni1.name) + ix1++ + case 0: // ni1 == ni2 + if ni1.ino != ni2.ino || !sameDevice { + names = append(names, ni1.name) + } + ix1++ + ix2++ + case 1: // ni1 > ni2 -- advance ni2 + // we will not encounter ni2 in names1 + names = append(names, ni2.name) + ix2++ + } + } + for ix1 < len(names1) { + names = append(names, names1[ix1].name) + ix1++ + } + for ix2 < len(names2) { + names = append(names, names2[ix2].name) + ix2++ + } + + // For each of the names present in either or both of the directories being + // iterated, stat the name under each root, and recurse the pair of them: + for _, name := range names { + fname := filepath.Join(path, name) + var cInfo1, cInfo2 os.FileInfo + if is1Dir { + cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if is2Dir { + cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access + if err != nil && !os.IsNotExist(err) { + return err + } + } + if err = 
w.walk(fname, cInfo1, cInfo2); err != nil { + return err + } + } + return nil +} + +// {name,inode} pairs used to support the early-pruning logic of the walker type +type nameIno struct { + name string + ino uint64 +} + +type nameInoSlice []nameIno + +func (s nameInoSlice) Len() int { return len(s) } +func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } + +// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode +// numbers further up the stack when reading directory contents. Unlike +// os.Readdirnames, which returns a list of filenames, this function returns a +// list of {filename,inode} pairs. +func readdirnames(dirname string) (names []nameIno, err error) { + var ( + size = 100 + buf = make([]byte, 4096) + nbuf int + bufp int + nb int + ) + + f, err := os.Open(dirname) + if err != nil { + return nil, err + } + defer f.Close() + + names = make([]nameIno, 0, size) // Empty with room to grow. + for { + // Refill the buffer if necessary + if bufp >= nbuf { + bufp = 0 + nbuf, err = unix.ReadDirent(int(f.Fd()), buf) // getdents on linux + if nbuf < 0 { + nbuf = 0 + } + if err != nil { + return nil, os.NewSyscallError("readdirent", err) + } + if nbuf <= 0 { + break // EOF + } + } + + // Drain the buffer + nb, names = parseDirent(buf[bufp:nbuf], names) + bufp += nb + } + + sl := nameInoSlice(names) + sort.Sort(sl) + return sl, nil +} + +// parseDirent is a minor modification of unix.ParseDirent (linux version) +// which returns {name,inode} pairs instead of just names. +func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { + origlen := len(buf) + for len(buf) > 0 { + dirent := (*unix.Dirent)(unsafe.Pointer(&buf[0])) + buf = buf[dirent.Reclen:] + if dirent.Ino == 0 { // File absent in directory. + continue + } + bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) + var name = string(bytes[0:clen(bytes[:])]) + if name == "." 
|| name == ".." { // Useless names + continue + } + names = append(names, nameIno{name, dirent.Ino}) + } + return origlen - len(buf), names +} + +func clen(n []byte) int { + for i := 0; i < len(n); i++ { + if n[i] == 0 { + return i + } + } + return len(n) +} + +// OverlayChanges walks the path rw and determines changes for the files in the path, +// with respect to the parent layers +func OverlayChanges(layers []string, rw string) ([]Change, error) { + return changes(layers, rw, overlayDeletedFile, nil) +} + +func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { + if fi.Mode()&os.ModeCharDevice != 0 { + s := fi.Sys().(*syscall.Stat_t) + if unix.Major(uint64(s.Rdev)) == 0 && unix.Minor(uint64(s.Rdev)) == 0 { // nolint: unconvert + return path, nil + } + } + if fi.Mode()&os.ModeDir != 0 { + opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") + if err != nil { + return "", err + } + if len(opaque) == 1 && opaque[0] == 'y' { + return path, nil + } + } + + return "", nil + +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_other.go b/vendor/github.com/docker/docker/pkg/archive/changes_other.go new file mode 100644 index 000000000000..ba744741cd02 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_other.go @@ -0,0 +1,97 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "fmt" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/system" +) + +func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { + var ( + oldRoot, newRoot *FileInfo + err1, err2 error + errs = make(chan error, 2) + ) + go func() { + oldRoot, err1 = collectFileInfo(oldDir) + errs <- err1 + }() + go func() { + newRoot, err2 = collectFileInfo(newDir) + errs <- err2 + }() + + // block until both routines have returned + for i := 0; i < 2; i++ { + if err := <-errs; 
err != nil { + return nil, nil, err + } + } + + return oldRoot, newRoot, nil +} + +func collectFileInfo(sourceDir string) (*FileInfo, error) { + root := newRootFileInfo() + + err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { + if err != nil { + return err + } + + // Rebase path + relPath, err := filepath.Rel(sourceDir, path) + if err != nil { + return err + } + + // As this runs on the daemon side, file paths are OS specific. + relPath = filepath.Join(string(os.PathSeparator), relPath) + + // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. + // Temporary workaround. If the returned path starts with two backslashes, + // trim it down to a single backslash. Only relevant on Windows. + if runtime.GOOS == "windows" { + if strings.HasPrefix(relPath, `\\`) { + relPath = relPath[1:] + } + } + + if relPath == string(os.PathSeparator) { + return nil + } + + parent := root.LookUp(filepath.Dir(relPath)) + if parent == nil { + return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) + } + + info := &FileInfo{ + name: filepath.Base(relPath), + children: make(map[string]*FileInfo), + parent: parent, + } + + s, err := system.Lstat(path) + if err != nil { + return err + } + info.stat = s + + info.capability, _ = system.Lgetxattr(path, "security.capability") + + parent.children[info.name] = info + + return nil + }) + if err != nil { + return nil, err + } + return root, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_unix.go b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go new file mode 100644 index 000000000000..c06a209d8ed1 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_unix.go @@ -0,0 +1,37 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + "syscall" + + "github.com/docker/docker/pkg/system" + "golang.org/x/sys/unix" +) + +func 
statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mode() != newStat.Mode() || + oldStat.UID() != newStat.UID() || + oldStat.GID() != newStat.GID() || + oldStat.Rdev() != newStat.Rdev() || + // Don't look at size for dirs, its not a good measure of change + (oldStat.Mode()&unix.S_IFDIR != unix.S_IFDIR && + (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode()&unix.S_IFDIR != 0 +} + +func getIno(fi os.FileInfo) uint64 { + return fi.Sys().(*syscall.Stat_t).Ino +} + +func hasHardlinks(fi os.FileInfo) bool { + return fi.Sys().(*syscall.Stat_t).Nlink > 1 +} diff --git a/vendor/github.com/docker/docker/pkg/archive/changes_windows.go b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go new file mode 100644 index 000000000000..6555c01368c6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/changes_windows.go @@ -0,0 +1,30 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "os" + + "github.com/docker/docker/pkg/system" +) + +func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { + + // Don't look at size for dirs, its not a good measure of change + if oldStat.Mtim() != newStat.Mtim() || + oldStat.Mode() != newStat.Mode() || + oldStat.Size() != newStat.Size() && !oldStat.Mode().IsDir() { + return true + } + return false +} + +func (info *FileInfo) isDir() bool { + return info.parent == nil || info.stat.Mode().IsDir() +} + +func getIno(fi os.FileInfo) (inode uint64) { + return +} + +func hasHardlinks(fi os.FileInfo) bool { + return false +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/docker/docker/pkg/archive/copy.go new file mode 100644 index 
000000000000..d0f13ca79beb --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy.go @@ -0,0 +1,472 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "errors" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// Errors used or returned by this file. +var ( + ErrNotDirectory = errors.New("not a directory") + ErrDirNotExists = errors.New("no such directory") + ErrCannotCopyDir = errors.New("cannot copy directory") + ErrInvalidCopySource = errors.New("invalid copy source content") +) + +// PreserveTrailingDotOrSeparator returns the given cleaned path (after +// processing using any utility functions from the path or filepath stdlib +// packages) and appends a trailing `/.` or `/` if its corresponding original +// path (from before being processed by utility functions from the path or +// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned +// path already ends in a `.` path segment, then another is not added. If the +// clean path already ends in the separator, then another is not added. +func PreserveTrailingDotOrSeparator(cleanedPath string, originalPath string, sep byte) string { + // Ensure paths are in platform semantics + cleanedPath = strings.Replace(cleanedPath, "/", string(sep), -1) + originalPath = strings.Replace(originalPath, "/", string(sep), -1) + + if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { + if !hasTrailingPathSeparator(cleanedPath, sep) { + // Add a separator if it doesn't already end with one (a cleaned + // path would only end in a separator if it is the root). + cleanedPath += string(sep) + } + cleanedPath += "." 
+ } + + if !hasTrailingPathSeparator(cleanedPath, sep) && hasTrailingPathSeparator(originalPath, sep) { + cleanedPath += string(sep) + } + + return cleanedPath +} + +// assertsDirectory returns whether the given path is +// asserted to be a directory, i.e., the path ends with +// a trailing '/' or `/.`, assuming a path separator of `/`. +func assertsDirectory(path string, sep byte) bool { + return hasTrailingPathSeparator(path, sep) || specifiesCurrentDir(path) +} + +// hasTrailingPathSeparator returns whether the given +// path ends with the system's path separator character. +func hasTrailingPathSeparator(path string, sep byte) bool { + return len(path) > 0 && path[len(path)-1] == sep +} + +// specifiesCurrentDir returns whether the given path specifies +// a "current directory", i.e., the last path segment is `.`. +func specifiesCurrentDir(path string) bool { + return filepath.Base(path) == "." +} + +// SplitPathDirEntry splits the given path between its directory name and its +// basename by first cleaning the path but preserves a trailing "." if the +// original path specified the current directory. +func SplitPathDirEntry(path string) (dir, base string) { + cleanedPath := filepath.Clean(filepath.FromSlash(path)) + + if specifiesCurrentDir(path) { + cleanedPath += string(os.PathSeparator) + "." + } + + return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) +} + +// TarResource archives the resource described by the given CopyInfo to a Tar +// archive. A non-nil error is returned if sourcePath does not exist or is +// asserted to be a directory but exists as another type of file. +// +// This function acts as a convenient wrapper around TarWithOptions, which +// requires a directory as the source path. TarResource accepts either a +// directory or a file path and correctly sets the Tar options. 
+func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
+	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
+}
+
+// TarResourceRebase is like TarResource but renames the first path element of
+// items in the resulting tar archive to match the given rebaseName if not "".
+func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
+	sourcePath = normalizePath(sourcePath)
+	if _, err = os.Lstat(sourcePath); err != nil {
+		// Catches the case where the source does not exist or is not a
+		// directory if asserted to be a directory, as this also causes an
+		// error.
+		return
+	}
+
+	// Separate the source path between its directory and
+	// the entry in that directory which we are archiving.
+	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
+	opts := TarResourceRebaseOpts(sourceBase, rebaseName)
+
+	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
+	return TarWithOptions(sourceDir, opts)
+}
+
+// TarResourceRebaseOpts does not perform the Tar, but instead just creates the rebase
+// parameters to be sent to TarWithOptions (the TarOptions struct)
+func TarResourceRebaseOpts(sourceBase string, rebaseName string) *TarOptions {
+	filter := []string{sourceBase}
+	return &TarOptions{
+		Compression:      Uncompressed,
+		IncludeFiles:     filter,
+		IncludeSourceDir: true,
+		RebaseNames: map[string]string{
+			sourceBase: rebaseName,
+		},
+	}
+}
+
+// CopyInfo holds basic info about the source
+// or destination path of a copy operation.
+type CopyInfo struct {
+	Path       string
+	Exists     bool
+	IsDir      bool
+	RebaseName string
+}
+
+// CopyInfoSourcePath stats the given path to create a CopyInfo
+// struct representing that resource for the source of an archive copy
+// operation. The given path should be an absolute local path. A source path
+// has all symlinks evaluated that appear before the last path separator ("/"
+// on Unix). As it is to be a copy source, the path must exist.
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set + path = normalizePath(path) + + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) + if err != nil { + return CopyInfo{}, err + } + + stat, err := os.Lstat(resolvedPath) + if err != nil { + return CopyInfo{}, err + } + + return CopyInfo{ + Path: resolvedPath, + Exists: true, + IsDir: stat.IsDir(), + RebaseName: rebaseName, + }, nil +} + +// CopyInfoDestinationPath stats the given path to create a CopyInfo +// struct representing that resource for the destination of an archive copy +// operation. The given path should be an absolute local path. +func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { + maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. + path = normalizePath(path) + originalPath := path + + stat, err := os.Lstat(path) + + if err == nil && stat.Mode()&os.ModeSymlink == 0 { + // The path exists and is not a symlink. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil + } + + // While the path is a symlink. + for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { + if n > maxSymlinkIter { + // Don't follow symlinks more than this arbitrary number of times. + return CopyInfo{}, errors.New("too many symlinks in " + originalPath) + } + + // The path is a symbolic link. We need to evaluate it so that the + // destination of the copy operation is the link target and not the + // link itself. This is notably different than CopyInfoSourcePath which + // only evaluates symlinks before the last appearing path separator. + // Also note that it is okay if the last path element is a broken + // symlink as the copy operation should create the target. 
+ var linkTarget string + + linkTarget, err = os.Readlink(path) + if err != nil { + return CopyInfo{}, err + } + + if !system.IsAbs(linkTarget) { + // Join with the parent directory. + dstParent, _ := SplitPathDirEntry(path) + linkTarget = filepath.Join(dstParent, linkTarget) + } + + path = linkTarget + stat, err = os.Lstat(path) + } + + if err != nil { + // It's okay if the destination path doesn't exist. We can still + // continue the copy operation if the parent directory exists. + if !os.IsNotExist(err) { + return CopyInfo{}, err + } + + // Ensure destination parent dir exists. + dstParent, _ := SplitPathDirEntry(path) + + parentDirStat, err := os.Stat(dstParent) + if err != nil { + return CopyInfo{}, err + } + if !parentDirStat.IsDir() { + return CopyInfo{}, ErrNotDirectory + } + + return CopyInfo{Path: path}, nil + } + + // The path exists after resolving symlinks. + return CopyInfo{ + Path: path, + Exists: true, + IsDir: stat.IsDir(), + }, nil +} + +// PrepareArchiveCopy prepares the given srcContent archive, which should +// contain the archived resource described by srcInfo, to the destination +// described by dstInfo. Returns the possibly modified content archive along +// with the path to the destination directory which it should be extracted to. +func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) { + // Ensure in platform semantics + srcInfo.Path = normalizePath(srcInfo.Path) + dstInfo.Path = normalizePath(dstInfo.Path) + + // Separate the destination path between its directory and base + // components in case the source archive contents need to be rebased. + dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) + _, srcBase := SplitPathDirEntry(srcInfo.Path) + + switch { + case dstInfo.Exists && dstInfo.IsDir: + // The destination exists as a directory. No alteration + // to srcContent is needed as its contents can be + // simply extracted to the destination directory. 
+		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
+	case dstInfo.Exists && srcInfo.IsDir:
+		// The destination exists as some type of file and the source
+		// content is a directory. This is an error condition since
+		// you cannot copy a directory to an existing file location.
+		return "", nil, ErrCannotCopyDir
+	case dstInfo.Exists:
+		// The destination exists as some type of file and the source content
+		// is also a file. The source content entry will have to be renamed to
+		// have a basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case srcInfo.IsDir:
+		// The destination does not exist and the source content is an archive
+		// of a directory. The archive should be extracted to the parent of
+		// the destination path instead, and when it is, the directory that is
+		// created as a result should take the name of the destination path.
+		// The source content entries will have to be renamed to have a
+		// basename which matches the destination path's basename.
+		if len(srcInfo.RebaseName) != 0 {
+			srcBase = srcInfo.RebaseName
+		}
+		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
+	case assertsDirectory(dstInfo.Path, os.PathSeparator):
+		// The destination does not exist and is asserted to be created as a
+		// directory, but the source content is not a directory. This is an
+		// error condition since you cannot create a directory from a file
+		// source.
+		return "", nil, ErrDirNotExists
+	default:
+		// The last remaining case is when the destination does not exist, is
+		// not asserted to be a directory, and the source content is not an
+		// archive of a directory. In this case, the destination file will need
+		// to be created when the archive is extracted and the source content
+		// entry will have to be renamed to have a basename which matches the
+		// destination path's basename.
+ if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil + } + +} + +// RebaseArchiveEntries rewrites the given srcContent archive replacing +// an occurrence of oldBase with newBase at the beginning of entry names. +func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser { + if oldBase == string(os.PathSeparator) { + // If oldBase specifies the root directory, use an empty string as + // oldBase instead so that newBase doesn't replace the path separator + // that all paths will start with. + oldBase = "" + } + + rebased, w := io.Pipe() + + go func() { + srcTar := tar.NewReader(srcContent) + rebasedTar := tar.NewWriter(w) + + for { + hdr, err := srcTar.Next() + if err == io.EOF { + // Signals end of archive. + rebasedTar.Close() + w.Close() + return + } + if err != nil { + w.CloseWithError(err) + return + } + + hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) + if hdr.Typeflag == tar.TypeLink { + hdr.Linkname = strings.Replace(hdr.Linkname, oldBase, newBase, 1) + } + + if err = rebasedTar.WriteHeader(hdr); err != nil { + w.CloseWithError(err) + return + } + + if _, err = io.Copy(rebasedTar, srcTar); err != nil { + w.CloseWithError(err) + return + } + } + }() + + return rebased +} + +// TODO @gupta-ak. These might have to be changed in the future to be +// continuity driver aware as well to support LCOW. + +// CopyResource performs an archive copy from the given source path to the +// given destination path. The source path MUST exist and the destination +// path's parent directory must exist. +func CopyResource(srcPath, dstPath string, followLink bool) error { + var ( + srcInfo CopyInfo + err error + ) + + // Ensure in platform semantics + srcPath = normalizePath(srcPath) + dstPath = normalizePath(dstPath) + + // Clean the source and destination paths. 
+ srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath, os.PathSeparator) + dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath, os.PathSeparator) + + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { + return err + } + + content, err := TarResource(srcInfo) + if err != nil { + return err + } + defer content.Close() + + return CopyTo(content, srcInfo, dstPath) +} + +// CopyTo handles extracting the given content whose +// entries should be sourced from srcInfo to dstPath. +func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error { + // The destination path need not exist, but CopyInfoDestinationPath will + // ensure that at least the parent directory exists. + dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) + if err != nil { + return err + } + + dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) + if err != nil { + return err + } + defer copyArchive.Close() + + options := &TarOptions{ + NoLchown: true, + NoOverwriteDirNonDir: true, + } + + return Untar(copyArchive, dstDir, options) +} + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. 
+func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. + resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path, os.PathSeparator) && + filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && + !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path, os.PathSeparator) && + !hasTrailingPathSeparator(resolvedPath, os.PathSeparator) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_unix.go b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go new file mode 100644 index 000000000000..3958364f5ba0 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_unix.go @@ -0,0 +1,11 @@ +// +build !windows + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.ToSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/copy_windows.go b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go new file mode 100644 index 000000000000..a878d1bac426 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/copy_windows.go @@ -0,0 +1,9 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "path/filepath" +) + +func normalizePath(path string) string { + return filepath.FromSlash(path) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/docker/docker/pkg/archive/diff.go new file mode 100644 index 000000000000..d0cff98ffc24 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/diff.go @@ -0,0 +1,256 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + "strings" + + "github.com/docker/docker/pkg/idtools" + "github.com/docker/docker/pkg/pools" + "github.com/docker/docker/pkg/system" + "github.com/sirupsen/logrus" +) + +// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) { + tr := tar.NewReader(layer) + trBuf := pools.BufioReader32KPool.Get(tr) + defer pools.BufioReader32KPool.Put(trBuf) + + var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) + + if options == nil { + options = &TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + + aufsTempdir := "" + aufsHardlinks := make(map[string]*tar.Header) + + // Iterate through the files in the archive. + for { + hdr, err := tr.Next() + if err == io.EOF { + // end of tar archive + break + } + if err != nil { + return 0, err + } + + size += hdr.Size + + // Normalize name, for safety and for a simple is-root check + hdr.Name = filepath.Clean(hdr.Name) + + // Windows does not support filenames with colons in them. Ignore + // these files. This is not a problem though (although it might + // appear that it is). Let's suppose a client is running docker pull. + // The daemon it points to is Windows. Would it make sense for the + // client to be doing a docker pull Ubuntu for example (which has files + // with colons in the name under /usr/share/man/man3)? No, absolutely + // not as it would really only make sense that they were pulling a + // Windows image. However, for development, it is necessary to be able + // to pull Linux images which are in the repository. + // + // TODO Windows. Once the registry is aware of what images are Windows- + // specific or Linux-specific, this warning should be changed to an error + // to cater for the situation where someone does manage to upload a Linux + // image but have it tagged as Windows inadvertently. 
+ if runtime.GOOS == "windows" { + if strings.Contains(hdr.Name, ":") { + logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) + continue + } + } + + // Note as these operations are platform specific, so must the slash be. + if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { + // Not the root directory, ensure that the parent directory exists. + // This happened in some tests where an image had a tarfile without any + // parent directories. + parent := filepath.Dir(hdr.Name) + parentPath := filepath.Join(dest, parent) + + if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { + err = system.MkdirAll(parentPath, 0600, "") + if err != nil { + return 0, err + } + } + } + + // Skip AUFS metadata dirs + if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) { + // Regular files inside /.wh..wh.plnk can be used as hardlink targets + // We don't want this directory, but we need the files in them so that + // such hardlinks can be resolved. + if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg { + basename := filepath.Base(hdr.Name) + aufsHardlinks[basename] = hdr + if aufsTempdir == "" { + if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil { + return 0, err + } + defer os.RemoveAll(aufsTempdir) + } + if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil { + return 0, err + } + } + + if hdr.Name != WhiteoutOpaqueDir { + continue + } + } + path := filepath.Join(dest, hdr.Name) + rel, err := filepath.Rel(dest, path) + if err != nil { + return 0, err + } + + // Note as these operations are platform specific, so must the slash be. 
+ if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { + return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) + } + base := filepath.Base(path) + + if strings.HasPrefix(base, WhiteoutPrefix) { + dir := filepath.Dir(path) + if base == WhiteoutOpaqueDir { + _, err := os.Lstat(dir) + if err != nil { + return 0, err + } + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { + return 0, err + } + } else { + originalBase := base[len(WhiteoutPrefix):] + originalPath := filepath.Join(dir, originalBase) + if err := os.RemoveAll(originalPath); err != nil { + return 0, err + } + } + } else { + // If path exits we almost always just want to remove and replace it. + // The only exception is when it is a directory *and* the file from + // the layer is also a directory. Then we want to merge them (i.e. + // just apply the metadata from the layer). 
+ if fi, err := os.Lstat(path); err == nil { + if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { + if err := os.RemoveAll(path); err != nil { + return 0, err + } + } + } + + trBuf.Reset(tr) + srcData := io.Reader(trBuf) + srcHdr := hdr + + // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so + // we manually retarget these into the temporary files we extracted them into + if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { + linkBasename := filepath.Base(hdr.Linkname) + srcHdr = aufsHardlinks[linkBasename] + if srcHdr == nil { + return 0, fmt.Errorf("Invalid aufs hardlink") + } + tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) + if err != nil { + return 0, err + } + defer tmpFile.Close() + srcData = tmpFile + } + + if err := remapIDs(idMappings, srcHdr); err != nil { + return 0, err + } + + if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil { + return 0, err + } + + // Directory mtimes must be handled at the end to avoid further + // file creation in them to modify the directory mtime + if hdr.Typeflag == tar.TypeDir { + dirs = append(dirs, hdr) + } + unpackedPaths[path] = struct{}{} + } + } + + for _, hdr := range dirs { + path := filepath.Join(dest, hdr.Name) + if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + return 0, err + } + } + + return size, nil +} + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can be +// compressed or uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyLayer(dest string, layer io.Reader) (int64, error) { + return applyLayerHandler(dest, layer, &TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. 
+// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} + +// do the bulk load of ApplyLayer, but allow for not calling DecompressStream +func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) { + dest = filepath.Clean(dest) + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + if err != nil { + return 0, err + } + defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform + + if decompress { + layer, err = DecompressStream(layer) + if err != nil { + return 0, err + } + } + return UnpackLayer(dest, layer, options) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/example_changes.go b/vendor/github.com/docker/docker/pkg/archive/example_changes.go new file mode 100644 index 000000000000..495db809e9f6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/example_changes.go @@ -0,0 +1,97 @@ +// +build ignore + +// Simple tool to create an archive stream from an old and new directory +// +// By default it will stream the comparison of two temporary directories with junk files +package main + +import ( + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path" + + "github.com/docker/docker/pkg/archive" + "github.com/sirupsen/logrus" +) + +var ( + flDebug = flag.Bool("D", false, "debugging output") + flNewDir = flag.String("newdir", "", "") + flOldDir = flag.String("olddir", "", "") + log = logrus.New() +) + +func main() { + flag.Usage = func() { + fmt.Println("Produce a tar from comparing two directory paths. 
By default a demo tar is created of around 200 files (including hardlinks)") + fmt.Printf("%s [OPTIONS]\n", os.Args[0]) + flag.PrintDefaults() + } + flag.Parse() + log.Out = os.Stderr + if (len(os.Getenv("DEBUG")) > 0) || *flDebug { + logrus.SetLevel(logrus.DebugLevel) + } + var newDir, oldDir string + + if len(*flNewDir) == 0 { + var err error + newDir, err = ioutil.TempDir("", "docker-test-newDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(newDir) + if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { + log.Fatal(err) + } + } else { + newDir = *flNewDir + } + + if len(*flOldDir) == 0 { + oldDir, err := ioutil.TempDir("", "docker-test-oldDir") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(oldDir) + } else { + oldDir = *flOldDir + } + + changes, err := archive.ChangesDirs(newDir, oldDir) + if err != nil { + log.Fatal(err) + } + + a, err := archive.ExportChanges(newDir, changes) + if err != nil { + log.Fatal(err) + } + defer a.Close() + + i, err := io.Copy(os.Stdout, a) + if err != nil && err != io.EOF { + log.Fatal(err) + } + fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) +} + +func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { + fileData := []byte("fooo") + for n := 0; n < numberOfFiles; n++ { + fileName := fmt.Sprintf("file-%d", n) + if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { + return 0, err + } + if makeLinks { + if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { + return 0, err + } + } + } + totalSize := numberOfFiles * len(fileData) + return totalSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_linux.go b/vendor/github.com/docker/docker/pkg/archive/time_linux.go new file mode 100644 index 000000000000..797143ee84d8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_linux.go @@ -0,0 
+1,16 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + if time.IsZero() { + // Return UTIME_OMIT special value + ts.Sec = 0 + ts.Nsec = (1 << 30) - 2 + return + } + return syscall.NsecToTimespec(time.UnixNano()) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go new file mode 100644 index 000000000000..f58bf227fd33 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/time_unsupported.go @@ -0,0 +1,16 @@ +// +build !linux + +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "syscall" + "time" +) + +func timeToTimespec(time time.Time) (ts syscall.Timespec) { + nsec := int64(0) + if !time.IsZero() { + nsec = time.UnixNano() + } + return syscall.NsecToTimespec(nsec) +} diff --git a/vendor/github.com/docker/docker/pkg/archive/whiteouts.go b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go new file mode 100644 index 000000000000..4c072a87ee53 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/whiteouts.go @@ -0,0 +1,23 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +// Whiteouts are files with a special meaning for the layered filesystem. +// Docker uses AUFS whiteout files inside exported archives. In other +// filesystems these files are generated/handled on tar creation/extraction. + +// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a +// filename this means that file has been removed from the base layer. +const WhiteoutPrefix = ".wh." + +// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not +// for removing an actual file. Normally these files are excluded from exported +// archives. 
+const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix + +// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other +// layers. Normally these should not go into exported archives and all changed +// hardlinks should be copied to the top layer. +const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" + +// WhiteoutOpaqueDir file means directory has been made opaque - meaning +// readdir calls to this directory do not follow to lower layers. +const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/vendor/github.com/docker/docker/pkg/archive/wrap.go b/vendor/github.com/docker/docker/pkg/archive/wrap.go new file mode 100644 index 000000000000..85435694cff7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/archive/wrap.go @@ -0,0 +1,59 @@ +package archive // import "github.com/docker/docker/pkg/archive" + +import ( + "archive/tar" + "bytes" + "io" +) + +// Generate generates a new archive from the content provided +// as input. +// +// `files` is a sequence of path/content pairs. A new file is +// added to the archive for each pair. +// If the last pair is incomplete, the file is created with an +// empty content. For example: +// +// Generate("foo.txt", "hello world", "emptyfile") +// +// The above call will return an archive with 2 files: +// * ./foo.txt with content "hello world" +// * ./empty with empty content +// +// FIXME: stream content instead of buffering +// FIXME: specify permissions and other archive metadata +func Generate(input ...string) (io.Reader, error) { + files := parseStringPairs(input...) 
+ buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, file := range files { + name, content := file[0], file[1] + hdr := &tar.Header{ + Name: name, + Size: int64(len(content)), + } + if err := tw.WriteHeader(hdr); err != nil { + return nil, err + } + if _, err := tw.Write([]byte(content)); err != nil { + return nil, err + } + } + if err := tw.Close(); err != nil { + return nil, err + } + return buf, nil +} + +func parseStringPairs(input ...string) (output [][2]string) { + output = make([][2]string, 0, len(input)/2+1) + for i := 0; i < len(input); i += 2 { + var pair [2]string + pair[0] = input[i] + if i+1 < len(input) { + pair[1] = input[i+1] + } + output = append(output, pair) + } + return +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go new file mode 100644 index 000000000000..47c9a2b94c5f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive.go @@ -0,0 +1,73 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/idtools" +) + +// NewArchiver returns a new Archiver which uses chrootarchive.Untar +func NewArchiver(idMappings *idtools.IDMappings) *archive.Archiver { + if idMappings == nil { + idMappings = &idtools.IDMappings{} + } + return &archive.Archiver{ + Untar: Untar, + IDMappingsVar: idMappings, + } +} + +// Untar reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive may be compressed with one of the following algorithms: +// identity (uncompressed), gzip, bzip2, xz. 
+func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, true) +} + +// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, +// and unpacks it into the directory at `dest`. +// The archive must be an uncompressed stream. +func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { + return untarHandler(tarArchive, dest, options, false) +} + +// Handler for teasing out the automatic decompression +func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { + if tarArchive == nil { + return fmt.Errorf("Empty archive") + } + if options == nil { + options = &archive.TarOptions{} + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps) + rootIDs := idMappings.RootPair() + + dest = filepath.Clean(dest) + if _, err := os.Stat(dest); os.IsNotExist(err) { + if err := idtools.MkdirAllAndChownNew(dest, 0755, rootIDs); err != nil { + return err + } + } + + r := ioutil.NopCloser(tarArchive) + if decompress { + decompressedArchive, err := archive.DecompressStream(tarArchive) + if err != nil { + return err + } + defer decompressedArchive.Close() + r = decompressedArchive + } + + return invokeUnpack(r, dest, options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go new file mode 100644 index 000000000000..5df8afd66205 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_unix.go @@ -0,0 +1,88 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "runtime" + + 
"github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" +) + +// untar is the entry-point for docker-untar on re-exec. This is not used on +// Windows as it does not support chroot, hence no point sandboxing through +// chroot and rexec. +func untar() { + runtime.LockOSThread() + flag.Parse() + + var options *archive.TarOptions + + //read the options from the pipe "ExtraFiles" + if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { + fatal(err) + } + + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + if err := archive.Unpack(os.Stdin, "/", options); err != nil { + fatal(err) + } + // fully consume stdin in case it is zero padded + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { + + // We can't pass a potentially large exclude list directly via cmd line + // because we easily overrun the kernel's max argument/environment size + // when the full image list is passed (e.g. when this is used by + // `docker load`). 
We will marshall the options via a pipe to the + // child + r, w, err := os.Pipe() + if err != nil { + return fmt.Errorf("Untar pipe failure: %v", err) + } + + cmd := reexec.Command("docker-untar", dest) + cmd.Stdin = decompressedArchive + + cmd.ExtraFiles = append(cmd.ExtraFiles, r) + output := bytes.NewBuffer(nil) + cmd.Stdout = output + cmd.Stderr = output + + if err := cmd.Start(); err != nil { + w.Close() + return fmt.Errorf("Untar error on re-exec cmd: %v", err) + } + //write the options to the pipe for the untar exec to read + if err := json.NewEncoder(w).Encode(options); err != nil { + w.Close() + return fmt.Errorf("Untar json encode to pipe failed: %v", err) + } + w.Close() + + if err := cmd.Wait(); err != nil { + // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, + // we need to exhaust `xz`'s output, otherwise the `xz` side will be + // pending on write pipe forever + io.Copy(ioutil.Discard, decompressedArchive) + + return fmt.Errorf("Error processing tar file(%v): %s", err, output) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go new file mode 100644 index 000000000000..f2973132a391 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/archive_windows.go @@ -0,0 +1,22 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// chroot is not supported by Windows +func chroot(path string) error { + return nil +} + +func invokeUnpack(decompressedArchive io.ReadCloser, + dest string, + options *archive.TarOptions) error { + // Windows is different to Linux here because Windows does not support + // chroot. Hence there is no point sandboxing a chrooted process to + // do the unpack. 
We call inline instead within the daemon process. + return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go new file mode 100644 index 000000000000..9802fad5145f --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_linux.go @@ -0,0 +1,113 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/mount" + rsystem "github.com/opencontainers/runc/libcontainer/system" + "golang.org/x/sys/unix" +) + +// chroot on linux uses pivot_root instead of chroot +// pivot_root takes a new root and an old root. +// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. +// New root is where the new rootfs is set to. +// Old root is removed after the call to pivot_root so it is no longer available under the new root. +// This is similar to how libcontainer sets up a container's rootfs +func chroot(path string) (err error) { + // if the engine is running in a user namespace we need to use actual chroot + if rsystem.RunningInUserNS() { + return realChroot(path) + } + if err := unix.Unshare(unix.CLONE_NEWNS); err != nil { + return fmt.Errorf("Error creating mount namespace before pivot: %v", err) + } + + // Make everything in new ns slave. + // Don't use `private` here as this could race where the mountns gets a + // reference to a mount and an unmount from the host does not propagate, + // which could potentially cause transient errors for other operations, + // even though this should be relatively small window here `slave` should + // not cause any problems. 
+ if err := mount.MakeRSlave("/"); err != nil { + return err + } + + if mounted, _ := mount.Mounted(path); !mounted { + if err := mount.Mount(path, path, "bind", "rbind,rw"); err != nil { + return realChroot(path) + } + } + + // setup oldRoot for pivot_root + pivotDir, err := ioutil.TempDir(path, ".pivot_root") + if err != nil { + return fmt.Errorf("Error setting up pivot dir: %v", err) + } + + var mounted bool + defer func() { + if mounted { + // make sure pivotDir is not mounted before we try to remove it + if errCleanup := unix.Unmount(pivotDir, unix.MNT_DETACH); errCleanup != nil { + if err == nil { + err = errCleanup + } + return + } + } + + errCleanup := os.Remove(pivotDir) + // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful + // because we already cleaned it up on failed pivot_root + if errCleanup != nil && !os.IsNotExist(errCleanup) { + errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) + if err == nil { + err = errCleanup + } + } + }() + + if err := unix.PivotRoot(path, pivotDir); err != nil { + // If pivot fails, fall back to the normal chroot after cleaning up temp dir + if err := os.Remove(pivotDir); err != nil { + return fmt.Errorf("Error cleaning up after failed pivot: %v", err) + } + return realChroot(path) + } + mounted = true + + // This is the new path for where the old root (prior to the pivot) has been moved to + // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction + pivotDir = filepath.Join("/", filepath.Base(pivotDir)) + + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root: %v", err) + } + + // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host + if err := unix.Mount("", pivotDir, "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil { + return fmt.Errorf("Error making old root private after pivot: %v", err) + } + + // Now unmount the old root so 
it's no longer visible from the new root + if err := unix.Unmount(pivotDir, unix.MNT_DETACH); err != nil { + return fmt.Errorf("Error while unmounting old root after pivot: %v", err) + } + mounted = false + + return nil +} + +func realChroot(path string) error { + if err := unix.Chroot(path); err != nil { + return fmt.Errorf("Error after fallback to chroot: %v", err) + } + if err := unix.Chdir("/"); err != nil { + return fmt.Errorf("Error changing to new root after chroot: %v", err) + } + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go new file mode 100644 index 000000000000..9a1ee5875490 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/chroot_unix.go @@ -0,0 +1,12 @@ +// +build !windows,!linux + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import "golang.org/x/sys/unix" + +func chroot(path string) error { + if err := unix.Chroot(path); err != nil { + return err + } + return unix.Chdir("/") +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go new file mode 100644 index 000000000000..7712cc17c8cd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff.go @@ -0,0 +1,23 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "io" + + "github.com/docker/docker/pkg/archive" +) + +// ApplyLayer parses a diff in the standard layer format from `layer`, +// and applies it to the directory `dest`. The stream `layer` can only be +// uncompressed. +// Returns the size in bytes of the contents of the layer. 
+func ApplyLayer(dest string, layer io.Reader) (size int64, err error) { + return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) +} + +// ApplyUncompressedLayer parses a diff in the standard layer format from +// `layer`, and applies it to the directory `dest`. The stream `layer` +// can only be uncompressed. +// Returns the size in bytes of the contents of the layer. +func ApplyUncompressedLayer(dest string, layer io.Reader, options *archive.TarOptions) (int64, error) { + return applyLayerHandler(dest, layer, options, false) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go new file mode 100644 index 000000000000..d96a09f8fa7a --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_unix.go @@ -0,0 +1,130 @@ +//+build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "bytes" + "encoding/json" + "flag" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "runtime" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/reexec" + "github.com/docker/docker/pkg/system" + rsystem "github.com/opencontainers/runc/libcontainer/system" +) + +type applyLayerResponse struct { + LayerSize int64 `json:"layerSize"` +} + +// applyLayer is the entry-point for docker-applylayer on re-exec. This is not +// used on Windows as it does not support chroot, hence no point sandboxing +// through chroot and rexec. 
+func applyLayer() { + + var ( + tmpDir string + err error + options *archive.TarOptions + ) + runtime.LockOSThread() + flag.Parse() + + inUserns := rsystem.RunningInUserNS() + if err := chroot(flag.Arg(0)); err != nil { + fatal(err) + } + + // We need to be able to set any perms + oldmask, err := system.Umask(0) + defer system.Umask(oldmask) + if err != nil { + fatal(err) + } + + if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { + fatal(err) + } + + if inUserns { + options.InUserNS = true + } + + if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { + fatal(err) + } + + os.Setenv("TMPDIR", tmpDir) + size, err := archive.UnpackLayer("/", os.Stdin, options) + os.RemoveAll(tmpDir) + if err != nil { + fatal(err) + } + + encoder := json.NewEncoder(os.Stdout) + if err := encoder.Encode(applyLayerResponse{size}); err != nil { + fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) + } + + if _, err := flush(os.Stdin); err != nil { + fatal(err) + } + + os.Exit(0) +} + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. 
+func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + if options == nil { + options = &archive.TarOptions{} + if rsystem.RunningInUserNS() { + options.InUserNS = true + } + } + if options.ExcludePatterns == nil { + options.ExcludePatterns = []string{} + } + + data, err := json.Marshal(options) + if err != nil { + return 0, fmt.Errorf("ApplyLayer json encode: %v", err) + } + + cmd := reexec.Command("docker-applyLayer", dest) + cmd.Stdin = layer + cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) + + outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) + cmd.Stdout, cmd.Stderr = outBuf, errBuf + + if err = cmd.Run(); err != nil { + return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) + } + + // Stdout should be a valid JSON struct representing an applyLayerResponse. 
+ response := applyLayerResponse{} + decoder := json.NewDecoder(outBuf) + if err = decoder.Decode(&response); err != nil { + return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) + } + + return response.LayerSize, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go new file mode 100644 index 000000000000..8f3f3a4a8aae --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/diff_windows.go @@ -0,0 +1,45 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + + "github.com/docker/docker/pkg/archive" + "github.com/docker/docker/pkg/longpath" +) + +// applyLayerHandler parses a diff in the standard layer format from `layer`, and +// applies it to the directory `dest`. Returns the size in bytes of the +// contents of the layer. +func applyLayerHandler(dest string, layer io.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { + dest = filepath.Clean(dest) + + // Ensure it is a Windows-style volume path + dest = longpath.AddPrefix(dest) + + if decompress { + decompressed, err := archive.DecompressStream(layer) + if err != nil { + return 0, err + } + defer decompressed.Close() + + layer = decompressed + } + + tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") + if err != nil { + return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) + } + + s, err := archive.UnpackLayer(dest, layer, nil) + os.RemoveAll(tmpDir) + if err != nil { + return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s: %s", layer, dest, err) + } + + return s, nil +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go new file mode 100644 index 000000000000..a15e4bb83c40 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_unix.go @@ -0,0 +1,28 @@ +// +build !windows + +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +import ( + "fmt" + "io" + "io/ioutil" + "os" + + "github.com/docker/docker/pkg/reexec" +) + +func init() { + reexec.Register("docker-applyLayer", applyLayer) + reexec.Register("docker-untar", untar) +} + +func fatal(err error) { + fmt.Fprint(os.Stderr, err) + os.Exit(1) +} + +// flush consumes all the bytes from the reader discarding +// any errors +func flush(r io.Reader) (bytes int64, err error) { + return io.Copy(ioutil.Discard, r) +} diff --git a/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go new file mode 100644 index 000000000000..15ed874e7751 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/chrootarchive/init_windows.go @@ -0,0 +1,4 @@ +package chrootarchive // import "github.com/docker/docker/pkg/chrootarchive" + +func init() { +} diff --git a/vendor/github.com/docker/docker/pkg/pools/pools.go b/vendor/github.com/docker/docker/pkg/pools/pools.go new file mode 100644 index 000000000000..46339c282f11 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/pools/pools.go @@ -0,0 +1,137 @@ +// Package pools provides a collection of pools which provide various +// data types with buffers. 
These can be used to lower the number of +// memory allocations and reuse buffers. +// +// New pools should be added to this package to allow them to be +// shared across packages. +// +// Utility functions which operate on pools should be added to this +// package to allow them to be reused. +package pools // import "github.com/docker/docker/pkg/pools" + +import ( + "bufio" + "io" + "sync" + + "github.com/docker/docker/pkg/ioutils" +) + +const buffer32K = 32 * 1024 + +var ( + // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. + BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) + // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. + BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) + buffer32KPool = newBufferPoolWithSize(buffer32K) +) + +// BufioReaderPool is a bufio reader that uses sync.Pool. +type BufioReaderPool struct { + pool sync.Pool +} + +// newBufioReaderPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioReaderPoolWithSize(size int) *BufioReaderPool { + return &BufioReaderPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. +func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { + buf := bufPool.pool.Get().(*bufio.Reader) + buf.Reset(r) + return buf +} + +// Put puts the bufio.Reader back into the pool. 
+func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +type bufferPool struct { + pool sync.Pool +} + +func newBufferPoolWithSize(size int) *bufferPool { + return &bufferPool{ + pool: sync.Pool{ + New: func() interface{} { return make([]byte, size) }, + }, + } +} + +func (bp *bufferPool) Get() []byte { + return bp.pool.Get().([]byte) +} + +func (bp *bufferPool) Put(b []byte) { + bp.pool.Put(b) +} + +// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. +func Copy(dst io.Writer, src io.Reader) (written int64, err error) { + buf := buffer32KPool.Get() + written, err = io.CopyBuffer(dst, src, buf) + buffer32KPool.Put(buf) + return +} + +// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back +// into the pool and closes the reader if it's an io.ReadCloser. +func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { + return ioutils.NewReadCloserWrapper(r, func() error { + if readCloser, ok := r.(io.ReadCloser); ok { + readCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} + +// BufioWriterPool is a bufio writer that uses sync.Pool. +type BufioWriterPool struct { + pool sync.Pool +} + +// newBufioWriterPoolWithSize is unexported because new pools should be +// added here to be shared where required. +func newBufioWriterPoolWithSize(size int) *BufioWriterPool { + return &BufioWriterPool{ + pool: sync.Pool{ + New: func() interface{} { return bufio.NewWriterSize(nil, size) }, + }, + } +} + +// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. +func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { + buf := bufPool.pool.Get().(*bufio.Writer) + buf.Reset(w) + return buf +} + +// Put puts the bufio.Writer back into the pool. 
+func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { + b.Reset(nil) + bufPool.pool.Put(b) +} + +// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back +// into the pool and closes the writer if it's an io.Writecloser. +func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { + return ioutils.NewWriteCloserWrapper(w, func() error { + buf.Flush() + if writeCloser, ok := w.(io.WriteCloser); ok { + writeCloser.Close() + } + bufPool.Put(buf) + return nil + }) +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/README.md b/vendor/github.com/docker/docker/pkg/reexec/README.md new file mode 100644 index 000000000000..6658f69b69d7 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/README.md @@ -0,0 +1,5 @@ +# reexec + +The `reexec` package facilitates the busybox style reexec of the docker binary that we require because +of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of +the exec of the binary will be used to find and execute custom init paths. diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_linux.go b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go new file mode 100644 index 000000000000..efea71794fdf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_linux.go @@ -0,0 +1,28 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" + "syscall" + + "golang.org/x/sys/unix" +) + +// Self returns the path to the current process's binary. +// Returns "/proc/self/exe". +func Self() string { + return "/proc/self/exe" +} + +// Command returns *exec.Cmd which has Path as current binary. Also it setting +// SysProcAttr.Pdeathsig to SIGTERM. +// This will use the in-memory version (/proc/self/exe) of the current binary, +// it is thus safe to delete or replace the on-disk binary (os.Args[0]). 
+func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + SysProcAttr: &syscall.SysProcAttr{ + Pdeathsig: unix.SIGTERM, + }, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unix.go b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go new file mode 100644 index 000000000000..ceaabbdeee91 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unix.go @@ -0,0 +1,23 @@ +// +build freebsd darwin + +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will +// be set to "/usr/bin/docker". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go new file mode 100644 index 000000000000..09fb4b2d2930 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_unsupported.go @@ -0,0 +1,12 @@ +// +build !linux,!windows,!freebsd,!darwin + +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Command is unsupported on operating systems apart from Linux, Windows, and Darwin. 
+func Command(args ...string) *exec.Cmd { + return nil +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/command_windows.go b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go new file mode 100644 index 000000000000..438226890f62 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/command_windows.go @@ -0,0 +1,21 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "os/exec" +) + +// Self returns the path to the current process's binary. +// Uses os.Args[0]. +func Self() string { + return naiveSelf() +} + +// Command returns *exec.Cmd which has Path as current binary. +// For example if current binary is "docker.exe" at "C:\", then cmd.Path will +// be set to "C:\docker.exe". +func Command(args ...string) *exec.Cmd { + return &exec.Cmd{ + Path: Self(), + Args: args, + } +} diff --git a/vendor/github.com/docker/docker/pkg/reexec/reexec.go b/vendor/github.com/docker/docker/pkg/reexec/reexec.go new file mode 100644 index 000000000000..f8ccddd599e8 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/reexec/reexec.go @@ -0,0 +1,47 @@ +package reexec // import "github.com/docker/docker/pkg/reexec" + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" +) + +var registeredInitializers = make(map[string]func()) + +// Register adds an initialization func under the specified name +func Register(name string, initializer func()) { + if _, exists := registeredInitializers[name]; exists { + panic(fmt.Sprintf("reexec func already registered under name %q", name)) + } + + registeredInitializers[name] = initializer +} + +// Init is called as the first part of the exec process and returns true if an +// initialization function was called. 
+func Init() bool { + initializer, exists := registeredInitializers[os.Args[0]] + if exists { + initializer() + + return true + } + return false +} + +func naiveSelf() string { + name := os.Args[0] + if filepath.Base(name) == name { + if lp, err := exec.LookPath(name); err == nil { + return lp + } + } + // handle conversion of relative paths to absolute + if absName, err := filepath.Abs(name); err == nil { + return absName + } + // if we couldn't get absolute name, return original + // (NOTE: Go only errors on Abs() if os.Getwd fails) + return name +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 74eb3f4006ea..9659bbd846ed 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -118,17 +118,21 @@ github.com/docker/distribution/reference github.com/docker/distribution/digestset # github.com/docker/docker v0.7.3-0.20180531152204-71cd53e4a197 github.com/docker/docker/pkg/locker +github.com/docker/docker/pkg/reexec github.com/docker/docker/builder/dockerignore github.com/docker/docker/api/types/strslice github.com/docker/docker/pkg/signal github.com/docker/docker/api/types/container +github.com/docker/docker/pkg/archive +github.com/docker/docker/pkg/chrootarchive github.com/docker/docker/pkg/fileutils github.com/docker/docker/pkg/ioutils github.com/docker/docker/api/types/blkiodev github.com/docker/docker/api/types/mount github.com/docker/docker/pkg/homedir -github.com/docker/docker/pkg/longpath github.com/docker/docker/pkg/idtools +github.com/docker/docker/pkg/longpath +github.com/docker/docker/pkg/pools github.com/docker/docker/pkg/system github.com/docker/docker/pkg/mount # github.com/docker/docker-credential-helpers v0.6.0 From bb11c8c5c9c51b7a8ba97811c7ed12ec05d83974 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 09:28:02 -0700 Subject: [PATCH 
15/25] dockerfile: empty wildcard regression test Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 39 ++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index c0f44e8d0cdd..58fd1be28cf7 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -82,6 +82,7 @@ var allTests = []integration.Test{ testEmptyDestDir, testSymlinkedDockerfile, testDockerfileAddArchiveWildcard, + testEmptyWildcard, } var opts []integration.TestOpt @@ -181,6 +182,44 @@ ENV foo bar require.NoError(t, err) } +func testEmptyWildcard(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM scratch +COPY foo nomatch* / +`) + + dir, err := tmpdir( + fstest.CreateFile("Dockerfile", dockerfile, 0600), + fstest.CreateFile("foo", []byte("contents0"), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exporter: client.ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, "contents0", string(dt)) +} + func testCopyChownCreateDest(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) From 39ba2ede04b05a30dcc778c97b3706a86d163373 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sat, 9 Mar 2019 22:24:45 -0800 Subject: [PATCH 16/25] dockerfile: regression test for workdir creation Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 39 
++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index 58fd1be28cf7..d764556e8880 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -83,6 +83,7 @@ var allTests = []integration.Test{ testSymlinkedDockerfile, testDockerfileAddArchiveWildcard, testEmptyWildcard, + testWorkdirCreatesDir, } var opts []integration.TestOpt @@ -154,6 +155,44 @@ RUN [ "$(cat testfile)" == "contents0" ] require.NoError(t, err) } +func testWorkdirCreatesDir(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM scratch +WORKDIR /foo +WORKDIR / +`) + + dir, err := tmpdir( + fstest.CreateFile("Dockerfile", dockerfile, 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exporter: client.ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + fi, err := os.Lstat(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, true, fi.IsDir()) +} + func testSymlinkedDockerfile(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) From 369f7482de18ece5cebef6c98c38fc6266e85199 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sat, 9 Mar 2019 23:34:00 -0800 Subject: [PATCH 17/25] dockerfile: regression test for wildcard unpack Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 1 + 1 file changed, 1 insertion(+) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index d764556e8880..13cc9a60b50a 100644 --- 
a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -84,6 +84,7 @@ var allTests = []integration.Test{ testDockerfileAddArchiveWildcard, testEmptyWildcard, testWorkdirCreatesDir, + testDockerfileAddArchiveWildcard, } var opts []integration.TestOpt From c4ef668f621f5c588f18f2f4ab1875eb368e5b61 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 19:02:16 -0700 Subject: [PATCH 18/25] dockerfile: regression test for existing dest dir perms Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 95 ++++++++++++++++++++++++++ 1 file changed, 95 insertions(+) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index 13cc9a60b50a..87ec48fa2070 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -85,6 +85,7 @@ var allTests = []integration.Test{ testEmptyWildcard, testWorkdirCreatesDir, testDockerfileAddArchiveWildcard, + testCopyChownExistingDir, } var opts []integration.TestOpt @@ -222,6 +223,100 @@ ENV foo bar require.NoError(t, err) } +func testCopyChownExistingDir(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + dockerfile := []byte(` +# Set up files and directories with known ownership +FROM busybox AS source +RUN touch /file && chown 100:200 /file \ + && mkdir -p /dir/subdir \ + && touch /dir/subdir/nestedfile \ + && chown 100:200 /dir \ + && chown 101:201 /dir/subdir \ + && chown 102:202 /dir/subdir/nestedfile + +FROM busybox AS test_base +RUN mkdir -p /existingdir/existingsubdir \ + && touch /existingdir/existingfile \ + && chown 500:600 /existingdir \ + && chown 501:601 /existingdir/existingsubdir \ + && chown 501:601 /existingdir/existingfile + + +# Copy files from the source stage +FROM test_base AS copy_from +COPY --from=source /file . 
+# Copy to a non-existing target directory creates the target directory (as root), then copies the _contents_ of the source directory into it +COPY --from=source /dir /dir +# Copying to an existing target directory will copy the _contents_ of the source directory into it +COPY --from=source /dir/. /existingdir + +RUN e="100:200"; p="/file" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="0:0"; p="/dir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="101:201"; p="/dir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="102:202"; p="/dir/subdir/nestedfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ +# Existing files and directories ownership should not be modified + && e="500:600"; p="/existingdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="501:601"; p="/existingdir/existingsubdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="501:601"; p="/existingdir/existingfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ +# But new files and directories should maintain their ownership + && e="101:201"; p="/existingdir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. 
expected $e, got $a"; exit 1; fi \ + && e="102:202"; p="/existingdir/subdir/nestedfile"; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi + + +# Copy files from the source stage and chown them. +FROM test_base AS copy_from_chowned +COPY --from=source --chown=300:400 /file . +# Copy to a non-existing target directory creates the target directory (as root), then copies the _contents_ of the source directory into it +COPY --from=source --chown=300:400 /dir /dir +# Copying to an existing target directory copies the _contents_ of the source directory into it +COPY --from=source --chown=300:400 /dir/. /existingdir + +RUN e="300:400"; p="/file" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="300:400"; p="/dir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="300:400"; p="/dir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="300:400"; p="/dir/subdir/nestedfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ +# Existing files and directories ownership should not be modified + && e="500:600"; p="/existingdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="501:601"; p="/existingdir/existingsubdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="501:601"; p="/existingdir/existingfile" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. 
expected $e, got $a"; exit 1; fi \ +# But new files and directories should be chowned + && e="300:400"; p="/existingdir/subdir" ; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi \ + && e="300:400"; p="/existingdir/subdir/nestedfile"; a=` + "`" + `stat -c "%u:%g" "$p"` + "`" + `; if [ "$a" != "$e" ]; then echo "incorrect ownership on $p. expected $e, got $a"; exit 1; fi +`) + + dir, err := tmpdir( + fstest.CreateFile("Dockerfile.web", dockerfile, 0600), + fstest.Symlink("Dockerfile.web", "Dockerfile"), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "target": "copy_from", + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) +} + func testEmptyWildcard(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) From c57e5b22ea57c2cfdf2cbcc8b367eea139ec24ed Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 14:51:07 -0700 Subject: [PATCH 19/25] dockerfile: add regression test for wildcard cache Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 86 ++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index 87ec48fa2070..d974457513a2 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -86,6 +86,7 @@ var allTests = []integration.Test{ testWorkdirCreatesDir, 
testDockerfileAddArchiveWildcard, testCopyChownExistingDir, + testCopyWildcardCache, } var opts []integration.TestOpt @@ -317,6 +318,91 @@ RUN e="300:400"; p="/file" ; a=` + "`" + `stat -c "%u:%g require.NoError(t, err) } +func testCopyWildcardCache(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + + dockerfile := []byte(` +FROM busybox AS base +COPY foo* files/ +RUN cat /dev/urandom | head -c 100 | sha256sum > unique +COPY bar files/ +FROM scratch +COPY --from=base unique / +`) + + dir, err := tmpdir( + fstest.CreateFile("Dockerfile", dockerfile, 0600), + fstest.CreateFile("foo1", []byte("foo1-data"), 0600), + fstest.CreateFile("foo2", []byte("foo2-data"), 0600), + fstest.CreateFile("bar", []byte("bar-data"), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exporter: client.ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + require.NoError(t, err) + + err = ioutil.WriteFile(filepath.Join(dir, "bar"), []byte("bar-data-mod"), 0600) + require.NoError(t, err) + + destDir, err = ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exporter: client.ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + dt2, err := ioutil.ReadFile(filepath.Join(destDir, "unique")) + require.NoError(t, err) + require.Equal(t, string(dt), 
string(dt2)) + + err = ioutil.WriteFile(filepath.Join(dir, "foo2"), []byte("foo2-data-mod"), 0600) + require.NoError(t, err) + + destDir, err = ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer os.RemoveAll(destDir) + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exporter: client.ExporterLocal, + ExporterOutputDir: destDir, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) + + dt2, err = ioutil.ReadFile(filepath.Join(destDir, "unique")) + require.NoError(t, err) + require.NotEqual(t, string(dt), string(dt2)) +} + func testEmptyWildcard(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) From 637bec71966a8926db3556c6786dcf981bcce3cc Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 21:08:51 -0700 Subject: [PATCH 20/25] dockerfile: make fileop default Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile2llb/convert.go | 2 +- frontend/dockerfile/dockerfile2llb/convert_fileop.go | 5 ----- frontend/dockerfile/dockerfile2llb/convert_nofileop.go | 5 ----- 3 files changed, 1 insertion(+), 11 deletions(-) delete mode 100644 frontend/dockerfile/dockerfile2llb/convert_fileop.go delete mode 100644 frontend/dockerfile/dockerfile2llb/convert_nofileop.go diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index 419796b4e6fe..b19c89518517 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -1284,7 +1284,7 @@ func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform } func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { - enabled := fileOpEnabled + enabled := true if v, ok := args["BUILDKIT_USE_FILEOP"]; ok { if b, err := strconv.ParseBool(v); err == nil { enabled = b diff --git a/frontend/dockerfile/dockerfile2llb/convert_fileop.go 
b/frontend/dockerfile/dockerfile2llb/convert_fileop.go deleted file mode 100644 index 9c8abc7c323a..000000000000 --- a/frontend/dockerfile/dockerfile2llb/convert_fileop.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build fileop - -package dockerfile2llb - -const fileOpEnabled = true diff --git a/frontend/dockerfile/dockerfile2llb/convert_nofileop.go b/frontend/dockerfile/dockerfile2llb/convert_nofileop.go deleted file mode 100644 index f8f7582b045f..000000000000 --- a/frontend/dockerfile/dockerfile2llb/convert_nofileop.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !fileop - -package dockerfile2llb - -const fileOpEnabled = false From f38d971b95e597215c51ab1d56069daf036921bb Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 21:31:24 -0700 Subject: [PATCH 21/25] dockerfile: add matrix testing for non-fileop versions Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 145 ++++++++++++++++++++----- 1 file changed, 118 insertions(+), 27 deletions(-) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index d974457513a2..c47ea3c2e0d7 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -15,6 +15,7 @@ import ( "path/filepath" "runtime" "sort" + "strconv" "strings" "testing" "time" @@ -38,7 +39,6 @@ import ( ) var allTests = []integration.Test{ - testNoSnapshotLeak, testCmdShell, testGlobalArg, testDockerfileDirs, @@ -52,11 +52,7 @@ var allTests = []integration.Test{ testDockerignore, testDockerignoreInvalid, testDockerfileFromGit, - testCopyChown, - testCopyWildcards, - testCopyOverrideFiles, testMultiStageImplicitFrom, - testCopyVarSubstitution, testMultiStageCaseInsensitive, testLabels, testCacheImportExport, @@ -68,18 +64,11 @@ var allTests = []integration.Test{ testPullScratch, testSymlinkDestination, testHTTPDockerfile, - testNoSnapshotLeak, - testCopySymlinks, - testContextChangeDirToFile, testPlatformArgsImplicit, testPlatformArgsExplicit, 
testExportMultiPlatform, testQuotedMetaArgs, testIgnoreEntrypoint, - testCopyThroughSymlinkContext, - testCopyThroughSymlinkMultiStage, - testCopyChownCreateDest, - testEmptyDestDir, testSymlinkedDockerfile, testDockerfileAddArchiveWildcard, testEmptyWildcard, @@ -89,6 +78,20 @@ var allTests = []integration.Test{ testCopyWildcardCache, } +var fileOpTests = []integration.Test{ + testEmptyDestDir, + testCopyChownCreateDest, + testCopyThroughSymlinkContext, + testCopyThroughSymlinkMultiStage, + testContextChangeDirToFile, + testNoSnapshotLeak, + testCopySymlinks, + testCopyChown, + testCopyOverrideFiles, + testCopyVarSubstitution, + testCopyWildcards, +} + var opts []integration.TestOpt type frontend interface { @@ -126,10 +129,15 @@ func init() { func TestIntegration(t *testing.T) { integration.Run(t, allTests, opts...) + integration.Run(t, fileOpTests, append(opts, integration.WithMatrix("fileop", map[string]interface{}{ + "true": true, + "false": false, + }))...) } func testEmptyDestDir(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -150,6 +158,9 @@ RUN [ "$(cat testfile)" == "contents0" ] defer c.Close() _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -182,8 +193,12 @@ WORKDIR / defer os.RemoveAll(destDir) _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -348,8 +363,12 @@ COPY --from=base unique / defer os.RemoveAll(destDir) _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - 
Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -368,8 +387,12 @@ COPY --from=base unique / defer os.RemoveAll(destDir) _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -389,8 +412,12 @@ COPY --from=base unique / defer os.RemoveAll(destDir) _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -427,8 +454,12 @@ COPY foo nomatch* / defer os.RemoveAll(destDir) _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -443,6 +474,7 @@ COPY foo nomatch* / func testCopyChownCreateDest(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox @@ -462,6 +494,9 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ] defer c.Close() _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, 
builder.DefaultLocalNameContext: dir, @@ -472,6 +507,7 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ] func testCopyThroughSymlinkContext(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -502,6 +538,9 @@ COPY link/foo . OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -516,6 +555,7 @@ COPY link/foo . func testCopyThroughSymlinkMultiStage(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox AS build @@ -546,6 +586,9 @@ COPY --from=build /sub2/foo bar OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -775,6 +818,7 @@ COPY arch-$TARGETARCH whoami // tonistiigi/fsutil#46 func testContextChangeDirToFile(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -794,6 +838,9 @@ COPY foo / defer c.Close() _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -819,6 +866,9 @@ COPY foo / OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -833,6 +883,7 @@ COPY foo / func testNoSnapshotLeak(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) 
+ isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -851,6 +902,9 @@ COPY foo / defer c.Close() _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -862,6 +916,9 @@ COPY foo / require.NoError(t, err) _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -877,6 +934,7 @@ COPY foo / func testCopySymlinks(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch @@ -904,6 +962,9 @@ COPY sub/l* alllinks/ defer c.Close() _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -1625,8 +1686,12 @@ ADD *.tar /dest defer c.Close() _, err = f.Solve(context.TODO(), c, client.SolveOpt{ - Exporter: client.ExporterLocal, - ExporterOutputDir: destDir, + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -2182,6 +2247,7 @@ USER nobody func testCopyChown(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM busybox AS base @@ -2218,6 +2284,9 @@ COPY --from=base /out / OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ 
builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -2236,6 +2305,7 @@ COPY --from=base /out / func testCopyOverrideFiles(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -2273,6 +2343,9 @@ COPY files dest OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -2291,6 +2364,7 @@ COPY files dest func testCopyVarSubstitution(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -2321,6 +2395,9 @@ COPY $FOO baz OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -2335,6 +2412,7 @@ COPY $FOO baz func testCopyWildcards(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) dockerfile := []byte(` FROM scratch AS base @@ -2376,6 +2454,9 @@ COPY sub/dir1 subdest6 OutputDir: destDir, }, }, + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, builder.DefaultLocalNameContext: dir, @@ -2395,9 +2476,11 @@ COPY sub/dir1 subdest6 require.NoError(t, err) require.Equal(t, "foo-contents", string(dt)) - dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir2/foo")) - require.NoError(t, err) - require.Equal(t, "foo-contents", string(dt)) + if isFileOp { // non-fileop implementation is historically buggy + dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest/dir2/foo")) + require.NoError(t, err) + require.Equal(t, "foo-contents", 
string(dt)) + } dt, err = ioutil.ReadFile(filepath.Join(destDir, "subdest2/foo")) require.NoError(t, err) @@ -3474,3 +3557,11 @@ func getFrontend(t *testing.T, sb integration.Sandbox) frontend { require.True(t, ok) return fn } + +func getFileOp(t *testing.T, sb integration.Sandbox) bool { + v := sb.Value("fileop") + require.NotNil(t, v) + vv, ok := v.(bool) + require.True(t, ok) + return vv +} From a16b47fe08346e16824dad5ae1c57faaa70fae07 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Sun, 10 Mar 2019 21:04:50 -0700 Subject: [PATCH 22/25] integration: disable gc Signed-off-by: Tonis Tiigi --- util/testutil/integration/containerd.go | 1 + util/testutil/integration/oci.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/util/testutil/integration/containerd.go b/util/testutil/integration/containerd.go index e75b9c4e97d1..7751830c6993 100644 --- a/util/testutil/integration/containerd.go +++ b/util/testutil/integration/containerd.go @@ -123,6 +123,7 @@ disabled_plugins = ["cri"] buildkitdArgs := []string{"buildkitd", "--oci-worker=false", + "--containerd-worker-gc=false", "--containerd-worker=true", "--containerd-worker-addr", address, "--containerd-worker-labels=org.mobyproject.buildkit.worker.sandbox=true", // Include use of --containerd-worker-labels to trigger https://github.com/moby/buildkit/pull/603 diff --git a/util/testutil/integration/oci.go b/util/testutil/integration/oci.go index a22af37293aa..c06ff053fd61 100644 --- a/util/testutil/integration/oci.go +++ b/util/testutil/integration/oci.go @@ -60,7 +60,7 @@ func (s *oci) New(opt ...SandboxOpt) (Sandbox, func() error, error) { } logs := map[string]*bytes.Buffer{} // Include use of --oci-worker-labels to trigger https://github.com/moby/buildkit/pull/603 - buildkitdArgs := []string{"buildkitd", "--oci-worker=true", "--containerd-worker=false", "--oci-worker-labels=org.mobyproject.buildkit.worker.sandbox=true"} + buildkitdArgs := []string{"buildkitd", "--oci-worker=true", 
"--containerd-worker=false", "--oci-worker-gc=false", "--oci-worker-labels=org.mobyproject.buildkit.worker.sandbox=true"} deferF := &multiCloser{} From c6149da2ebf6e8dc5663aa70f44f5c8866b66f00 Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Mon, 11 Mar 2019 10:19:24 -0700 Subject: [PATCH 23/25] fileop: review fixes Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile2llb/convert.go | 4 +- frontend/dockerfile/dockerfile_test.go | 26 +- solver/pb/ops.pb.go | 364 +++++++++--------- solver/pb/ops.proto | 20 +- 4 files changed, 207 insertions(+), 207 deletions(-) diff --git a/frontend/dockerfile/dockerfile2llb/convert.go b/frontend/dockerfile/dockerfile2llb/convert.go index b19c89518517..f2a0443536bc 100644 --- a/frontend/dockerfile/dockerfile2llb/convert.go +++ b/frontend/dockerfile/dockerfile2llb/convert.go @@ -1285,9 +1285,9 @@ func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { enabled := true - if v, ok := args["BUILDKIT_USE_FILEOP"]; ok { + if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok { if b, err := strconv.ParseBool(v); err == nil { - enabled = b + enabled = !b } } return enabled && caps != nil && caps.Supports(pb.CapFileBase) == nil diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index c47ea3c2e0d7..ecd6e96f3d29 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -159,7 +159,7 @@ RUN [ "$(cat testfile)" == "contents0" ] _, err = f.Solve(context.TODO(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -495,7 +495,7 @@ RUN [ "$(stat -c "%U %G" /dest)" == "user user" ] _, err = f.Solve(context.TODO(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ 
- "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -539,7 +539,7 @@ COPY link/foo . }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -587,7 +587,7 @@ COPY --from=build /sub2/foo bar }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -839,7 +839,7 @@ COPY foo / _, err = f.Solve(context.TODO(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -867,7 +867,7 @@ COPY foo / }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -903,7 +903,7 @@ COPY foo / _, err = f.Solve(context.TODO(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -917,7 +917,7 @@ COPY foo / _, err = f.Solve(context.TODO(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: 
map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -963,7 +963,7 @@ COPY sub/l* alllinks/ _, err = f.Solve(context.TODO(), c, client.SolveOpt{ FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -2285,7 +2285,7 @@ COPY --from=base /out / }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -2344,7 +2344,7 @@ COPY files dest }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -2396,7 +2396,7 @@ COPY $FOO baz }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, @@ -2455,7 +2455,7 @@ COPY sub/dir1 subdest6 }, }, FrontendAttrs: map[string]string{ - "build-arg:BUILDKIT_USE_FILEOP": strconv.FormatBool(isFileOp), + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, LocalDirs: map[string]string{ builder.DefaultLocalNameDockerfile: dir, diff --git a/solver/pb/ops.pb.go b/solver/pb/ops.pb.go index b8ee30029033..394b325a5456 100644 --- a/solver/pb/ops.pb.go +++ b/solver/pb/ops.pb.go @@ -54,7 +54,7 @@ func (x NetMode) String() string { return proto.EnumName(NetMode_name, int32(x)) } func (NetMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{0} + return fileDescriptor_ops_8d64813b9835ab08, []int{0} } // MountType defines a type of 
a mount from a supported set @@ -87,7 +87,7 @@ func (x MountType) String() string { return proto.EnumName(MountType_name, int32(x)) } func (MountType) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{1} + return fileDescriptor_ops_8d64813b9835ab08, []int{1} } // CacheSharingOpt defines different sharing modes for cache mount @@ -117,7 +117,7 @@ func (x CacheSharingOpt) String() string { return proto.EnumName(CacheSharingOpt_name, int32(x)) } func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{2} + return fileDescriptor_ops_8d64813b9835ab08, []int{2} } // Op represents a vertex of the LLB DAG. @@ -138,7 +138,7 @@ func (m *Op) Reset() { *m = Op{} } func (m *Op) String() string { return proto.CompactTextString(m) } func (*Op) ProtoMessage() {} func (*Op) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{0} + return fileDescriptor_ops_8d64813b9835ab08, []int{0} } func (m *Op) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -368,7 +368,7 @@ func (m *Platform) Reset() { *m = Platform{} } func (m *Platform) String() string { return proto.CompactTextString(m) } func (*Platform) ProtoMessage() {} func (*Platform) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{1} + return fileDescriptor_ops_8d64813b9835ab08, []int{1} } func (m *Platform) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -440,7 +440,7 @@ func (m *Input) Reset() { *m = Input{} } func (m *Input) String() string { return proto.CompactTextString(m) } func (*Input) ProtoMessage() {} func (*Input) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{2} + return fileDescriptor_ops_8d64813b9835ab08, []int{2} } func (m *Input) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -476,7 +476,7 @@ func (m *ExecOp) Reset() { *m = ExecOp{} } func (m *ExecOp) String() string { return proto.CompactTextString(m) } 
func (*ExecOp) ProtoMessage() {} func (*ExecOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{3} + return fileDescriptor_ops_8d64813b9835ab08, []int{3} } func (m *ExecOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -538,7 +538,7 @@ func (m *Meta) Reset() { *m = Meta{} } func (m *Meta) String() string { return proto.CompactTextString(m) } func (*Meta) ProtoMessage() {} func (*Meta) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{4} + return fileDescriptor_ops_8d64813b9835ab08, []int{4} } func (m *Meta) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -622,7 +622,7 @@ func (m *Mount) Reset() { *m = Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{5} + return fileDescriptor_ops_8d64813b9835ab08, []int{5} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -708,7 +708,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{6} + return fileDescriptor_ops_8d64813b9835ab08, []int{6} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -766,7 +766,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{7} + return fileDescriptor_ops_8d64813b9835ab08, []int{7} } func (m *SecretOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -845,7 +845,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func 
(*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{8} + return fileDescriptor_ops_8d64813b9835ab08, []int{8} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -918,7 +918,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{9} + return fileDescriptor_ops_8d64813b9835ab08, []int{9} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -970,7 +970,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{10} + return fileDescriptor_ops_8d64813b9835ab08, []int{10} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1025,7 +1025,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{11} + return fileDescriptor_ops_8d64813b9835ab08, []int{11} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1066,7 +1066,7 @@ func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{12} + return fileDescriptor_ops_8d64813b9835ab08, []int{12} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1127,7 +1127,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m 
*ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{13} + return fileDescriptor_ops_8d64813b9835ab08, []int{13} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1170,7 +1170,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{14} + return fileDescriptor_ops_8d64813b9835ab08, []int{14} } func (m *ProxyEnv) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1232,7 +1232,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{15} + return fileDescriptor_ops_8d64813b9835ab08, []int{15} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1277,7 +1277,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{16} + return fileDescriptor_ops_8d64813b9835ab08, []int{16} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1325,7 +1325,7 @@ func (m *HostIP) Reset() { *m = HostIP{} } func (m *HostIP) String() string { return proto.CompactTextString(m) } func (*HostIP) ProtoMessage() {} func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{17} + return fileDescriptor_ops_8d64813b9835ab08, []int{17} } func (m *HostIP) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1372,7 +1372,7 @@ func (m *FileOp) Reset() { *m = FileOp{} } func (m *FileOp) String() string { return proto.CompactTextString(m) } func (*FileOp) ProtoMessage() {} func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{18} + return fileDescriptor_ops_8d64813b9835ab08, []int{18} } func (m *FileOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1420,7 +1420,7 @@ func (m *FileAction) Reset() { *m = FileAction{} } func (m *FileAction) String() string { return proto.CompactTextString(m) } func (*FileAction) ProtoMessage() {} func (*FileAction) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{19} + return fileDescriptor_ops_8d64813b9835ab08, []int{19} } func (m *FileAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1622,30 +1622,30 @@ type FileActionCopy struct { // dest path Dest string `protobuf:"bytes,2,opt,name=dest,proto3" json:"dest,omitempty"` // optional owner override - Owner *ChownOpt `protobuf:"bytes,4,opt,name=owner,proto3" json:"owner,omitempty"` + Owner *ChownOpt `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` // optional permission bits override - Mode int32 `protobuf:"varint,5,opt,name=mode,proto3" json:"mode,omitempty"` + Mode int32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"` // followSymlink resolves symlinks in src - FollowSymlink bool `protobuf:"varint,6,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` + FollowSymlink bool `protobuf:"varint,5,opt,name=followSymlink,proto3" json:"followSymlink,omitempty"` // dirCopyContents only copies contents if src is a directory - DirCopyContents bool `protobuf:"varint,7,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` + DirCopyContents bool `protobuf:"varint,6,opt,name=dirCopyContents,proto3" json:"dirCopyContents,omitempty"` // attemptUnpackDockerCompatibility detects if src is an 
archive to unpack it instead - AttemptUnpackDockerCompatibility bool `protobuf:"varint,8,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` + AttemptUnpackDockerCompatibility bool `protobuf:"varint,7,opt,name=attemptUnpackDockerCompatibility,proto3" json:"attemptUnpackDockerCompatibility,omitempty"` // createDestPath creates dest path directories if needed - CreateDestPath bool `protobuf:"varint,9,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` + CreateDestPath bool `protobuf:"varint,8,opt,name=createDestPath,proto3" json:"createDestPath,omitempty"` // allowWildcard allows filepath.Match wildcards in src path - AllowWildcard bool `protobuf:"varint,10,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` + AllowWildcard bool `protobuf:"varint,9,opt,name=allowWildcard,proto3" json:"allowWildcard,omitempty"` // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files - AllowEmptyWildcard bool `protobuf:"varint,11,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` + AllowEmptyWildcard bool `protobuf:"varint,10,opt,name=allowEmptyWildcard,proto3" json:"allowEmptyWildcard,omitempty"` // optional created time override - Timestamp int64 `protobuf:"varint,12,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,11,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } func (*FileActionCopy) ProtoMessage() {} func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{20} + return fileDescriptor_ops_8d64813b9835ab08, []int{20} } func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1764,7 +1764,7 @@ func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } func (m *FileActionMkFile) String() string { 
return proto.CompactTextString(m) } func (*FileActionMkFile) ProtoMessage() {} func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{21} + return fileDescriptor_ops_8d64813b9835ab08, []int{21} } func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1841,7 +1841,7 @@ func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } func (*FileActionMkDir) ProtoMessage() {} func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{22} + return fileDescriptor_ops_8d64813b9835ab08, []int{22} } func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1914,7 +1914,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} } func (m *FileActionRm) String() string { return proto.CompactTextString(m) } func (*FileActionRm) ProtoMessage() {} func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{23} + return fileDescriptor_ops_8d64813b9835ab08, []int{23} } func (m *FileActionRm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1969,7 +1969,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} } func (m *ChownOpt) String() string { return proto.CompactTextString(m) } func (*ChownOpt) ProtoMessage() {} func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{24} + return fileDescriptor_ops_8d64813b9835ab08, []int{24} } func (m *ChownOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2019,7 +2019,7 @@ func (m *UserOpt) Reset() { *m = UserOpt{} } func (m *UserOpt) String() string { return proto.CompactTextString(m) } func (*UserOpt) ProtoMessage() {} func (*UserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{25} + return fileDescriptor_ops_8d64813b9835ab08, []int{25} } func (m *UserOpt) XXX_Unmarshal(b 
[]byte) error { return m.Unmarshal(b) @@ -2159,7 +2159,7 @@ func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} } func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) } func (*NamedUserOpt) ProtoMessage() {} func (*NamedUserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_ops_7f9890b817ed58ee, []int{26} + return fileDescriptor_ops_8d64813b9835ab08, []int{26} } func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3360,7 +3360,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i += copy(dAtA[i:], m.Dest) } if m.Owner != nil { - dAtA[i] = 0x22 + dAtA[i] = 0x1a i++ i = encodeVarintOps(dAtA, i, uint64(m.Owner.Size())) n22, err := m.Owner.MarshalTo(dAtA[i:]) @@ -3370,12 +3370,12 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i += n22 } if m.Mode != 0 { - dAtA[i] = 0x28 + dAtA[i] = 0x20 i++ i = encodeVarintOps(dAtA, i, uint64(m.Mode)) } if m.FollowSymlink { - dAtA[i] = 0x30 + dAtA[i] = 0x28 i++ if m.FollowSymlink { dAtA[i] = 1 @@ -3385,7 +3385,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i++ } if m.DirCopyContents { - dAtA[i] = 0x38 + dAtA[i] = 0x30 i++ if m.DirCopyContents { dAtA[i] = 1 @@ -3395,7 +3395,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i++ } if m.AttemptUnpackDockerCompatibility { - dAtA[i] = 0x40 + dAtA[i] = 0x38 i++ if m.AttemptUnpackDockerCompatibility { dAtA[i] = 1 @@ -3405,7 +3405,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i++ } if m.CreateDestPath { - dAtA[i] = 0x48 + dAtA[i] = 0x40 i++ if m.CreateDestPath { dAtA[i] = 1 @@ -3415,7 +3415,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i++ } if m.AllowWildcard { - dAtA[i] = 0x50 + dAtA[i] = 0x48 i++ if m.AllowWildcard { dAtA[i] = 1 @@ -3425,7 +3425,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i++ } if m.AllowEmptyWildcard { - dAtA[i] = 0x58 + dAtA[i] = 0x50 i++ if m.AllowEmptyWildcard { dAtA[i] = 
1 @@ -3435,7 +3435,7 @@ func (m *FileActionCopy) MarshalTo(dAtA []byte) (int, error) { i++ } if m.Timestamp != 0 { - dAtA[i] = 0x60 + dAtA[i] = 0x58 i++ i = encodeVarintOps(dAtA, i, uint64(m.Timestamp)) } @@ -8068,7 +8068,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } m.Dest = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) } @@ -8101,7 +8101,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: + case 4: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) } @@ -8120,7 +8120,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { break } } - case 6: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field FollowSymlink", wireType) } @@ -8140,7 +8140,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.FollowSymlink = bool(v != 0) - case 7: + case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field DirCopyContents", wireType) } @@ -8160,7 +8160,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.DirCopyContents = bool(v != 0) - case 8: + case 7: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AttemptUnpackDockerCompatibility", wireType) } @@ -8180,7 +8180,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.AttemptUnpackDockerCompatibility = bool(v != 0) - case 9: + case 8: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field CreateDestPath", wireType) } @@ -8200,7 +8200,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.CreateDestPath = bool(v != 0) - case 10: + case 9: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AllowWildcard", wireType) } @@ -8220,7 +8220,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.AllowWildcard = bool(v != 0) - case 
11: + case 10: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field AllowEmptyWildcard", wireType) } @@ -8240,7 +8240,7 @@ func (m *FileActionCopy) Unmarshal(dAtA []byte) error { } } m.AllowEmptyWildcard = bool(v != 0) - case 12: + case 11: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) } @@ -9171,129 +9171,129 @@ var ( ErrIntOverflowOps = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_7f9890b817ed58ee) } - -var fileDescriptor_ops_7f9890b817ed58ee = []byte{ - // 1927 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcf, 0x6e, 0x23, 0xc7, - 0xd1, 0x17, 0x87, 0x7f, 0xa7, 0x28, 0x69, 0xf9, 0xb5, 0xd7, 0xfb, 0x31, 0xca, 0x46, 0x92, 0xc7, - 0x8e, 0x41, 0x6b, 0x77, 0x29, 0x40, 0x06, 0x6c, 0xc3, 0x87, 0x20, 0xa2, 0xc8, 0x85, 0x68, 0x5b, - 0xa2, 0xd0, 0xd4, 0xae, 0x8f, 0x8b, 0xe1, 0x4c, 0x93, 0x1a, 0x90, 0x33, 0x3d, 0xe8, 0x69, 0xae, - 0xc4, 0x4b, 0x0e, 0xfb, 0x04, 0x06, 0x02, 0xe4, 0x96, 0x43, 0x2e, 0x01, 0xf2, 0x10, 0xb9, 0xfb, - 0x68, 0x04, 0x39, 0x38, 0x39, 0x38, 0xc1, 0xee, 0x8b, 0x04, 0xd5, 0xdd, 0xc3, 0x19, 0x72, 0x15, - 0xec, 0x2e, 0x12, 0xe4, 0xc4, 0xea, 0xaa, 0x5f, 0x57, 0x57, 0x57, 0x55, 0x57, 0xd5, 0x10, 0x6c, - 0x1e, 0x27, 0xed, 0x58, 0x70, 0xc9, 0x89, 0x15, 0x8f, 0x76, 0x1e, 0x4d, 0x02, 0x79, 0x35, 0x1f, - 0xb5, 0x3d, 0x1e, 0x1e, 0x4e, 0xf8, 0x84, 0x1f, 0x2a, 0xd1, 0x68, 0x3e, 0x56, 0x2b, 0xb5, 0x50, - 0x94, 0xde, 0xe2, 0xfc, 0xc1, 0x02, 0x6b, 0x10, 0x93, 0x0f, 0xa0, 0x12, 0x44, 0xf1, 0x5c, 0x26, - 0xcd, 0xc2, 0x7e, 0xb1, 0x55, 0x3f, 0xb2, 0xdb, 0xf1, 0xa8, 0xdd, 0x47, 0x0e, 0x35, 0x02, 0xb2, - 0x0f, 0x25, 0x76, 0xc3, 0xbc, 0xa6, 0xb5, 0x5f, 0x68, 0xd5, 0x8f, 0x00, 0x01, 0xbd, 0x1b, 0xe6, - 0x0d, 0xe2, 0xd3, 0x0d, 0xaa, 0x24, 0xe4, 0x63, 0xa8, 0x24, 0x7c, 0x2e, 0x3c, 0xd6, 0x2c, 0x2a, - 0xcc, 0x26, 0x62, 0x86, 0x8a, 0xa3, 0x50, 0x46, 0x8a, 0x9a, 0xc6, 0xc1, 0x8c, 
0x35, 0x4b, 0x99, - 0xa6, 0xc7, 0xc1, 0x4c, 0x63, 0x94, 0x84, 0x7c, 0x08, 0xe5, 0xd1, 0x3c, 0x98, 0xf9, 0xcd, 0xb2, - 0x82, 0xd4, 0x11, 0xd2, 0x41, 0x86, 0xc2, 0x68, 0x19, 0x69, 0x41, 0x2d, 0x9e, 0xb9, 0x72, 0xcc, - 0x45, 0xd8, 0x84, 0xec, 0xc0, 0x0b, 0xc3, 0xa3, 0x4b, 0x29, 0xf9, 0x1c, 0xea, 0x1e, 0x8f, 0x12, - 0x29, 0xdc, 0x20, 0x92, 0x49, 0xb3, 0xae, 0xc0, 0xef, 0x23, 0xf8, 0x5b, 0x2e, 0xa6, 0x4c, 0x9c, - 0x64, 0x42, 0x9a, 0x47, 0x76, 0x4a, 0x60, 0xf1, 0xd8, 0xf9, 0x5d, 0x01, 0x6a, 0xa9, 0x56, 0xe2, - 0xc0, 0xe6, 0xb1, 0xf0, 0xae, 0x02, 0xc9, 0x3c, 0x39, 0x17, 0xac, 0x59, 0xd8, 0x2f, 0xb4, 0x6c, - 0xba, 0xc2, 0x23, 0xdb, 0x60, 0x0d, 0x86, 0xca, 0x51, 0x36, 0xb5, 0x06, 0x43, 0xd2, 0x84, 0xea, - 0x53, 0x57, 0x04, 0x6e, 0x24, 0x95, 0x67, 0x6c, 0x9a, 0x2e, 0xc9, 0x7d, 0xb0, 0x07, 0xc3, 0xa7, - 0x4c, 0x24, 0x01, 0x8f, 0x94, 0x3f, 0x6c, 0x9a, 0x31, 0xc8, 0x2e, 0xc0, 0x60, 0xf8, 0x98, 0xb9, - 0xa8, 0x34, 0x69, 0x96, 0xf7, 0x8b, 0x2d, 0x9b, 0xe6, 0x38, 0xce, 0x6f, 0xa0, 0xac, 0x62, 0x44, - 0xbe, 0x82, 0x8a, 0x1f, 0x4c, 0x58, 0x22, 0xb5, 0x39, 0x9d, 0xa3, 0xef, 0x7f, 0xda, 0xdb, 0xf8, - 0xfb, 0x4f, 0x7b, 0x07, 0xb9, 0x64, 0xe0, 0x31, 0x8b, 0x3c, 0x1e, 0x49, 0x37, 0x88, 0x98, 0x48, - 0x0e, 0x27, 0xfc, 0x91, 0xde, 0xd2, 0xee, 0xaa, 0x1f, 0x6a, 0x34, 0x90, 0x4f, 0xa0, 0x1c, 0x44, - 0x3e, 0xbb, 0x51, 0xf6, 0x17, 0x3b, 0xef, 0x19, 0x55, 0xf5, 0xc1, 0x5c, 0xc6, 0x73, 0xd9, 0x47, - 0x11, 0xd5, 0x08, 0x27, 0x86, 0x8a, 0x4e, 0x01, 0x72, 0x1f, 0x4a, 0x21, 0x93, 0xae, 0x3a, 0xbe, - 0x7e, 0x54, 0x43, 0xd7, 0x9e, 0x31, 0xe9, 0x52, 0xc5, 0xc5, 0xec, 0x0a, 0xf9, 0x1c, 0x5d, 0x6f, - 0x65, 0xd9, 0x75, 0x86, 0x1c, 0x6a, 0x04, 0xe4, 0x97, 0x50, 0x8d, 0x98, 0xbc, 0xe6, 0x62, 0xaa, - 0x5c, 0xb4, 0xad, 0x63, 0x7e, 0xce, 0xe4, 0x19, 0xf7, 0x19, 0x4d, 0x65, 0xce, 0x9f, 0x0a, 0x50, - 0x42, 0xc5, 0x84, 0x40, 0xc9, 0x15, 0x13, 0x9d, 0xae, 0x36, 0x55, 0x34, 0x69, 0x40, 0x91, 0x45, - 0xcf, 0xd5, 0x19, 0x36, 0x45, 0x12, 0x39, 0xde, 0xb5, 0x6f, 0x9c, 0x8e, 0x24, 0xee, 0x9b, 0x27, - 0x4c, 0x18, 0x5f, 
0x2b, 0x9a, 0x7c, 0x02, 0x76, 0x2c, 0xf8, 0xcd, 0xe2, 0x19, 0xee, 0x2e, 0xe7, - 0x32, 0x09, 0x99, 0xbd, 0xe8, 0x39, 0xad, 0xc5, 0x86, 0x22, 0x07, 0x00, 0xec, 0x46, 0x0a, 0xf7, - 0x94, 0x27, 0x32, 0x69, 0x56, 0xd4, 0x6d, 0x54, 0x02, 0x23, 0xa3, 0x7f, 0x41, 0x73, 0x52, 0xe7, - 0x2f, 0x16, 0x94, 0xd5, 0x25, 0x49, 0x0b, 0x5d, 0x1a, 0xcf, 0x75, 0x74, 0x8a, 0x1d, 0x62, 0x5c, - 0x0a, 0x2a, 0x78, 0x4b, 0x8f, 0x62, 0x20, 0x77, 0xa0, 0x96, 0xb0, 0x19, 0xf3, 0x24, 0x17, 0x26, - 0x7f, 0x96, 0x6b, 0x34, 0xdd, 0xc7, 0x10, 0xeb, 0xdb, 0x28, 0x9a, 0x3c, 0x80, 0x0a, 0x57, 0x71, - 0x51, 0x17, 0xfa, 0x37, 0xd1, 0x32, 0x10, 0x54, 0x2e, 0x98, 0xeb, 0xf3, 0x68, 0xb6, 0x50, 0xd7, - 0xac, 0xd1, 0xe5, 0x9a, 0x3c, 0x00, 0x5b, 0x45, 0xe2, 0x72, 0x11, 0xb3, 0x66, 0x45, 0x45, 0x60, - 0x6b, 0x19, 0x25, 0x64, 0xd2, 0x4c, 0x8e, 0x2f, 0xcf, 0x73, 0xbd, 0x2b, 0x36, 0x88, 0x65, 0xf3, - 0x6e, 0xe6, 0xaf, 0x13, 0xc3, 0xa3, 0x4b, 0x29, 0xaa, 0x4d, 0x98, 0x27, 0x98, 0x44, 0xe8, 0xfb, - 0x0a, 0xaa, 0xd4, 0x0e, 0x53, 0x26, 0xcd, 0xe4, 0xc4, 0x81, 0xca, 0x70, 0x78, 0x8a, 0xc8, 0x7b, - 0x59, 0x65, 0xd0, 0x1c, 0x6a, 0x24, 0x4e, 0x1f, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0xfa, 0x5d, 0xf3, - 0x00, 0xad, 0x7e, 0x97, 0x3c, 0x82, 0x6a, 0x72, 0xe5, 0x8a, 0x20, 0x9a, 0x28, 0xdf, 0x6d, 0x1f, - 0xbd, 0xb7, 0xb4, 0x6a, 0xa8, 0xf9, 0xa8, 0x29, 0xc5, 0x38, 0x1c, 0xec, 0xa5, 0x19, 0xaf, 0xe9, - 0x6a, 0x40, 0x71, 0x1e, 0xf8, 0x4a, 0xcf, 0x16, 0x45, 0x12, 0x39, 0x93, 0x40, 0xe7, 0xd2, 0x16, - 0x45, 0x12, 0x03, 0x12, 0x72, 0x5f, 0xd7, 0xb1, 0x2d, 0xaa, 0x68, 0xf4, 0x31, 0x8f, 0x65, 0xc0, - 0x23, 0x77, 0x96, 0xfa, 0x38, 0x5d, 0x3b, 0xb3, 0xf4, 0x7e, 0xff, 0x93, 0xd3, 0x7e, 0x5b, 0x80, - 0x5a, 0x5a, 0x7c, 0xb1, 0x92, 0x04, 0x3e, 0x8b, 0x64, 0x30, 0x0e, 0x98, 0x30, 0x07, 0xe7, 0x38, - 0xe4, 0x11, 0x94, 0x5d, 0x29, 0x45, 0xfa, 0x40, 0xff, 0x3f, 0x5f, 0xb9, 0xdb, 0xc7, 0x28, 0xe9, - 0x45, 0x52, 0x2c, 0xa8, 0x46, 0xed, 0x7c, 0x01, 0x90, 0x31, 0xd1, 0xd6, 0x29, 0x5b, 0x18, 0xad, - 0x48, 0x92, 0xbb, 0x50, 0x7e, 0xee, 0xce, 0xe6, 0xcc, 
0xe4, 0xb0, 0x5e, 0x7c, 0x69, 0x7d, 0x51, - 0x70, 0xfe, 0x6c, 0x41, 0xd5, 0x54, 0x72, 0xf2, 0x10, 0xaa, 0xaa, 0x92, 0x1b, 0x8b, 0x6e, 0x7f, - 0x18, 0x29, 0x84, 0x1c, 0x2e, 0x5b, 0x54, 0xce, 0x46, 0xa3, 0x4a, 0xb7, 0x2a, 0x63, 0x63, 0xd6, - 0xb0, 0x8a, 0x3e, 0x1b, 0x9b, 0x5e, 0xb4, 0x8d, 0xe8, 0x2e, 0x1b, 0x07, 0x51, 0x80, 0xfe, 0xa1, - 0x28, 0x22, 0x0f, 0xd3, 0x5b, 0x97, 0x94, 0xc6, 0x7b, 0x79, 0x8d, 0xaf, 0x5f, 0xba, 0x0f, 0xf5, - 0xdc, 0x31, 0xb7, 0xdc, 0xfa, 0xa3, 0xfc, 0xad, 0xcd, 0x91, 0x4a, 0x9d, 0x6e, 0xa4, 0x99, 0x17, - 0xfe, 0x03, 0xff, 0x7d, 0x06, 0x90, 0xa9, 0x7c, 0xfb, 0xc2, 0xe2, 0xbc, 0x28, 0x02, 0x0c, 0x62, - 0x2c, 0x9d, 0xbe, 0xab, 0x2a, 0xf2, 0x66, 0x30, 0x89, 0xb8, 0x60, 0xcf, 0xd4, 0x53, 0x55, 0xfb, - 0x6b, 0xb4, 0xae, 0x79, 0xea, 0xc5, 0x90, 0x63, 0xa8, 0xfb, 0x2c, 0xf1, 0x44, 0xa0, 0x12, 0xca, - 0x38, 0x7d, 0x0f, 0xef, 0x94, 0xe9, 0x69, 0x77, 0x33, 0x84, 0xf6, 0x55, 0x7e, 0x0f, 0x39, 0x82, - 0x4d, 0x76, 0x13, 0x73, 0x21, 0xcd, 0x29, 0xba, 0xe1, 0xdf, 0xd1, 0xa3, 0x03, 0xf2, 0xd5, 0x49, - 0xb4, 0xce, 0xb2, 0x05, 0x71, 0xa1, 0xe4, 0xb9, 0xb1, 0xee, 0x76, 0xf5, 0xa3, 0xe6, 0xda, 0x79, - 0x27, 0x6e, 0xac, 0x9d, 0xd6, 0xf9, 0x14, 0xef, 0xfa, 0xe2, 0x1f, 0x7b, 0x0f, 0x72, 0x2d, 0x2e, - 0xe4, 0xa3, 0xc5, 0xa1, 0xca, 0x97, 0x69, 0x20, 0x0f, 0xe7, 0x32, 0x98, 0x1d, 0xba, 0x71, 0x80, - 0xea, 0x70, 0x63, 0xbf, 0x4b, 0x95, 0xea, 0x9d, 0x5f, 0x41, 0x63, 0xdd, 0xee, 0x77, 0x89, 0xc1, - 0xce, 0xe7, 0x60, 0x2f, 0xed, 0x78, 0xd3, 0xc6, 0x5a, 0x3e, 0x78, 0x1f, 0x42, 0x3d, 0x77, 0x6f, - 0x04, 0x3e, 0x55, 0x40, 0xed, 0x7d, 0xbd, 0x70, 0x5e, 0xe0, 0xb4, 0x91, 0xf6, 0x9b, 0x5f, 0x00, - 0x5c, 0x49, 0x19, 0x3f, 0x53, 0x0d, 0xc8, 0x1c, 0x62, 0x23, 0x47, 0x21, 0xc8, 0x1e, 0xd4, 0x71, - 0x91, 0x18, 0xb9, 0xb6, 0x54, 0xed, 0x48, 0x34, 0xe0, 0xe7, 0x60, 0x8f, 0x97, 0xdb, 0x75, 0xe3, - 0xa8, 0x8d, 0xd3, 0xdd, 0x3f, 0x83, 0x5a, 0xc4, 0x8d, 0x4c, 0xf7, 0xc3, 0x6a, 0xc4, 0x95, 0xc8, - 0x79, 0x00, 0xff, 0xf7, 0xda, 0x68, 0x44, 0xee, 0x41, 0x65, 0x1c, 0xcc, 0xa4, 0x7a, 0xae, 
0xd8, - 0x62, 0xcd, 0xca, 0xf9, 0x5b, 0x01, 0x20, 0x7b, 0x5a, 0xe8, 0x11, 0x7c, 0x77, 0x88, 0xd9, 0xd4, - 0xef, 0x6c, 0x06, 0xb5, 0xd0, 0x44, 0xd0, 0xe4, 0xd1, 0xfd, 0xd5, 0xe7, 0xd8, 0x4e, 0x03, 0xac, - 0x63, 0x7b, 0x64, 0x62, 0xfb, 0x2e, 0xe3, 0xcb, 0xf2, 0x84, 0x9d, 0xaf, 0x61, 0x6b, 0x45, 0xdd, - 0x5b, 0xbe, 0xd4, 0x2c, 0xcb, 0xf2, 0x21, 0x7b, 0x08, 0x15, 0xdd, 0xda, 0xb1, 0xfe, 0x22, 0x65, - 0xd4, 0x28, 0x5a, 0xd5, 0xf1, 0x8b, 0x74, 0xd0, 0xeb, 0x5f, 0x38, 0x47, 0x50, 0xd1, 0x93, 0x2c, - 0x69, 0x41, 0xd5, 0xf5, 0xf0, 0x6a, 0x69, 0xb9, 0xda, 0x4e, 0xc7, 0xdc, 0x63, 0xc5, 0xa6, 0xa9, - 0xd8, 0xf9, 0xab, 0x05, 0x90, 0xf1, 0xdf, 0x61, 0x56, 0xf8, 0x12, 0xb6, 0x13, 0xe6, 0xf1, 0xc8, - 0x77, 0xc5, 0x42, 0x49, 0xcd, 0xc4, 0x76, 0xdb, 0x96, 0x35, 0x64, 0x6e, 0x6e, 0x28, 0xbe, 0x79, - 0x6e, 0x68, 0x41, 0xc9, 0xe3, 0xf1, 0xc2, 0x3c, 0x5f, 0xb2, 0x7a, 0x91, 0x13, 0x1e, 0x2f, 0x70, - 0x6e, 0x47, 0x04, 0x69, 0x43, 0x25, 0x9c, 0xaa, 0xd9, 0x5e, 0x8f, 0x51, 0x77, 0x57, 0xb1, 0x67, - 0x53, 0xa4, 0xf1, 0x4b, 0x40, 0xa3, 0xc8, 0x03, 0x28, 0x87, 0x53, 0x3f, 0x10, 0x6a, 0xe2, 0xa8, - 0xeb, 0x7e, 0x9d, 0x87, 0x77, 0x03, 0x81, 0xf3, 0xbe, 0xc2, 0x10, 0x07, 0x2c, 0x11, 0x36, 0xab, - 0x0a, 0xd9, 0x58, 0xf3, 0x66, 0x78, 0xba, 0x41, 0x2d, 0x11, 0x76, 0x6a, 0x50, 0xd1, 0x7e, 0x75, - 0xfe, 0x58, 0x84, 0xed, 0x55, 0x2b, 0x31, 0x0f, 0x12, 0xe1, 0xa5, 0x79, 0x90, 0x08, 0x6f, 0x39, - 0x52, 0x59, 0xb9, 0x91, 0xca, 0x81, 0x32, 0xbf, 0x8e, 0xcc, 0x88, 0x98, 0x4e, 0x36, 0x57, 0xfc, - 0x3a, 0xc2, 0xe1, 0x41, 0x8b, 0x96, 0xbd, 0x18, 0x6f, 0x59, 0x36, 0xbd, 0xf8, 0x23, 0xd8, 0x1a, - 0xf3, 0xd9, 0x8c, 0x5f, 0x0f, 0x17, 0xe1, 0x2c, 0x88, 0xa6, 0xea, 0x4e, 0x35, 0xba, 0xca, 0x24, - 0x2d, 0xb8, 0xe3, 0x07, 0x02, 0xcd, 0x39, 0xe1, 0x91, 0x64, 0x38, 0x13, 0x57, 0x15, 0x6e, 0x9d, - 0x4d, 0xbe, 0x82, 0x7d, 0x57, 0x4a, 0x16, 0xc6, 0xf2, 0x49, 0x14, 0xbb, 0xde, 0xb4, 0xcb, 0x3d, - 0xf5, 0x1e, 0xc3, 0xd8, 0x95, 0xc1, 0x28, 0x98, 0x05, 0x72, 0xd1, 0xac, 0xa9, 0xad, 0x6f, 0xc4, - 0x91, 0x8f, 0x61, 0xdb, 0x13, 
0xcc, 0x95, 0xac, 0xcb, 0x12, 0x79, 0xe1, 0xca, 0xab, 0xa6, 0xad, - 0x76, 0xae, 0x71, 0xf1, 0x0e, 0x2e, 0x5a, 0xfb, 0x6d, 0x30, 0xf3, 0x3d, 0x57, 0xf8, 0xea, 0xbb, - 0xaa, 0x46, 0x57, 0x99, 0xa4, 0x0d, 0x44, 0x31, 0x7a, 0x61, 0x2c, 0x17, 0x4b, 0x68, 0x5d, 0x41, - 0x6f, 0x91, 0xe0, 0x47, 0x8e, 0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0x9b, 0x9b, 0x98, 0x6f, 0x34, - 0x63, 0x38, 0xdf, 0x15, 0xa0, 0xb1, 0x9e, 0x22, 0xe8, 0xe0, 0x18, 0xcd, 0x34, 0x8f, 0x0d, 0xe9, - 0xa5, 0xd3, 0xad, 0x9c, 0xd3, 0x31, 0x80, 0x58, 0x55, 0x30, 0x8b, 0x37, 0xa9, 0xa2, 0xdf, 0x2a, - 0x80, 0x2b, 0x26, 0x95, 0xd7, 0x4d, 0xfa, 0x7d, 0x01, 0xee, 0xac, 0xa5, 0xe1, 0x5b, 0x5b, 0xb4, - 0x0f, 0xf5, 0xd0, 0x9d, 0xb2, 0x0b, 0x57, 0xa8, 0xe0, 0x16, 0x75, 0x63, 0xcd, 0xb1, 0xfe, 0x0b, - 0xf6, 0x45, 0xb0, 0x99, 0xcf, 0xfd, 0x5b, 0x6d, 0x4b, 0x43, 0x79, 0xce, 0xe5, 0x63, 0x3e, 0x8f, - 0x7c, 0xd3, 0x8d, 0x56, 0x99, 0xaf, 0x07, 0xbc, 0x78, 0x4b, 0xc0, 0x9d, 0x73, 0xa8, 0xa5, 0x06, - 0x92, 0x3d, 0xf3, 0x01, 0x55, 0xc8, 0xbe, 0xcc, 0x9f, 0x24, 0x4c, 0xa0, 0xed, 0xfa, 0x6b, 0xea, - 0x03, 0x28, 0x4f, 0x04, 0x9f, 0xc7, 0xa6, 0xb6, 0xae, 0x20, 0xb4, 0xc4, 0x19, 0x42, 0xd5, 0x70, - 0xc8, 0x01, 0x54, 0x46, 0x8b, 0x73, 0x37, 0x64, 0x46, 0xa1, 0x7a, 0xd8, 0xb8, 0xf6, 0x0d, 0x02, - 0xab, 0x85, 0x46, 0x90, 0xbb, 0x50, 0x1a, 0x2d, 0xfa, 0x5d, 0x3d, 0x26, 0x63, 0xcd, 0xc1, 0x55, - 0xa7, 0xa2, 0x0d, 0x72, 0xbe, 0x81, 0xcd, 0xfc, 0x3e, 0x74, 0x4a, 0x94, 0xea, 0xb5, 0xa9, 0xa2, - 0xb3, 0xe2, 0x6a, 0xbd, 0xa1, 0xb8, 0x1e, 0xb4, 0xa0, 0x6a, 0x3e, 0x3e, 0x89, 0x0d, 0xe5, 0x27, - 0xe7, 0xc3, 0xde, 0x65, 0x63, 0x83, 0xd4, 0xa0, 0x74, 0x3a, 0x18, 0x5e, 0x36, 0x0a, 0x48, 0x9d, - 0x0f, 0xce, 0x7b, 0x0d, 0xeb, 0xe0, 0xd7, 0x60, 0x2f, 0x3f, 0x92, 0x90, 0xdd, 0xe9, 0x9f, 0x77, - 0x1b, 0x1b, 0x04, 0xa0, 0x32, 0xec, 0x9d, 0xd0, 0x1e, 0x82, 0xab, 0x50, 0x1c, 0x0e, 0x4f, 0x1b, - 0x16, 0xaa, 0x3a, 0x39, 0x3e, 0x39, 0xed, 0x35, 0x8a, 0x48, 0x5e, 0x9e, 0x5d, 0x3c, 0x1e, 0x36, - 0x4a, 0x07, 0x9f, 0xc1, 0x9d, 0xb5, 0x8f, 0x14, 0xb5, 0xfb, 0xf4, 
0x98, 0xf6, 0x50, 0x53, 0x1d, - 0xaa, 0x17, 0xb4, 0xff, 0xf4, 0xf8, 0xb2, 0xd7, 0x28, 0xa0, 0xe0, 0x9b, 0xc1, 0xc9, 0xd7, 0xbd, - 0x6e, 0xc3, 0xea, 0xdc, 0xff, 0xfe, 0xe5, 0x6e, 0xe1, 0x87, 0x97, 0xbb, 0x85, 0x1f, 0x5f, 0xee, - 0x16, 0xfe, 0xf9, 0x72, 0xb7, 0xf0, 0xdd, 0xab, 0xdd, 0x8d, 0x1f, 0x5e, 0xed, 0x6e, 0xfc, 0xf8, - 0x6a, 0x77, 0x63, 0x54, 0x51, 0x7f, 0xf0, 0x7c, 0xfa, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd8, - 0x15, 0x80, 0x49, 0x20, 0x12, 0x00, 0x00, +func init() { proto.RegisterFile("ops.proto", fileDescriptor_ops_8d64813b9835ab08) } + +var fileDescriptor_ops_8d64813b9835ab08 = []byte{ + // 1924 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x5f, 0x6f, 0x1b, 0xc7, + 0x11, 0x17, 0x8f, 0x7f, 0x6f, 0x28, 0xc9, 0xec, 0xc6, 0x49, 0x59, 0xd5, 0x95, 0x94, 0x4b, 0x1a, + 0x30, 0xb2, 0x4d, 0x01, 0x0a, 0x90, 0x04, 0x79, 0x28, 0x2a, 0xfe, 0x31, 0xc4, 0x24, 0x16, 0x85, + 0xa5, 0xed, 0x3c, 0x1a, 0xc7, 0xbb, 0x25, 0x75, 0x20, 0xef, 0xf6, 0xb0, 0xb7, 0xb4, 0xc4, 0x97, + 0x3e, 0xf8, 0x13, 0x04, 0x28, 0xd0, 0xb7, 0x3e, 0xf4, 0xa5, 0x40, 0x3f, 0x44, 0xdf, 0xf3, 0x18, + 0x14, 0x7d, 0x48, 0xfb, 0x90, 0x16, 0xf6, 0x17, 0x29, 0x66, 0x77, 0x8f, 0x77, 0xa4, 0x15, 0xd8, + 0x46, 0x8b, 0x3e, 0x71, 0x76, 0xe6, 0xb7, 0xb3, 0xb3, 0x33, 0xb3, 0x33, 0x73, 0x04, 0x9b, 0xc7, + 0x49, 0x3b, 0x16, 0x5c, 0x72, 0x62, 0xc5, 0xe3, 0xbd, 0xfb, 0xd3, 0x40, 0x5e, 0x2e, 0xc6, 0x6d, + 0x8f, 0x87, 0xc7, 0x53, 0x3e, 0xe5, 0xc7, 0x4a, 0x34, 0x5e, 0x4c, 0xd4, 0x4a, 0x2d, 0x14, 0xa5, + 0xb7, 0x38, 0x7f, 0xb2, 0xc0, 0x1a, 0xc6, 0xe4, 0x7d, 0xa8, 0x04, 0x51, 0xbc, 0x90, 0x49, 0xb3, + 0x70, 0x58, 0x6c, 0xd5, 0x4f, 0xec, 0x76, 0x3c, 0x6e, 0x0f, 0x90, 0x43, 0x8d, 0x80, 0x1c, 0x42, + 0x89, 0x5d, 0x33, 0xaf, 0x69, 0x1d, 0x16, 0x5a, 0xf5, 0x13, 0x40, 0x40, 0xff, 0x9a, 0x79, 0xc3, + 0xf8, 0x6c, 0x8b, 0x2a, 0x09, 0xf9, 0x08, 0x2a, 0x09, 0x5f, 0x08, 0x8f, 0x35, 0x8b, 0x0a, 0xb3, + 0x8d, 0x98, 0x91, 0xe2, 0x28, 0x94, 0x91, 0xa2, 0xa6, 0x49, 0x30, 
0x67, 0xcd, 0x52, 0xa6, 0xe9, + 0x41, 0x30, 0xd7, 0x18, 0x25, 0x21, 0x1f, 0x40, 0x79, 0xbc, 0x08, 0xe6, 0x7e, 0xb3, 0xac, 0x20, + 0x75, 0x84, 0x74, 0x90, 0xa1, 0x30, 0x5a, 0x46, 0x5a, 0x50, 0x8b, 0xe7, 0xae, 0x9c, 0x70, 0x11, + 0x36, 0x21, 0x3b, 0xf0, 0xc2, 0xf0, 0xe8, 0x4a, 0x4a, 0x3e, 0x83, 0xba, 0xc7, 0xa3, 0x44, 0x0a, + 0x37, 0x88, 0x64, 0xd2, 0xac, 0x2b, 0xf0, 0xbb, 0x08, 0xfe, 0x86, 0x8b, 0x19, 0x13, 0xdd, 0x4c, + 0x48, 0xf3, 0xc8, 0x4e, 0x09, 0x2c, 0x1e, 0x3b, 0x7f, 0x28, 0x40, 0x2d, 0xd5, 0x4a, 0x1c, 0xd8, + 0x3e, 0x15, 0xde, 0x65, 0x20, 0x99, 0x27, 0x17, 0x82, 0x35, 0x0b, 0x87, 0x85, 0x96, 0x4d, 0xd7, + 0x78, 0x64, 0x17, 0xac, 0xe1, 0x48, 0x39, 0xca, 0xa6, 0xd6, 0x70, 0x44, 0x9a, 0x50, 0x7d, 0xe2, + 0x8a, 0xc0, 0x8d, 0xa4, 0xf2, 0x8c, 0x4d, 0xd3, 0x25, 0xb9, 0x03, 0xf6, 0x70, 0xf4, 0x84, 0x89, + 0x24, 0xe0, 0x91, 0xf2, 0x87, 0x4d, 0x33, 0x06, 0xd9, 0x07, 0x18, 0x8e, 0x1e, 0x30, 0x17, 0x95, + 0x26, 0xcd, 0xf2, 0x61, 0xb1, 0x65, 0xd3, 0x1c, 0xc7, 0xf9, 0x1d, 0x94, 0x55, 0x8c, 0xc8, 0x97, + 0x50, 0xf1, 0x83, 0x29, 0x4b, 0xa4, 0x36, 0xa7, 0x73, 0xf2, 0xdd, 0x8f, 0x07, 0x5b, 0xff, 0xfc, + 0xf1, 0xe0, 0x28, 0x97, 0x0c, 0x3c, 0x66, 0x91, 0xc7, 0x23, 0xe9, 0x06, 0x11, 0x13, 0xc9, 0xf1, + 0x94, 0xdf, 0xd7, 0x5b, 0xda, 0x3d, 0xf5, 0x43, 0x8d, 0x06, 0xf2, 0x31, 0x94, 0x83, 0xc8, 0x67, + 0xd7, 0xca, 0xfe, 0x62, 0xe7, 0x1d, 0xa3, 0xaa, 0x3e, 0x5c, 0xc8, 0x78, 0x21, 0x07, 0x28, 0xa2, + 0x1a, 0xe1, 0xc4, 0x50, 0xd1, 0x29, 0x40, 0xee, 0x40, 0x29, 0x64, 0xd2, 0x55, 0xc7, 0xd7, 0x4f, + 0x6a, 0xe8, 0xda, 0x87, 0x4c, 0xba, 0x54, 0x71, 0x31, 0xbb, 0x42, 0xbe, 0x40, 0xd7, 0x5b, 0x59, + 0x76, 0x3d, 0x44, 0x0e, 0x35, 0x02, 0xf2, 0x6b, 0xa8, 0x46, 0x4c, 0x5e, 0x71, 0x31, 0x53, 0x2e, + 0xda, 0xd5, 0x31, 0x3f, 0x67, 0xf2, 0x21, 0xf7, 0x19, 0x4d, 0x65, 0xce, 0x5f, 0x0a, 0x50, 0x42, + 0xc5, 0x84, 0x40, 0xc9, 0x15, 0x53, 0x9d, 0xae, 0x36, 0x55, 0x34, 0x69, 0x40, 0x91, 0x45, 0xcf, + 0xd4, 0x19, 0x36, 0x45, 0x12, 0x39, 0xde, 0x95, 0x6f, 0x9c, 0x8e, 0x24, 0xee, 0x5b, 0x24, 0x4c, + 0x18, 
0x5f, 0x2b, 0x9a, 0x7c, 0x0c, 0x76, 0x2c, 0xf8, 0xf5, 0xf2, 0x29, 0xee, 0x2e, 0xe7, 0x32, + 0x09, 0x99, 0xfd, 0xe8, 0x19, 0xad, 0xc5, 0x86, 0x22, 0x47, 0x00, 0xec, 0x5a, 0x0a, 0xf7, 0x8c, + 0x27, 0x32, 0x69, 0x56, 0xd4, 0x6d, 0x54, 0x02, 0x23, 0x63, 0x70, 0x41, 0x73, 0x52, 0xe7, 0x6f, + 0x16, 0x94, 0xd5, 0x25, 0x49, 0x0b, 0x5d, 0x1a, 0x2f, 0x74, 0x74, 0x8a, 0x1d, 0x62, 0x5c, 0x0a, + 0x2a, 0x78, 0x2b, 0x8f, 0x62, 0x20, 0xf7, 0xa0, 0x96, 0xb0, 0x39, 0xf3, 0x24, 0x17, 0x26, 0x7f, + 0x56, 0x6b, 0x34, 0xdd, 0xc7, 0x10, 0xeb, 0xdb, 0x28, 0x9a, 0xdc, 0x85, 0x0a, 0x57, 0x71, 0x51, + 0x17, 0xfa, 0x89, 0x68, 0x19, 0x08, 0x2a, 0x17, 0xcc, 0xf5, 0x79, 0x34, 0x5f, 0xaa, 0x6b, 0xd6, + 0xe8, 0x6a, 0x4d, 0xee, 0x82, 0xad, 0x22, 0xf1, 0x68, 0x19, 0xb3, 0x66, 0x45, 0x45, 0x60, 0x67, + 0x15, 0x25, 0x64, 0xd2, 0x4c, 0x8e, 0x2f, 0xcf, 0x73, 0xbd, 0x4b, 0x36, 0x8c, 0x65, 0xf3, 0x76, + 0xe6, 0xaf, 0xae, 0xe1, 0xd1, 0x95, 0x14, 0xd5, 0x26, 0xcc, 0x13, 0x4c, 0x22, 0xf4, 0x5d, 0x05, + 0x55, 0x6a, 0x47, 0x29, 0x93, 0x66, 0x72, 0xe2, 0x40, 0x65, 0x34, 0x3a, 0x43, 0xe4, 0x7b, 0x59, + 0x65, 0xd0, 0x1c, 0x6a, 0x24, 0xce, 0x00, 0x6a, 0xe9, 0x31, 0xf8, 0xcc, 0x06, 0x3d, 0xf3, 0x00, + 0xad, 0x41, 0x8f, 0xdc, 0x87, 0x6a, 0x72, 0xe9, 0x8a, 0x20, 0x9a, 0x2a, 0xdf, 0xed, 0x9e, 0xbc, + 0xb3, 0xb2, 0x6a, 0xa4, 0xf9, 0xa8, 0x29, 0xc5, 0x38, 0x1c, 0xec, 0x95, 0x19, 0xaf, 0xe8, 0x6a, + 0x40, 0x71, 0x11, 0xf8, 0x4a, 0xcf, 0x0e, 0x45, 0x12, 0x39, 0xd3, 0x40, 0xe7, 0xd2, 0x0e, 0x45, + 0x12, 0x03, 0x12, 0x72, 0x5f, 0xd7, 0xb1, 0x1d, 0xaa, 0x68, 0xf4, 0x31, 0x8f, 0x65, 0xc0, 0x23, + 0x77, 0x9e, 0xfa, 0x38, 0x5d, 0x3b, 0xf3, 0xf4, 0x7e, 0xff, 0x97, 0xd3, 0x7e, 0x5f, 0x80, 0x5a, + 0x5a, 0x7c, 0xb1, 0x92, 0x04, 0x3e, 0x8b, 0x64, 0x30, 0x09, 0x98, 0x30, 0x07, 0xe7, 0x38, 0xe4, + 0x3e, 0x94, 0x5d, 0x29, 0x45, 0xfa, 0x40, 0x7f, 0x9e, 0xaf, 0xdc, 0xed, 0x53, 0x94, 0xf4, 0x23, + 0x29, 0x96, 0x54, 0xa3, 0xf6, 0x3e, 0x07, 0xc8, 0x98, 0x68, 0xeb, 0x8c, 0x2d, 0x8d, 0x56, 0x24, + 0xc9, 0x6d, 0x28, 0x3f, 0x73, 0xe7, 0x0b, 
0x66, 0x72, 0x58, 0x2f, 0xbe, 0xb0, 0x3e, 0x2f, 0x38, + 0x7f, 0xb5, 0xa0, 0x6a, 0x2a, 0x39, 0xb9, 0x07, 0x55, 0x55, 0xc9, 0x8d, 0x45, 0x37, 0x3f, 0x8c, + 0x14, 0x42, 0x8e, 0x57, 0x2d, 0x2a, 0x67, 0xa3, 0x51, 0xa5, 0x5b, 0x95, 0xb1, 0x31, 0x6b, 0x58, + 0x45, 0x9f, 0x4d, 0x4c, 0x2f, 0xda, 0x45, 0x74, 0x8f, 0x4d, 0x82, 0x28, 0x40, 0xff, 0x50, 0x14, + 0x91, 0x7b, 0xe9, 0xad, 0x4b, 0x4a, 0xe3, 0x7b, 0x79, 0x8d, 0xaf, 0x5e, 0x7a, 0x00, 0xf5, 0xdc, + 0x31, 0x37, 0xdc, 0xfa, 0xc3, 0xfc, 0xad, 0xcd, 0x91, 0x4a, 0x9d, 0x6e, 0xa4, 0x99, 0x17, 0xfe, + 0x0b, 0xff, 0x7d, 0x0a, 0x90, 0xa9, 0x7c, 0xf3, 0xc2, 0xe2, 0x3c, 0x2f, 0x02, 0x0c, 0x63, 0x2c, + 0x9d, 0xbe, 0xab, 0x2a, 0xf2, 0x76, 0x30, 0x8d, 0xb8, 0x60, 0x4f, 0xd5, 0x53, 0x55, 0xfb, 0x6b, + 0xb4, 0xae, 0x79, 0xea, 0xc5, 0x90, 0x53, 0xa8, 0xfb, 0x2c, 0xf1, 0x44, 0xa0, 0x12, 0xca, 0x38, + 0xfd, 0x00, 0xef, 0x94, 0xe9, 0x69, 0xf7, 0x32, 0x84, 0xf6, 0x55, 0x7e, 0x0f, 0x39, 0x81, 0x6d, + 0x76, 0x1d, 0x73, 0x21, 0xcd, 0x29, 0xba, 0xe1, 0xdf, 0xd2, 0xa3, 0x03, 0xf2, 0xd5, 0x49, 0xb4, + 0xce, 0xb2, 0x05, 0x71, 0xa1, 0xe4, 0xb9, 0xb1, 0xee, 0x76, 0xf5, 0x93, 0xe6, 0xc6, 0x79, 0x5d, + 0x37, 0xd6, 0x4e, 0xeb, 0x7c, 0x82, 0x77, 0x7d, 0xfe, 0xaf, 0x83, 0xbb, 0xb9, 0x16, 0x17, 0xf2, + 0xf1, 0xf2, 0x58, 0xe5, 0xcb, 0x2c, 0x90, 0xc7, 0x0b, 0x19, 0xcc, 0x8f, 0xdd, 0x38, 0x40, 0x75, + 0xb8, 0x71, 0xd0, 0xa3, 0x4a, 0xf5, 0xde, 0x6f, 0xa0, 0xb1, 0x69, 0xf7, 0xdb, 0xc4, 0x60, 0xef, + 0x33, 0xb0, 0x57, 0x76, 0xbc, 0x6e, 0x63, 0x2d, 0x1f, 0xbc, 0x0f, 0xa0, 0x9e, 0xbb, 0x37, 0x02, + 0x9f, 0x28, 0xa0, 0xf6, 0xbe, 0x5e, 0x38, 0xcf, 0x71, 0xda, 0x48, 0xfb, 0xcd, 0xaf, 0x00, 0x2e, + 0xa5, 0x8c, 0x9f, 0xaa, 0x06, 0x64, 0x0e, 0xb1, 0x91, 0xa3, 0x10, 0xe4, 0x00, 0xea, 0xb8, 0x48, + 0x8c, 0x5c, 0x5b, 0xaa, 0x76, 0x24, 0x1a, 0xf0, 0x4b, 0xb0, 0x27, 0xab, 0xed, 0xba, 0x71, 0xd4, + 0x26, 0xe9, 0xee, 0x5f, 0x40, 0x2d, 0xe2, 0x46, 0xa6, 0xfb, 0x61, 0x35, 0xe2, 0x4a, 0xe4, 0xdc, + 0x85, 0x9f, 0xbd, 0x32, 0x1a, 0x91, 0xf7, 0xa0, 0x32, 0x09, 0xe6, 0x52, 0x3d, 
0x57, 0x6c, 0xb1, + 0x66, 0xe5, 0xfc, 0xa3, 0x00, 0x90, 0x3d, 0x2d, 0xf4, 0x08, 0xbe, 0x3b, 0xc4, 0x6c, 0xeb, 0x77, + 0x36, 0x87, 0x5a, 0x68, 0x22, 0x68, 0xf2, 0xe8, 0xce, 0xfa, 0x73, 0x6c, 0xa7, 0x01, 0xd6, 0xb1, + 0x3d, 0x31, 0xb1, 0x7d, 0x9b, 0xf1, 0x65, 0x75, 0xc2, 0xde, 0x57, 0xb0, 0xb3, 0xa6, 0xee, 0x0d, + 0x5f, 0x6a, 0x96, 0x65, 0xf9, 0x90, 0xdd, 0x83, 0x8a, 0x6e, 0xed, 0x58, 0x7f, 0x91, 0x32, 0x6a, + 0x14, 0xad, 0xea, 0xf8, 0x45, 0x3a, 0xe8, 0x0d, 0x2e, 0x9c, 0x13, 0xa8, 0xe8, 0x49, 0x96, 0xb4, + 0xa0, 0xea, 0x7a, 0x78, 0xb5, 0xb4, 0x5c, 0xed, 0xa6, 0x63, 0xee, 0xa9, 0x62, 0xd3, 0x54, 0xec, + 0xfc, 0xdd, 0x02, 0xc8, 0xf8, 0x6f, 0x31, 0x2b, 0x7c, 0x01, 0xbb, 0x09, 0xf3, 0x78, 0xe4, 0xbb, + 0x62, 0xa9, 0xa4, 0x66, 0x62, 0xbb, 0x69, 0xcb, 0x06, 0x32, 0x37, 0x37, 0x14, 0x5f, 0x3f, 0x37, + 0xb4, 0xa0, 0xe4, 0xf1, 0x78, 0x69, 0x9e, 0x2f, 0x59, 0xbf, 0x48, 0x97, 0xc7, 0x4b, 0x9c, 0xdb, + 0x11, 0x41, 0xda, 0x50, 0x09, 0x67, 0x6a, 0xb6, 0xd7, 0x63, 0xd4, 0xed, 0x75, 0xec, 0xc3, 0x19, + 0xd2, 0xf8, 0x25, 0xa0, 0x51, 0xe4, 0x2e, 0x94, 0xc3, 0x99, 0x1f, 0x08, 0x35, 0x71, 0xd4, 0x75, + 0xbf, 0xce, 0xc3, 0x7b, 0x81, 0xc0, 0x79, 0x5f, 0x61, 0x88, 0x03, 0x96, 0x08, 0x9b, 0x55, 0x85, + 0x6c, 0x6c, 0x78, 0x33, 0x3c, 0xdb, 0xa2, 0x96, 0x08, 0x3b, 0x35, 0xa8, 0x68, 0xbf, 0x3a, 0x7f, + 0x2e, 0xc2, 0xee, 0xba, 0x95, 0x98, 0x07, 0x89, 0xf0, 0xd2, 0x3c, 0x48, 0x84, 0xb7, 0x1a, 0xa9, + 0xac, 0xdc, 0x48, 0xe5, 0x40, 0x99, 0x5f, 0x45, 0x4c, 0xe4, 0x3f, 0x62, 0xba, 0x97, 0xfc, 0x2a, + 0xc2, 0xe1, 0x41, 0x8b, 0xd6, 0x7a, 0x71, 0xd9, 0xf4, 0xe2, 0x0f, 0x61, 0x67, 0xc2, 0xe7, 0x73, + 0x7e, 0x35, 0x5a, 0x86, 0xf3, 0x20, 0x9a, 0x99, 0x86, 0xbc, 0xce, 0x24, 0x2d, 0xb8, 0xe5, 0x07, + 0x02, 0xcd, 0xe9, 0xf2, 0x48, 0xb2, 0x48, 0x4d, 0x91, 0x88, 0xdb, 0x64, 0x93, 0x2f, 0xe1, 0xd0, + 0x95, 0x92, 0x85, 0xb1, 0x7c, 0x1c, 0xc5, 0xae, 0x37, 0xeb, 0x71, 0x4f, 0xbd, 0xc7, 0x30, 0x76, + 0x65, 0x30, 0x0e, 0xe6, 0x81, 0x5c, 0x2a, 0x67, 0xd4, 0xe8, 0x6b, 0x71, 0xe4, 0x23, 0xd8, 0xf5, + 0x04, 0x73, 0x25, 
0xeb, 0xb1, 0x44, 0x5e, 0xb8, 0xf2, 0xb2, 0x59, 0x53, 0x3b, 0x37, 0xb8, 0x78, + 0x07, 0x17, 0xad, 0xfd, 0x26, 0x98, 0xfb, 0x9e, 0x2b, 0xfc, 0xa6, 0xad, 0xef, 0xb0, 0xc6, 0x24, + 0x6d, 0x20, 0x8a, 0xd1, 0x0f, 0x63, 0xb9, 0x5c, 0x41, 0x41, 0x41, 0x6f, 0x90, 0xe0, 0x47, 0x8e, + 0x0c, 0x42, 0x96, 0x48, 0x37, 0x8c, 0xd5, 0xc7, 0x57, 0x91, 0x66, 0x0c, 0xe7, 0xdb, 0x02, 0x34, + 0x36, 0x53, 0x04, 0x1d, 0x1c, 0xa3, 0x99, 0xe6, 0xb1, 0x21, 0xbd, 0x72, 0xba, 0x95, 0x73, 0x3a, + 0x06, 0x10, 0xab, 0x0a, 0xc6, 0x6a, 0x9b, 0x2a, 0x3a, 0x0b, 0x60, 0xe9, 0xa7, 0x03, 0xb8, 0x66, + 0x52, 0x79, 0xd3, 0xa4, 0x3f, 0x16, 0xe0, 0xd6, 0x46, 0x1a, 0xbe, 0xb1, 0x45, 0x87, 0x50, 0x0f, + 0xdd, 0x19, 0xbb, 0x70, 0x85, 0x0a, 0x6e, 0x51, 0x37, 0xd6, 0x1c, 0xeb, 0x7f, 0x60, 0x5f, 0x04, + 0xdb, 0xf9, 0xdc, 0xbf, 0xd1, 0xb6, 0x34, 0x94, 0xe7, 0x5c, 0x3e, 0xe0, 0x8b, 0xc8, 0x37, 0xdd, + 0x68, 0x9d, 0xf9, 0x6a, 0xc0, 0x8b, 0x37, 0x04, 0xdc, 0x39, 0x87, 0x5a, 0x6a, 0x20, 0x39, 0x30, + 0x1f, 0x50, 0x85, 0xec, 0xcb, 0xfc, 0x71, 0xc2, 0x04, 0xda, 0xae, 0xbf, 0xa6, 0xde, 0x87, 0xf2, + 0x54, 0xf0, 0x45, 0x6c, 0x6a, 0xeb, 0x1a, 0x42, 0x4b, 0x9c, 0x11, 0x54, 0x0d, 0x87, 0x1c, 0x41, + 0x65, 0xbc, 0x3c, 0x77, 0x43, 0x66, 0x14, 0xaa, 0x87, 0x8d, 0x6b, 0xdf, 0x20, 0xb0, 0x5a, 0x68, + 0x04, 0xb9, 0x0d, 0xa5, 0xf1, 0x72, 0xd0, 0xd3, 0x63, 0x32, 0xd6, 0x1c, 0x5c, 0x75, 0x2a, 0xda, + 0x20, 0xe7, 0x6b, 0xd8, 0xce, 0xef, 0x43, 0xa7, 0x44, 0xa9, 0x5e, 0x9b, 0x2a, 0x3a, 0x2b, 0xae, + 0xd6, 0x6b, 0x8a, 0xeb, 0x51, 0x0b, 0xaa, 0xe6, 0xe3, 0x93, 0xd8, 0x50, 0x7e, 0x7c, 0x3e, 0xea, + 0x3f, 0x6a, 0x6c, 0x91, 0x1a, 0x94, 0xce, 0x86, 0xa3, 0x47, 0x8d, 0x02, 0x52, 0xe7, 0xc3, 0xf3, + 0x7e, 0xc3, 0x3a, 0xfa, 0x2d, 0xd8, 0xab, 0x8f, 0x24, 0x64, 0x77, 0x06, 0xe7, 0xbd, 0xc6, 0x16, + 0x01, 0xa8, 0x8c, 0xfa, 0x5d, 0xda, 0x47, 0x70, 0x15, 0x8a, 0xa3, 0xd1, 0x59, 0xc3, 0x42, 0x55, + 0xdd, 0xd3, 0xee, 0x59, 0xbf, 0x51, 0x44, 0xf2, 0xd1, 0xc3, 0x8b, 0x07, 0xa3, 0x46, 0xe9, 0xe8, + 0x53, 0xb8, 0xb5, 0xf1, 0x91, 0xa2, 0x76, 0x9f, 0x9d, 
0xd2, 0x3e, 0x6a, 0xaa, 0x43, 0xf5, 0x82, + 0x0e, 0x9e, 0x9c, 0x3e, 0xea, 0x37, 0x0a, 0x28, 0xf8, 0x7a, 0xd8, 0xfd, 0xaa, 0xdf, 0x6b, 0x58, + 0x9d, 0x3b, 0xdf, 0xbd, 0xd8, 0x2f, 0x7c, 0xff, 0x62, 0xbf, 0xf0, 0xc3, 0x8b, 0xfd, 0xc2, 0xbf, + 0x5f, 0xec, 0x17, 0xbe, 0x7d, 0xb9, 0xbf, 0xf5, 0xfd, 0xcb, 0xfd, 0xad, 0x1f, 0x5e, 0xee, 0x6f, + 0x8d, 0x2b, 0xea, 0x0f, 0x9e, 0x4f, 0xfe, 0x13, 0x00, 0x00, 0xff, 0xff, 0x9b, 0x24, 0xd0, 0xaa, + 0x20, 0x12, 0x00, 0x00, } diff --git a/solver/pb/ops.proto b/solver/pb/ops.proto index 4946de2e9bbc..cbbbdae18a46 100644 --- a/solver/pb/ops.proto +++ b/solver/pb/ops.proto @@ -208,7 +208,7 @@ message FileOp { message FileAction { int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // could be real input or target (target index + max input index) int64 secondaryInput = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; // --//-- - int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; + int64 output = 3 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; oneof action { // FileActionCopy copies files from secondaryInput on top of input FileActionCopy copy = 4; @@ -227,23 +227,23 @@ message FileActionCopy { // dest path string dest = 2; // optional owner override - ChownOpt owner = 4; + ChownOpt owner = 3; // optional permission bits override - int32 mode = 5; + int32 mode = 4; // followSymlink resolves symlinks in src - bool followSymlink = 6; + bool followSymlink = 5; // dirCopyContents only copies contents if src is a directory - bool dirCopyContents = 7; + bool dirCopyContents = 6; // attemptUnpackDockerCompatibility detects if src is an archive to unpack it instead - bool attemptUnpackDockerCompatibility = 8; + bool attemptUnpackDockerCompatibility = 7; // createDestPath creates dest path directories if needed - bool createDestPath = 9; + bool createDestPath = 8; // allowWildcard allows filepath.Match wildcards in src path - 
bool allowWildcard = 10; + bool allowWildcard = 9; // allowEmptyWildcard doesn't fail the whole copy if wildcard doesn't resolve to files - bool allowEmptyWildcard = 11; + bool allowEmptyWildcard = 10; // optional created time override - int64 timestamp = 12; + int64 timestamp = 11; } message FileActionMkFile { From 6fd30e7b43dd73cc817cb9c963161a9b8f77661b Mon Sep 17 00:00:00 2001 From: Tibor Vass Date: Fri, 15 Mar 2019 01:19:03 +0000 Subject: [PATCH 24/25] dockerfile: add more tests to fix fileop Signed-off-by: Tibor Vass --- frontend/dockerfile/dockerfile_test.go | 52 ++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index ecd6e96f3d29..a4ce401c8854 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -90,6 +90,7 @@ var fileOpTests = []integration.Test{ testCopyOverrideFiles, testCopyVarSubstitution, testCopyWildcards, + testCopyRelative, } var opts []integration.TestOpt @@ -2507,6 +2508,57 @@ COPY sub/dir1 subdest6 require.Equal(t, "foo-contents", string(dt)) } +func testCopyRelative(t *testing.T, sb integration.Sandbox) { + f := getFrontend(t, sb) + isFileOp := getFileOp(t, sb) + + dockerfile := []byte(` +FROM busybox +WORKDIR /test1 +WORKDIR test2 +RUN sh -c "[ "$PWD" = '/test1/test2' ]" +COPY foo ./ +RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" +ADD foo ./bar/baz +RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" +COPY foo ./bar/baz2 +RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" +WORKDIR .. +COPY foo ./ +RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" +COPY foo /test3/ +RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" +WORKDIR /test4 +COPY . . 
+RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" +WORKDIR /test5/test6 +COPY foo ../ +RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" +`) + + dir, err := tmpdir( + fstest.CreateFile("Dockerfile", dockerfile, 0600), + fstest.CreateFile("foo", []byte(`hello`), 0600), + ) + require.NoError(t, err) + defer os.RemoveAll(dir) + + c, err := client.New(context.TODO(), sb.Address()) + require.NoError(t, err) + defer c.Close() + + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + FrontendAttrs: map[string]string{ + "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), + }, + LocalDirs: map[string]string{ + builder.DefaultLocalNameDockerfile: dir, + builder.DefaultLocalNameContext: dir, + }, + }, nil) + require.NoError(t, err) +} + func testDockerfileFromGit(t *testing.T, sb integration.Sandbox) { f := getFrontend(t, sb) From 5effbff31421cd1beba789bdb56c1734bbc0530e Mon Sep 17 00:00:00 2001 From: Tonis Tiigi Date: Fri, 15 Mar 2019 17:45:24 -0700 Subject: [PATCH 25/25] dockerfile: improve symlinks copy tests Signed-off-by: Tonis Tiigi --- frontend/dockerfile/dockerfile_test.go | 28 +++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) diff --git a/frontend/dockerfile/dockerfile_test.go b/frontend/dockerfile/dockerfile_test.go index a4ce401c8854..23ca0e6f7357 100644 --- a/frontend/dockerfile/dockerfile_test.go +++ b/frontend/dockerfile/dockerfile_test.go @@ -948,7 +948,7 @@ COPY sub/l* alllinks/ fstest.CreateFile("bar", []byte(`bar-contents`), 0600), fstest.Symlink("bar", "foo"), fstest.CreateDir("sub", 0700), - fstest.CreateFile("sub/lfile", []byte(`baz-contents`), 0600), + fstest.CreateFile("sub/lfile", []byte(`lfile-contents`), 0600), fstest.Symlink("subfile", "sub/l0"), fstest.CreateFile("sub/subfile", []byte(`subfile-contents`), 0600), fstest.Symlink("second", "sub/l1"), @@ -962,7 +962,17 @@ COPY sub/l* alllinks/ require.NoError(t, err) defer c.Close() + destDir, err := ioutil.TempDir("", "buildkit") + require.NoError(t, err) + defer 
os.RemoveAll(destDir) + _, err = f.Solve(context.TODO(), c, client.SolveOpt{ + Exports: []client.ExportEntry{ + { + Type: client.ExporterLocal, + OutputDir: destDir, + }, + }, FrontendAttrs: map[string]string{ "build-arg:BUILDKIT_DISABLE_FILEOP": strconv.FormatBool(!isFileOp), }, @@ -972,6 +982,22 @@ COPY sub/l* alllinks/ }, }, nil) require.NoError(t, err) + + dt, err := ioutil.ReadFile(filepath.Join(destDir, "foo")) + require.NoError(t, err) + require.Equal(t, "bar-contents", string(dt)) + + dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l0")) + require.NoError(t, err) + require.Equal(t, "subfile-contents", string(dt)) + + dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/lfile")) + require.NoError(t, err) + require.Equal(t, "lfile-contents", string(dt)) + + dt, err = ioutil.ReadFile(filepath.Join(destDir, "alllinks/l1")) + require.NoError(t, err) + require.Equal(t, "baz-contents", string(dt)) } func testHTTPDockerfile(t *testing.T, sb integration.Sandbox) {