diff --git a/.gitignore b/.gitignore
index daf913b..0f7092f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -22,3 +22,6 @@ _testmain.go
*.exe
*.test
*.prof
+
+# acpush binary
+acpush
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
new file mode 100644
index 0000000..3d1d313
--- /dev/null
+++ b/Godeps/Godeps.json
@@ -0,0 +1,95 @@
+{
+ "ImportPath": "github.com/appc/acpush",
+ "GoVersion": "go1.5.1",
+ "Packages": [
+ "./..."
+ ],
+ "Deps": [
+ {
+ "ImportPath": "github.com/appc/spec/aci",
+ "Comment": "v0.7.1-17-g274cd4f",
+ "Rev": "274cd4f221f5421745727fc79729c235ad9ff121"
+ },
+ {
+ "ImportPath": "github.com/appc/spec/discovery",
+ "Comment": "v0.7.1-17-g274cd4f",
+ "Rev": "274cd4f221f5421745727fc79729c235ad9ff121"
+ },
+ {
+ "ImportPath": "github.com/appc/spec/pkg/device",
+ "Comment": "v0.7.1-17-g274cd4f",
+ "Rev": "274cd4f221f5421745727fc79729c235ad9ff121"
+ },
+ {
+ "ImportPath": "github.com/appc/spec/pkg/tarheader",
+ "Comment": "v0.7.1-17-g274cd4f",
+ "Rev": "274cd4f221f5421745727fc79729c235ad9ff121"
+ },
+ {
+ "ImportPath": "github.com/appc/spec/schema",
+ "Comment": "v0.7.1-17-g274cd4f",
+ "Rev": "274cd4f221f5421745727fc79729c235ad9ff121"
+ },
+ {
+ "ImportPath": "github.com/coreos/go-semver/semver",
+ "Rev": "d043ae190b3202550d026daf009359bb5d761672"
+ },
+ {
+ "ImportPath": "github.com/coreos/ioprogress",
+ "Rev": "4637e494fd9b23c5565ee193e89f91fdc1639bc0"
+ },
+ {
+ "ImportPath": "github.com/coreos/rkt/common",
+ "Comment": "v0.10.0-39-g621d40a",
+ "Rev": "621d40ac592488cb0d7f58c10e5d79fbd6d0505c"
+ },
+ {
+ "ImportPath": "github.com/coreos/rkt/rkt/config",
+ "Comment": "v0.10.0-39-g621d40a",
+ "Rev": "621d40ac592488cb0d7f58c10e5d79fbd6d0505c"
+ },
+ {
+ "ImportPath": "github.com/cpuguy83/go-md2man/md2man",
+ "Comment": "v1.0.4",
+ "Rev": "71acacd42f85e5e82f70a55327789582a5200a90"
+ },
+ {
+ "ImportPath": "github.com/inconshreveable/mousetrap",
+ "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
+ },
+ {
+ "ImportPath": "github.com/russross/blackfriday",
+ "Comment": "v1.3-14-g4b26653",
+ "Rev": "4b26653fe00067c22322249d58654737e44b0dfb"
+ },
+ {
+ "ImportPath": "github.com/shurcooL/sanitized_anchor_name",
+ "Rev": "10ef21a441db47d8b13ebcc5fd2310f636973c77"
+ },
+ {
+ "ImportPath": "github.com/spf13/cobra",
+ "Rev": "83773614293e6c9406454a53ce5e263500ce6e44"
+ },
+ {
+ "ImportPath": "github.com/spf13/pflag",
+ "Rev": "08b1a584251b5b62f458943640fc8ebd4d50aaa5"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/ssh/terminal",
+ "Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
+ },
+ {
+ "ImportPath": "golang.org/x/net/html",
+ "Rev": "e1a5816c9bfcaeceeaeb821e8eea1c61ecd3220e"
+ },
+ {
+ "ImportPath": "k8s.io/kubernetes/pkg/api/resource",
+ "Comment": "v1.2.0-alpha.2-449-g1524d74",
+ "Rev": "1524d7490a2dfbbcd2a98bcebd6518c06f3fbace"
+ },
+ {
+ "ImportPath": "speter.net/go/exp/math/dec/inf",
+ "Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"
+ }
+ ]
+}
diff --git a/Godeps/Readme b/Godeps/Readme
new file mode 100644
index 0000000..4cdaa53
--- /dev/null
+++ b/Godeps/Readme
@@ -0,0 +1,5 @@
+This directory tree is generated automatically by godep.
+
+Please do not edit.
+
+See https://github.com/tools/godep for more information.
diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore
new file mode 100644
index 0000000..f037d68
--- /dev/null
+++ b/Godeps/_workspace/.gitignore
@@ -0,0 +1,2 @@
+/pkg
+/bin
diff --git a/Godeps/_workspace/src/github.com/appc/spec/aci/build.go b/Godeps/_workspace/src/github.com/appc/spec/aci/build.go
new file mode 100644
index 0000000..a594eb9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/aci/build.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aci
+
+import (
+ "archive/tar"
+ "io"
+ "os"
+ "path/filepath"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader"
+)
+
+// TarHeaderWalkFunc is the type of the function which allows setting tar
+// headers or filtering out tar entries when building an ACI. It will be
+// applied to every entry in the tar file.
+//
+// If true is returned, the entry will be included in the final ACI; if false,
+// the entry will not be included.
+type TarHeaderWalkFunc func(hdr *tar.Header) bool
+
+// BuildWalker creates a filepath.WalkFunc that walks over the given root
+// (which should represent an ACI layout on disk) and adds the files in the
+// rootfs/ subdirectory to the given ArchiveWriter
+func BuildWalker(root string, aw ArchiveWriter, cb TarHeaderWalkFunc) filepath.WalkFunc {
+ // cache of inode -> filepath, used to leverage hard links in the archive
+ inos := map[uint64]string{}
+ return func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ relpath, err := filepath.Rel(root, path)
+ if err != nil {
+ return err
+ }
+ if relpath == "." {
+ return nil
+ }
+ if relpath == ManifestFile {
+ // ignore; this will be written by the archive writer
+ // TODO(jonboulle): does this make sense? maybe just remove from archivewriter?
+ return nil
+ }
+
+ link := ""
+ var r io.Reader
+ switch info.Mode() & os.ModeType {
+ case os.ModeSocket:
+ return nil
+ case os.ModeNamedPipe:
+ case os.ModeCharDevice:
+ case os.ModeDevice:
+ case os.ModeDir:
+ case os.ModeSymlink:
+ target, err := os.Readlink(path)
+ if err != nil {
+ return err
+ }
+ link = target
+ default:
+ file, err := os.Open(path)
+ if err != nil {
+ return err
+ }
+ defer file.Close()
+ r = file
+ }
+
+ hdr, err := tar.FileInfoHeader(info, link)
+ if err != nil {
+ panic(err)
+ }
+ // Because os.FileInfo's Name method returns only the base
+ // name of the file it describes, it may be necessary to
+ // modify the Name field of the returned header to provide the
+ // full path name of the file.
+ hdr.Name = relpath
+ tarheader.Populate(hdr, info, inos)
+ // If the file is a hard link to a file we've already seen, we
+ // don't need the contents
+ if hdr.Typeflag == tar.TypeLink {
+ hdr.Size = 0
+ r = nil
+ }
+
+ if cb != nil {
+ if !cb(hdr) {
+ return nil
+ }
+ }
+
+ if err := aw.AddFile(hdr, r); err != nil {
+ return err
+ }
+
+ return nil
+ }
+}
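
Not part of the diff itself: a minimal usage sketch for the vendored `BuildWalker`/`ArchiveWriter` API above. The layout path `./image-layout`, the output name `app.aci`, and the `buildACI` helper are made up for illustration.

```go
package main

import (
	"archive/tar"
	"io/ioutil"
	"log"
	"os"
	"path/filepath"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/aci"
	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
)

// buildACI packs an on-disk ACI layout into a tar image.
// Paths are illustrative, not taken from the PR.
func buildACI(layoutDir, out string) error {
	// Read the image manifest from the layout; BuildWalker skips this file
	// and the ArchiveWriter re-emits it on Close.
	b, err := ioutil.ReadFile(filepath.Join(layoutDir, aci.ManifestFile))
	if err != nil {
		return err
	}
	var im schema.ImageManifest
	if err := im.UnmarshalJSON(b); err != nil {
		return err
	}

	f, err := os.Create(out)
	if err != nil {
		return err
	}
	defer f.Close()

	tw := tar.NewWriter(f)
	aw := aci.NewImageWriter(im, tw)
	// Walk the layout and add every entry (rootfs/...) to the archive.
	if err := filepath.Walk(layoutDir, aci.BuildWalker(layoutDir, aw, nil)); err != nil {
		return err
	}
	return aw.Close() // writes the manifest and closes the tar stream
}

func main() {
	if err := buildACI("./image-layout", "app.aci"); err != nil {
		log.Fatal(err)
	}
}
```

Passing a non-nil TarHeaderWalkFunc instead of nil would let a caller rewrite headers or drop entries before they reach the archive.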
diff --git a/Godeps/_workspace/src/github.com/appc/spec/aci/doc.go b/Godeps/_workspace/src/github.com/appc/spec/aci/doc.go
new file mode 100644
index 0000000..624d431
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/aci/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package aci contains various functions for working with App Container Images.
+package aci
diff --git a/Godeps/_workspace/src/github.com/appc/spec/aci/file.go b/Godeps/_workspace/src/github.com/appc/spec/aci/file.go
new file mode 100644
index 0000000..4ec6826
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/aci/file.go
@@ -0,0 +1,246 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aci
+
+import (
+ "archive/tar"
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os/exec"
+ "path/filepath"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
+)
+
+type FileType string
+
+const (
+ TypeGzip = FileType("gz")
+ TypeBzip2 = FileType("bz2")
+ TypeXz = FileType("xz")
+ TypeTar = FileType("tar")
+ TypeText = FileType("text")
+ TypeUnknown = FileType("unknown")
+
+ readLen = 512 // max bytes to sniff
+
+ hexHdrGzip = "1f8b"
+ hexHdrBzip2 = "425a68"
+ hexHdrXz = "fd377a585a00"
+ hexSigTar = "7573746172"
+
+ tarOffset = 257
+
+ textMime = "text/plain; charset=utf-8"
+)
+
+var (
+ hdrGzip []byte
+ hdrBzip2 []byte
+ hdrXz []byte
+ sigTar []byte
+ tarEnd int
+)
+
+func mustDecodeHex(s string) []byte {
+ b, err := hex.DecodeString(s)
+ if err != nil {
+ panic(err)
+ }
+ return b
+}
+
+func init() {
+ hdrGzip = mustDecodeHex(hexHdrGzip)
+ hdrBzip2 = mustDecodeHex(hexHdrBzip2)
+ hdrXz = mustDecodeHex(hexHdrXz)
+ sigTar = mustDecodeHex(hexSigTar)
+ tarEnd = tarOffset + len(sigTar)
+}
+
+// DetectFileType attempts to detect the type of file that the given reader
+// represents by comparing it against known file signatures (magic numbers)
+func DetectFileType(r io.Reader) (FileType, error) {
+ var b bytes.Buffer
+ n, err := io.CopyN(&b, r, readLen)
+ if err != nil && err != io.EOF {
+ return TypeUnknown, err
+ }
+ bs := b.Bytes()
+ switch {
+ case bytes.HasPrefix(bs, hdrGzip):
+ return TypeGzip, nil
+ case bytes.HasPrefix(bs, hdrBzip2):
+ return TypeBzip2, nil
+ case bytes.HasPrefix(bs, hdrXz):
+ return TypeXz, nil
+ case n > int64(tarEnd) && bytes.Equal(bs[tarOffset:tarEnd], sigTar):
+ return TypeTar, nil
+ case http.DetectContentType(bs) == textMime:
+ return TypeText, nil
+ default:
+ return TypeUnknown, nil
+ }
+}
+
+// XzReader is an io.ReadCloser which decompresses xz compressed data.
+type XzReader struct {
+ io.ReadCloser
+ cmd *exec.Cmd
+ closech chan error
+}
+
+// NewXzReader shells out to a command line xz executable (if
+// available) to decompress the given io.Reader using the xz
+// compression format and returns an *XzReader.
+// It is the caller's responsibility to call Close on the XzReader when done.
+func NewXzReader(r io.Reader) (*XzReader, error) {
+ rpipe, wpipe := io.Pipe()
+ ex, err := exec.LookPath("xz")
+ if err != nil {
+ log.Fatalf("couldn't find xz executable: %v", err)
+ }
+ cmd := exec.Command(ex, "--decompress", "--stdout")
+
+ closech := make(chan error)
+
+ cmd.Stdin = r
+ cmd.Stdout = wpipe
+
+ go func() {
+ err := cmd.Run()
+ wpipe.CloseWithError(err)
+ closech <- err
+ }()
+
+ return &XzReader{rpipe, cmd, closech}, nil
+}
+
+func (r *XzReader) Close() error {
+ r.ReadCloser.Close()
+ r.cmd.Process.Kill()
+ return <-r.closech
+}
+
+// ManifestFromImage extracts a new schema.ImageManifest from the given ACI image.
+func ManifestFromImage(rs io.ReadSeeker) (*schema.ImageManifest, error) {
+ var im schema.ImageManifest
+
+ tr, err := NewCompressedTarReader(rs)
+ if err != nil {
+ return nil, err
+ }
+ defer tr.Close()
+
+ for {
+ hdr, err := tr.Next()
+ switch err {
+ case io.EOF:
+ return nil, errors.New("missing manifest")
+ case nil:
+ if filepath.Clean(hdr.Name) == ManifestFile {
+ data, err := ioutil.ReadAll(tr)
+ if err != nil {
+ return nil, err
+ }
+ if err := im.UnmarshalJSON(data); err != nil {
+ return nil, err
+ }
+ return &im, nil
+ }
+ default:
+ return nil, fmt.Errorf("error extracting tarball: %v", err)
+ }
+ }
+}
+
+// TarReadCloser embeds a *tar.Reader and the related io.Closer.
+// It is the caller's responsibility to call Close on the TarReadCloser when
+// done.
+type TarReadCloser struct {
+ *tar.Reader
+ io.Closer
+}
+
+func (r *TarReadCloser) Close() error {
+ return r.Closer.Close()
+}
+
+// NewCompressedTarReader creates a new TarReadCloser reading from the
+// given ACI image.
+// It is the caller's responsibility to call Close on the TarReadCloser
+// when done.
+func NewCompressedTarReader(rs io.ReadSeeker) (*TarReadCloser, error) {
+ cr, err := NewCompressedReader(rs)
+ if err != nil {
+ return nil, err
+ }
+ return &TarReadCloser{tar.NewReader(cr), cr}, nil
+}
+
+// NewCompressedReader creates a new io.ReadCloser from the given ACI image.
+// It is the caller's responsibility to call Close on the Reader when done.
+func NewCompressedReader(rs io.ReadSeeker) (io.ReadCloser, error) {
+
+ var (
+ dr io.ReadCloser
+ err error
+ )
+
+ _, err = rs.Seek(0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ ftype, err := DetectFileType(rs)
+ if err != nil {
+ return nil, err
+ }
+
+ _, err = rs.Seek(0, 0)
+ if err != nil {
+ return nil, err
+ }
+
+ switch ftype {
+ case TypeGzip:
+ dr, err = gzip.NewReader(rs)
+ if err != nil {
+ return nil, err
+ }
+ case TypeBzip2:
+ dr = ioutil.NopCloser(bzip2.NewReader(rs))
+ case TypeXz:
+ dr, err = NewXzReader(rs)
+ if err != nil {
+ return nil, err
+ }
+ case TypeTar:
+ dr = ioutil.NopCloser(rs)
+ case TypeUnknown:
+ return nil, errors.New("error: unknown image filetype")
+ default:
+ return nil, errors.New("no type returned from DetectFileType?")
+ }
+ return dr, nil
+}
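
Also not part of the diff: a short sketch of how the sniffing readers above are typically used to pull the manifest out of a possibly compressed image. The path `app.aci` is hypothetical.

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/aci"
)

func main() {
	f, err := os.Open("app.aci") // hypothetical image path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// ManifestFromImage seeks to the start, sniffs the compression via
	// DetectFileType, wraps the stream in the matching decompressor and
	// scans the tar for the manifest entry.
	im, err := aci.ManifestFromImage(f)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("image name:", im.Name)
}
```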
diff --git a/Godeps/_workspace/src/github.com/appc/spec/aci/layout.go b/Godeps/_workspace/src/github.com/appc/spec/aci/layout.go
new file mode 100644
index 0000000..ce4ac7f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/aci/layout.go
@@ -0,0 +1,187 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aci
+
+/*
+
+Image Layout
+
+The on-disk layout of an app container is straightforward.
+It includes a rootfs with all of the files that will exist in the root of the app and a manifest describing the image.
+The layout MUST contain an image manifest.
+
+/manifest
+/rootfs/
+/rootfs/usr/bin/mysql
+
+*/
+
+import (
+ "archive/tar"
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+const (
+ // Path to manifest file inside the layout
+ ManifestFile = "manifest"
+ // Path to rootfs directory inside the layout
+ RootfsDir = "rootfs"
+)
+
+type ErrOldVersion struct {
+ version types.SemVer
+}
+
+func (e ErrOldVersion) Error() string {
+ return fmt.Sprintf("ACVersion too old. Found major version %v, expected %v", e.version.Major, schema.AppContainerVersion.Major)
+}
+
+var (
+ ErrNoRootFS = errors.New("no rootfs found in layout")
+ ErrNoManifest = errors.New("no image manifest found in layout")
+)
+
+// ValidateLayout takes a directory and validates that the layout of the directory
+// matches that expected by the Application Container Image format.
+// If any errors are encountered during the validation, it will abort and
+// return the first one.
+func ValidateLayout(dir string) error {
+ fi, err := os.Stat(dir)
+ if err != nil {
+ return fmt.Errorf("error accessing layout: %v", err)
+ }
+ if !fi.IsDir() {
+ return fmt.Errorf("given path %q is not a directory", dir)
+ }
+ var flist []string
+ var imOK, rfsOK bool
+ var im io.Reader
+ walkLayout := func(fpath string, fi os.FileInfo, err error) error {
+ rpath, err := filepath.Rel(dir, fpath)
+ if err != nil {
+ return err
+ }
+ switch rpath {
+ case ".":
+ case ManifestFile:
+ im, err = os.Open(fpath)
+ if err != nil {
+ return err
+ }
+ imOK = true
+ case RootfsDir:
+ if !fi.IsDir() {
+ return errors.New("rootfs is not a directory")
+ }
+ rfsOK = true
+ default:
+ flist = append(flist, rpath)
+ }
+ return nil
+ }
+ if err := filepath.Walk(dir, walkLayout); err != nil {
+ return err
+ }
+ return validate(imOK, im, rfsOK, flist)
+}
+
+// ValidateArchive takes a *tar.Reader and validates that the layout of the
+// filesystem the reader encapsulates matches that expected by the
+// Application Container Image format. If any errors are encountered during
+// the validation, it will abort and return the first one.
+func ValidateArchive(tr *tar.Reader) error {
+ var fseen map[string]bool = make(map[string]bool)
+ var imOK, rfsOK bool
+ var im bytes.Buffer
+Tar:
+ for {
+ hdr, err := tr.Next()
+ switch {
+ case err == nil:
+ case err == io.EOF:
+ break Tar
+ default:
+ return err
+ }
+ name := filepath.Clean(hdr.Name)
+ switch name {
+ case ".":
+ case ManifestFile:
+ _, err := io.Copy(&im, tr)
+ if err != nil {
+ return err
+ }
+ imOK = true
+ case RootfsDir:
+ if !hdr.FileInfo().IsDir() {
+ return fmt.Errorf("rootfs is not a directory")
+ }
+ rfsOK = true
+ default:
+ if _, seen := fseen[name]; seen {
+ return fmt.Errorf("duplicate file entry in archive: %s", name)
+ }
+ fseen[name] = true
+ }
+ }
+ var flist []string
+ for key := range fseen {
+ flist = append(flist, key)
+ }
+ return validate(imOK, &im, rfsOK, flist)
+}
+
+func validate(imOK bool, im io.Reader, rfsOK bool, files []string) error {
+ defer func() {
+ if rc, ok := im.(io.Closer); ok {
+ rc.Close()
+ }
+ }()
+ if !imOK {
+ return ErrNoManifest
+ }
+ if !rfsOK {
+ return ErrNoRootFS
+ }
+ b, err := ioutil.ReadAll(im)
+ if err != nil {
+ return fmt.Errorf("error reading image manifest: %v", err)
+ }
+ var a schema.ImageManifest
+ if err := a.UnmarshalJSON(b); err != nil {
+ return fmt.Errorf("image manifest validation failed: %v", err)
+ }
+ if a.ACVersion.LessThanMajor(schema.AppContainerVersion) {
+ return ErrOldVersion{
+ version: a.ACVersion,
+ }
+ }
+ for _, f := range files {
+ if !strings.HasPrefix(f, "rootfs") {
+ return fmt.Errorf("unrecognized file path in layout: %q", f)
+ }
+ }
+ return nil
+}
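
A hedged sketch, not from the PR, of the two validation entry points above; both paths are invented for illustration.

```go
package main

import (
	"log"
	"os"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/aci"
)

func main() {
	// Validate an expanded layout on disk (hypothetical path).
	if err := aci.ValidateLayout("./image-layout"); err != nil {
		log.Fatalf("invalid layout: %v", err)
	}

	// Validate the same structure inside a (possibly compressed) image.
	f, err := os.Open("app.aci") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	tr, err := aci.NewCompressedTarReader(f)
	if err != nil {
		log.Fatal(err)
	}
	defer tr.Close()
	if err := aci.ValidateArchive(tr.Reader); err != nil {
		log.Fatalf("invalid archive: %v", err)
	}
}
```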
diff --git a/Godeps/_workspace/src/github.com/appc/spec/aci/writer.go b/Godeps/_workspace/src/github.com/appc/spec/aci/writer.go
new file mode 100644
index 0000000..328bbc6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/aci/writer.go
@@ -0,0 +1,98 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package aci
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "io"
+ "time"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
+)
+
+// ArchiveWriter writes App Container Images. Users wanting to create an ACI
+// should create an ArchiveWriter and add files to it; the ACI will be written
+// to the underlying tar.Writer.
+type ArchiveWriter interface {
+ AddFile(hdr *tar.Header, r io.Reader) error
+ Close() error
+}
+
+type imageArchiveWriter struct {
+ *tar.Writer
+ am *schema.ImageManifest
+}
+
+// NewImageWriter creates a new ArchiveWriter which will generate an App
+// Container Image based on the given manifest and write it to the given
+// tar.Writer
+func NewImageWriter(am schema.ImageManifest, w *tar.Writer) ArchiveWriter {
+ aw := &imageArchiveWriter{
+ w,
+ &am,
+ }
+ return aw
+}
+
+func (aw *imageArchiveWriter) AddFile(hdr *tar.Header, r io.Reader) error {
+ err := aw.Writer.WriteHeader(hdr)
+ if err != nil {
+ return err
+ }
+
+ if r != nil {
+ _, err := io.Copy(aw.Writer, r)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (aw *imageArchiveWriter) addFileNow(path string, contents []byte) error {
+ buf := bytes.NewBuffer(contents)
+ now := time.Now()
+ hdr := tar.Header{
+ Name: path,
+ Mode: 0644,
+ Uid: 0,
+ Gid: 0,
+ Size: int64(buf.Len()),
+ ModTime: now,
+ Typeflag: tar.TypeReg,
+ Uname: "root",
+ Gname: "root",
+ ChangeTime: now,
+ }
+ return aw.AddFile(&hdr, buf)
+}
+
+func (aw *imageArchiveWriter) addManifest(name string, m json.Marshaler) error {
+ out, err := m.MarshalJSON()
+ if err != nil {
+ return err
+ }
+ return aw.addFileNow(name, out)
+}
+
+func (aw *imageArchiveWriter) Close() error {
+ if err := aw.addManifest(ManifestFile, aw.am); err != nil {
+ return err
+ }
+ return aw.Writer.Close()
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/discovery/discovery.go b/Godeps/_workspace/src/github.com/appc/spec/discovery/discovery.go
new file mode 100644
index 0000000..ba95c2a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/discovery/discovery.go
@@ -0,0 +1,260 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "regexp"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/golang.org/x/net/html"
+ "github.com/appc/acpush/Godeps/_workspace/src/golang.org/x/net/html/atom"
+)
+
+type acMeta struct {
+ name string
+ prefix string
+ uri string
+}
+
+type ACIEndpoint struct {
+ ACI string
+ ASC string
+}
+
+type Endpoints struct {
+ ACIEndpoints []ACIEndpoint
+ Keys []string
+ ACIPushEndpoints []string
+}
+
+func (e *Endpoints) Append(ep Endpoints) {
+ e.ACIEndpoints = append(e.ACIEndpoints, ep.ACIEndpoints...)
+ e.Keys = append(e.Keys, ep.Keys...)
+ e.ACIPushEndpoints = append(e.ACIPushEndpoints, ep.ACIPushEndpoints...)
+}
+
+const (
+ defaultVersion = "latest"
+)
+
+var (
+ templateExpression = regexp.MustCompile(`{.*?}`)
+ errEnough = errors.New("enough discovery information found")
+)
+
+func appendMeta(meta []acMeta, attrs []html.Attribute) []acMeta {
+ m := acMeta{}
+
+ for _, a := range attrs {
+ if a.Namespace != "" {
+ continue
+ }
+
+ switch a.Key {
+ case "name":
+ m.name = a.Val
+
+ case "content":
+ parts := strings.SplitN(strings.TrimSpace(a.Val), " ", 2)
+ if len(parts) < 2 {
+ break
+ }
+ m.prefix = parts[0]
+ m.uri = strings.TrimSpace(parts[1])
+ }
+ }
+
+ // TODO(eyakubovich): should prefix be optional?
+ if !strings.HasPrefix(m.name, "ac-") || m.prefix == "" || m.uri == "" {
+ return meta
+ }
+
+ return append(meta, m)
+}
+
+func extractACMeta(r io.Reader) []acMeta {
+ var meta []acMeta
+
+ z := html.NewTokenizer(r)
+
+ for {
+ switch z.Next() {
+ case html.ErrorToken:
+ return meta
+
+ case html.StartTagToken, html.SelfClosingTagToken:
+ tok := z.Token()
+ if tok.DataAtom == atom.Meta {
+ meta = appendMeta(meta, tok.Attr)
+ }
+ }
+ }
+}
+
+func renderTemplate(tpl string, kvs ...string) (string, bool) {
+ for i := 0; i < len(kvs); i += 2 {
+ k := kvs[i]
+ v := kvs[i+1]
+ tpl = strings.Replace(tpl, k, v, -1)
+ }
+ return tpl, !templateExpression.MatchString(tpl)
+}
+
+func createTemplateVars(app App) []string {
+ tplVars := []string{"{name}", app.Name.String()}
+ // If a label is called "name", it will be ignored, as it appears later
+ // in the slice than the built-in {name} entry.
+ for n, v := range app.Labels {
+ tplVars = append(tplVars, fmt.Sprintf("{%s}", n), v)
+ }
+ return tplVars
+}
+
+func doDiscover(pre string, app App, insecure bool) (*Endpoints, error) {
+ app = *app.Copy()
+ if app.Labels["version"] == "" {
+ app.Labels["version"] = defaultVersion
+ }
+
+ _, body, err := httpsOrHTTP(pre, insecure)
+ if err != nil {
+ return nil, err
+ }
+ defer body.Close()
+
+ meta := extractACMeta(body)
+
+ tplVars := createTemplateVars(app)
+
+ de := &Endpoints{}
+
+ for _, m := range meta {
+ if !strings.HasPrefix(app.Name.String(), m.prefix) {
+ continue
+ }
+
+ switch m.name {
+ case "ac-discovery":
+ // Ignore unhandled variables; {ext} has not been rendered yet at this point.
+ uri, _ := renderTemplate(m.uri, tplVars...)
+ asc, ok := renderTemplate(uri, "{ext}", "aci.asc")
+ if !ok {
+ continue
+ }
+ aci, ok := renderTemplate(uri, "{ext}", "aci")
+ if !ok {
+ continue
+ }
+ de.ACIEndpoints = append(de.ACIEndpoints, ACIEndpoint{ACI: aci, ASC: asc})
+
+ case "ac-discovery-pubkeys":
+ de.Keys = append(de.Keys, m.uri)
+ case "ac-push-discovery":
+ uri, _ := renderTemplate(m.uri, tplVars...)
+ de.ACIPushEndpoints = append(de.ACIPushEndpoints, uri)
+ }
+ }
+
+ return de, nil
+}
+
+// DiscoverWalk will make HTTPS requests to find discovery meta tags and
+// optionally will use HTTP if insecure is set. Based on the response of the
+// discoverFn it will continue to recurse up the tree.
+func DiscoverWalk(app App, insecure bool, discoverFn DiscoverWalkFunc) (err error) {
+ var (
+ eps *Endpoints
+ )
+
+ parts := strings.Split(string(app.Name), "/")
+ for i := range parts {
+ end := len(parts) - i
+ pre := strings.Join(parts[:end], "/")
+
+ eps, err = doDiscover(pre, app, insecure)
+ if derr := discoverFn(pre, eps, err); derr != nil {
+ return derr
+ }
+ }
+
+ return
+}
+
+// DiscoverWalkFunc can stop a DiscoverWalk by returning non-nil error.
+type DiscoverWalkFunc func(prefix string, eps *Endpoints, err error) error
+
+// FailedAttempt represents a failed discovery attempt. This is for debugging
+// and user feedback.
+type FailedAttempt struct {
+ Prefix string
+ Error error
+}
+
+func walker(out *Endpoints, attempts *[]FailedAttempt, testFn DiscoverWalkFunc) DiscoverWalkFunc {
+ return func(pre string, eps *Endpoints, err error) error {
+ if err != nil {
+ *attempts = append(*attempts, FailedAttempt{pre, err})
+ return nil
+ }
+ out.Append(*eps)
+ if err := testFn(pre, eps, err); err != nil {
+ return err
+ }
+ return nil
+ }
+}
+
+// DiscoverEndpoints will make HTTPS requests to find the ac-discovery meta
+// tags and optionally will use HTTP if insecure is set. It will not give up
+// until it has exhausted the path or found an image discovery.
+func DiscoverEndpoints(app App, insecure bool) (out *Endpoints, attempts []FailedAttempt, err error) {
+ out = &Endpoints{}
+ testFn := func(pre string, eps *Endpoints, err error) error {
+ if len(out.ACIEndpoints) != 0 || len(out.Keys) != 0 || len(out.ACIPushEndpoints) != 0 {
+ return errEnough
+ }
+ return nil
+ }
+
+ err = DiscoverWalk(app, insecure, walker(out, &attempts, testFn))
+ if err != nil && err != errEnough {
+ return nil, attempts, err
+ }
+
+ return out, attempts, nil
+}
+
+// DiscoverPublicKeys will make HTTPS requests to find the ac-discovery-pubkeys
+// meta tags and optionally will use HTTP if insecure is set. It will not give
+// up until it has exhausted the path or found a public key.
+func DiscoverPublicKeys(app App, insecure bool) (out *Endpoints, attempts []FailedAttempt, err error) {
+ out = &Endpoints{}
+ testFn := func(pre string, eps *Endpoints, err error) error {
+ if len(out.Keys) != 0 {
+ return errEnough
+ }
+ return nil
+ }
+
+ err = DiscoverWalk(app, insecure, walker(out, &attempts, testFn))
+ if err != nil && err != errEnough {
+ return nil, attempts, err
+ }
+
+ return out, attempts, nil
+}
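
Not from the PR: a sketch of running discovery end to end with the vendored package, using the example image name from the doc comments.

```go
package main

import (
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/discovery"
)

func main() {
	app, err := discovery.NewAppFromString("example.com/reduce-worker:1.0.0")
	if err != nil {
		log.Fatal(err)
	}

	// insecure=false: walk the name with HTTPS only, collecting
	// ac-discovery, ac-discovery-pubkeys and ac-push-discovery meta tags.
	eps, attempts, err := discovery.DiscoverEndpoints(*app, false)
	if err != nil {
		log.Fatal(err)
	}
	for _, a := range attempts {
		log.Printf("discovery failed for %q: %v", a.Prefix, a.Error)
	}
	for _, ep := range eps.ACIEndpoints {
		fmt.Println("ACI:", ep.ACI, "signature:", ep.ASC)
	}
	for _, u := range eps.ACIPushEndpoints {
		fmt.Println("push endpoint:", u)
	}
}
```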
diff --git a/Godeps/_workspace/src/github.com/appc/spec/discovery/doc.go b/Godeps/_workspace/src/github.com/appc/spec/discovery/doc.go
new file mode 100644
index 0000000..55bfc3a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/discovery/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package discovery contains an experimental implementation of the Image
+// Discovery section of the appc specification.
+package discovery
diff --git a/Godeps/_workspace/src/github.com/appc/spec/discovery/http.go b/Godeps/_workspace/src/github.com/appc/spec/discovery/http.go
new file mode 100644
index 0000000..4198201
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/discovery/http.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "time"
+)
+
+const (
+ defaultDialTimeout = 5 * time.Second
+)
+
+var (
+ // Client is the default http.Client used for discovery requests.
+ Client *http.Client
+
+ // httpGet is the internal object used by discovery to retrieve URLs; it is
+ // defined here so it can be overridden for testing
+ httpGet httpGetter
+)
+
+// httpGetter is an interface used to wrap http.Client for real requests and
+// allow easy mocking in local tests.
+type httpGetter interface {
+ Get(url string) (resp *http.Response, err error)
+}
+
+func init() {
+ t := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: func(n, a string) (net.Conn, error) {
+ return net.DialTimeout(n, a, defaultDialTimeout)
+ },
+ }
+ Client = &http.Client{
+ Transport: t,
+ }
+ httpGet = Client
+}
+
+func httpsOrHTTP(name string, insecure bool) (urlStr string, body io.ReadCloser, err error) {
+ fetch := func(scheme string) (urlStr string, res *http.Response, err error) {
+ u, err := url.Parse(scheme + "://" + name)
+ if err != nil {
+ return "", nil, err
+ }
+ u.RawQuery = "ac-discovery=1"
+ urlStr = u.String()
+ res, err = httpGet.Get(urlStr)
+ return
+ }
+ closeBody := func(res *http.Response) {
+ if res != nil {
+ res.Body.Close()
+ }
+ }
+ urlStr, res, err := fetch("https")
+ if err != nil || res.StatusCode != http.StatusOK {
+ if insecure {
+ closeBody(res)
+ urlStr, res, err = fetch("http")
+ }
+ }
+
+ if res != nil && res.StatusCode != http.StatusOK {
+ err = fmt.Errorf("expected a 200 OK got %d", res.StatusCode)
+ }
+
+ if err != nil {
+ closeBody(res)
+ return "", nil, err
+ }
+ return urlStr, res.Body, nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/discovery/myapp.html b/Godeps/_workspace/src/github.com/appc/spec/discovery/myapp.html
new file mode 100644
index 0000000..10c80eb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/discovery/myapp.html
@@ -0,0 +1,15 @@
+
+
+
+
+ My app
+
+
+
+
+
+
+
+ My App
+
+
diff --git a/Godeps/_workspace/src/github.com/appc/spec/discovery/myapp2.html b/Godeps/_workspace/src/github.com/appc/spec/discovery/myapp2.html
new file mode 100644
index 0000000..270b4c6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/discovery/myapp2.html
@@ -0,0 +1,15 @@
+
+
+
+
+ My app
+
+
+
+
+
+
+
+ My App
+
+
diff --git a/Godeps/_workspace/src/github.com/appc/spec/discovery/parse.go b/Godeps/_workspace/src/github.com/appc/spec/discovery/parse.go
new file mode 100644
index 0000000..c1c12a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/discovery/parse.go
@@ -0,0 +1,142 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package discovery
+
+import (
+ "fmt"
+ "net/url"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+type App struct {
+ Name types.ACIdentifier
+ Labels map[types.ACIdentifier]string
+}
+
+func NewApp(name string, labels map[types.ACIdentifier]string) (*App, error) {
+ if labels == nil {
+ labels = make(map[types.ACIdentifier]string, 0)
+ }
+ acn, err := types.NewACIdentifier(name)
+ if err != nil {
+ return nil, err
+ }
+ return &App{
+ Name: *acn,
+ Labels: labels,
+ }, nil
+}
+
+// NewAppFromString takes a command line app parameter and returns the parsed App.
+//
+// Example app parameters:
+// example.com/reduce-worker:1.0.0
+// example.com/reduce-worker,channel=alpha,label=value
+// example.com/reduce-worker:1.0.0,label=value
+//
+// As can be seen in the examples above, the colon, comma and equal sign have
+// special meaning. If any of them has to be part of a label's value,
+// consider writing your own string-to-App parser.
+func NewAppFromString(app string) (*App, error) {
+ var (
+ name string
+ labels map[types.ACIdentifier]string
+ )
+
+ preparedApp, err := prepareAppString(app)
+ if err != nil {
+ return nil, err
+ }
+ v, err := url.ParseQuery(preparedApp)
+ if err != nil {
+ return nil, err
+ }
+ labels = make(map[types.ACIdentifier]string, 0)
+ for key, val := range v {
+ if len(val) > 1 {
+ return nil, fmt.Errorf("label %s with multiple values %q", key, val)
+ }
+ if key == "name" {
+ name = val[0]
+ continue
+ }
+ labelName, err := types.NewACIdentifier(key)
+ if err != nil {
+ return nil, err
+ }
+ labels[*labelName] = val[0]
+ }
+ a, err := NewApp(name, labels)
+ if err != nil {
+ return nil, err
+ }
+ return a, nil
+}
+
+func prepareAppString(app string) (string, error) {
+ if err := checkColon(app); err != nil {
+ return "", err
+ }
+
+ app = "name=" + strings.Replace(app, ":", ",version=", 1)
+ return makeQueryString(app)
+}
+
+func checkColon(app string) error {
+ firstComma := strings.IndexRune(app, ',')
+ firstColon := strings.IndexRune(app, ':')
+ if firstColon > firstComma && firstComma > -1 {
+ return fmt.Errorf("malformed app string - colon may appear only right after the app name")
+ }
+ if strings.Count(app, ":") > 1 {
+ return fmt.Errorf("malformed app string - colon may appear at most once")
+ }
+ return nil
+}
+
+func makeQueryString(app string) (string, error) {
+ parts := strings.Split(app, ",")
+ escapedParts := make([]string, len(parts))
+ for i, s := range parts {
+ p := strings.SplitN(s, "=", 2)
+ if len(p) != 2 {
+ return "", fmt.Errorf("malformed app string - has a label without a value: %s", p[0])
+ }
+ escapedParts[i] = fmt.Sprintf("%s=%s", p[0], url.QueryEscape(p[1]))
+ }
+ return strings.Join(escapedParts, "&"), nil
+}
+
+func (a *App) Copy() *App {
+ ac := &App{
+ Name: a.Name,
+ Labels: make(map[types.ACIdentifier]string, 0),
+ }
+ for k, v := range a.Labels {
+ ac.Labels[k] = v
+ }
+ return ac
+}
+
+// String returns the URL-like image name
+func (a *App) String() string {
+ img := a.Name.String()
+ for n, v := range a.Labels {
+ img += fmt.Sprintf(",%s=%s", n, v)
+ }
+ return img
+}
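
A small illustrative sketch, not part of the diff, of the app string syntax the comment above describes; the label values are invented.

```go
package main

import (
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/discovery"
)

func main() {
	// The colon becomes the "version" label; further labels follow as
	// comma-separated key=value pairs.
	app, err := discovery.NewAppFromString("example.com/reduce-worker:1.0.0,channel=alpha")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(app.Name)              // example.com/reduce-worker
	fmt.Println(app.Labels["version"]) // 1.0.0
	fmt.Println(app.Labels["channel"]) // alpha
	fmt.Println(app.String())          // URL-like form with the labels re-attached
}
```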
diff --git a/Godeps/_workspace/src/github.com/appc/spec/pkg/device/device_posix.go b/Godeps/_workspace/src/github.com/appc/spec/pkg/device/device_posix.go
new file mode 100644
index 0000000..a0bdd77
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/pkg/device/device_posix.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux freebsd netbsd openbsd darwin
+
+package device
+
+/*
+#define _BSD_SOURCE
+#define _DEFAULT_SOURCE
+#include <sys/types.h>
+
+unsigned int
+my_major(dev_t dev)
+{
+ return major(dev);
+}
+
+unsigned int
+my_minor(dev_t dev)
+{
+ return minor(dev);
+}
+
+dev_t
+my_makedev(unsigned int maj, unsigned int min)
+{
+ return makedev(maj, min);
+}
+*/
+import "C"
+
+func Major(rdev uint64) uint {
+ major := C.my_major(C.dev_t(rdev))
+ return uint(major)
+}
+
+func Minor(rdev uint64) uint {
+ minor := C.my_minor(C.dev_t(rdev))
+ return uint(minor)
+}
+
+func Makedev(maj uint, min uint) uint64 {
+ dev := C.my_makedev(C.uint(maj), C.uint(min))
+ return uint64(dev)
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/doc.go b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/doc.go
new file mode 100644
index 0000000..047a0c0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package tarheader contains a simple abstraction to accurately create
+// tar.Headers on different operating systems.
+package tarheader
diff --git a/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_darwin.go b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_darwin.go
new file mode 100644
index 0000000..8f68ee7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_darwin.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//+build darwin
+
+package tarheader
+
+import (
+ "archive/tar"
+ "os"
+ "syscall"
+ "time"
+)
+
+func init() {
+ populateHeaderStat = append(populateHeaderStat, populateHeaderCtime)
+}
+
+func populateHeaderCtime(h *tar.Header, fi os.FileInfo, _ map[uint64]string) {
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return
+ }
+
+ sec, nsec := st.Ctimespec.Unix()
+ ctime := time.Unix(sec, nsec)
+ h.ChangeTime = ctime
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_linux.go b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_linux.go
new file mode 100644
index 0000000..2055024
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_linux.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux
+
+package tarheader
+
+import (
+ "archive/tar"
+ "os"
+ "syscall"
+ "time"
+)
+
+func init() {
+ populateHeaderStat = append(populateHeaderStat, populateHeaderCtime)
+}
+
+func populateHeaderCtime(h *tar.Header, fi os.FileInfo, _ map[uint64]string) {
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return
+ }
+
+ sec, nsec := st.Ctim.Unix()
+ ctime := time.Unix(sec, nsec)
+ h.ChangeTime = ctime
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_posix.go b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_posix.go
new file mode 100644
index 0000000..da4111e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/pop_posix.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build linux freebsd netbsd openbsd
+
+package tarheader
+
+import (
+ "archive/tar"
+ "os"
+ "syscall"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/pkg/device"
+)
+
+func init() {
+ populateHeaderStat = append(populateHeaderStat, populateHeaderUnix)
+}
+
+func populateHeaderUnix(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {
+ st, ok := fi.Sys().(*syscall.Stat_t)
+ if !ok {
+ return
+ }
+ h.Uid = int(st.Uid)
+ h.Gid = int(st.Gid)
+ if st.Mode&syscall.S_IFMT == syscall.S_IFBLK || st.Mode&syscall.S_IFMT == syscall.S_IFCHR {
+ h.Devminor = int64(device.Minor(uint64(st.Rdev)))
+ h.Devmajor = int64(device.Major(uint64(st.Rdev)))
+ }
+ // If we have already seen this inode, generate a hardlink
+ p, ok := seen[uint64(st.Ino)]
+ if ok {
+ h.Linkname = p
+ h.Typeflag = tar.TypeLink
+ } else {
+ seen[uint64(st.Ino)] = h.Name
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/tarheader.go b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/tarheader.go
new file mode 100644
index 0000000..dc16c33
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/pkg/tarheader/tarheader.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tarheader
+
+import (
+ "archive/tar"
+ "os"
+)
+
+var populateHeaderStat []func(h *tar.Header, fi os.FileInfo, seen map[uint64]string)
+
+func Populate(h *tar.Header, fi os.FileInfo, seen map[uint64]string) {
+ for _, pop := range populateHeaderStat {
+ pop(h, fi, seen)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/doc.go b/Godeps/_workspace/src/github.com/appc/spec/schema/doc.go
new file mode 100644
index 0000000..ba38154
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/doc.go
@@ -0,0 +1,25 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package schema provides definitions for the JSON schema of the different
+// manifests in the App Container Specification. The manifests are canonically
+// represented in their respective structs:
+// - `ImageManifest`
+// - `PodManifest`
+//
+// Validation is performed through serialization: if a blob of JSON data will
+// unmarshal to one of the *Manifests, it is considered a valid implementation
+// of the standard. Similarly, if a constructed *Manifest struct marshals
+// successfully to JSON, it must be valid.
+package schema
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/image.go b/Godeps/_workspace/src/github.com/appc/spec/schema/image.go
new file mode 100644
index 0000000..87922fc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/image.go
@@ -0,0 +1,95 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/json"
+ "errors"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+const (
+ ACIExtension = ".aci"
+ ImageManifestKind = types.ACKind("ImageManifest")
+)
+
+type ImageManifest struct {
+ ACKind types.ACKind `json:"acKind"`
+ ACVersion types.SemVer `json:"acVersion"`
+ Name types.ACIdentifier `json:"name"`
+ Labels types.Labels `json:"labels,omitempty"`
+ App *types.App `json:"app,omitempty"`
+ Annotations types.Annotations `json:"annotations,omitempty"`
+ Dependencies types.Dependencies `json:"dependencies,omitempty"`
+ PathWhitelist []string `json:"pathWhitelist,omitempty"`
+}
+
+// imageManifest is a model to facilitate extra validation during the
+// unmarshalling of the ImageManifest
+type imageManifest ImageManifest
+
+func BlankImageManifest() *ImageManifest {
+ return &ImageManifest{ACKind: ImageManifestKind, ACVersion: AppContainerVersion}
+}
+
+func (im *ImageManifest) UnmarshalJSON(data []byte) error {
+ a := imageManifest(*im)
+ err := json.Unmarshal(data, &a)
+ if err != nil {
+ return err
+ }
+ nim := ImageManifest(a)
+ if err := nim.assertValid(); err != nil {
+ return err
+ }
+ *im = nim
+ return nil
+}
+
+func (im ImageManifest) MarshalJSON() ([]byte, error) {
+ if err := im.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(imageManifest(im))
+}
+
+var imKindError = types.InvalidACKindError(ImageManifestKind)
+
+// assertValid performs extra assertions on an ImageManifest to ensure that
+// fields are set appropriately, etc. It is used exclusively when marshalling
+// and unmarshalling an ImageManifest. Most field-specific validation is
+// performed through the individual types being marshalled; assertValid()
+// should only deal with higher-level validation.
+func (im *ImageManifest) assertValid() error {
+ if im.ACKind != ImageManifestKind {
+ return imKindError
+ }
+ if im.ACVersion.Empty() {
+ return errors.New(`acVersion must be set`)
+ }
+ if im.Name.Empty() {
+ return errors.New(`name must be set`)
+ }
+ return nil
+}
+
+func (im *ImageManifest) GetLabel(name string) (val string, ok bool) {
+ return im.Labels.Get(name)
+}
+
+func (im *ImageManifest) GetAnnotation(name string) (val string, ok bool) {
+ return im.Annotations.Get(name)
+}
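
Not part of the diff: a sketch of the validate-through-serialization behaviour described in the schema doc comment, assuming the vendored schema/types package (not shown in this excerpt) for types.NewACIdentifier. The JSON blob and names are invented.

```go
package main

import (
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// Unmarshalling runs assertValid: a blob missing acKind, acVersion or
	// name is rejected here rather than producing a half-filled struct.
	blob := []byte(`{"acKind":"ImageManifest","acVersion":"0.7.1","name":"example.com/app"}`)
	var im schema.ImageManifest
	if err := im.UnmarshalJSON(blob); err != nil {
		log.Fatal(err)
	}
	fmt.Println(im.Name)

	// Building a manifest programmatically goes through the same checks
	// when marshalling it back to JSON.
	out := schema.BlankImageManifest()
	name, err := types.NewACIdentifier("example.com/app")
	if err != nil {
		log.Fatal(err)
	}
	out.Name = *name
	b, err := out.MarshalJSON()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))
}
```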
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/kind.go b/Godeps/_workspace/src/github.com/appc/spec/schema/kind.go
new file mode 100644
index 0000000..a4c547c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/kind.go
@@ -0,0 +1,42 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/json"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+type Kind struct {
+ ACVersion types.SemVer `json:"acVersion"`
+ ACKind types.ACKind `json:"acKind"`
+}
+
+type kind Kind
+
+func (k *Kind) UnmarshalJSON(data []byte) error {
+ nk := kind{}
+ err := json.Unmarshal(data, &nk)
+ if err != nil {
+ return err
+ }
+ *k = Kind(nk)
+ return nil
+}
+
+func (k Kind) MarshalJSON() ([]byte, error) {
+ return json.Marshal(kind(k))
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/doc.go b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/doc.go
new file mode 100644
index 0000000..9cc5734
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/doc.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package lastditch provides fallback redefinitions of parts of the
+// schemas provided by the schema package.
+//
+// Almost no validation of schemas is done (besides checking that the data
+// really is JSON-encoded and that the kind is either `ImageManifest` or
+// `PodManifest`). This is to get as much data as possible from an
+// invalid manifest. The main aim of the package is better error
+// reporting. Another aim might be to force some operation (like
+// removing a pod) that would otherwise fail because of an invalid
+// manifest.
+//
+// To avoid validation during deserialization, types provided by this
+// package use plain strings.
+package lastditch
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/image.go b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/image.go
new file mode 100644
index 0000000..3a54893
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/image.go
@@ -0,0 +1,45 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lastditch
+
+import (
+ "encoding/json"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+type ImageManifest struct {
+ ACVersion string `json:"acVersion"`
+ ACKind string `json:"acKind"`
+ Name string `json:"name"`
+ Labels Labels `json:"labels,omitempty"`
+}
+
+// a type just to avoid a recursion during unmarshalling
+type imageManifest ImageManifest
+
+func (im *ImageManifest) UnmarshalJSON(data []byte) error {
+ i := imageManifest(*im)
+ err := json.Unmarshal(data, &i)
+ if err != nil {
+ return err
+ }
+ if i.ACKind != string(schema.ImageManifestKind) {
+ return types.InvalidACKindError(schema.ImageManifestKind)
+ }
+ *im = ImageManifest(i)
+ return nil
+}
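
Not from the PR: a sketch of what the lastditch doc comment describes, recovering fields from a manifest that the strict schema package would refuse; the JSON blob is invented sample data.

```go
package main

import (
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch"
)

func main() {
	// A manifest the strict schema package would reject (bad version and
	// name), but whose fields we still want for error reporting.
	blob := []byte(`{"acKind":"ImageManifest","acVersion":"not-a-semver","name":"EXAMPLE APP","labels":[{"name":"version","value":"1.0.0"}]}`)

	var im lastditch.ImageManifest
	if err := im.UnmarshalJSON(blob); err != nil {
		log.Fatal(err) // only a wrong acKind or broken JSON ends up here
	}
	fmt.Println(im.Name, im.ACVersion, im.Labels)
}
```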
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/labels.go b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/labels.go
new file mode 100644
index 0000000..5cf93a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/labels.go
@@ -0,0 +1,38 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lastditch
+
+import (
+ "encoding/json"
+)
+
+type Labels []Label
+
+// a type just to avoid a recursion during unmarshalling
+type labels Labels
+
+type Label struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+func (l *Labels) UnmarshalJSON(data []byte) error {
+ var jl labels
+ if err := json.Unmarshal(data, &jl); err != nil {
+ return err
+ }
+ *l = Labels(jl)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/pod.go b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/pod.go
new file mode 100644
index 0000000..f038b4c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/lastditch/pod.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package lastditch
+
+import (
+ "encoding/json"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+type PodManifest struct {
+ ACVersion string `json:"acVersion"`
+ ACKind string `json:"acKind"`
+ Apps AppList `json:"apps"`
+}
+
+type AppList []RuntimeApp
+
+type RuntimeApp struct {
+ Name string `json:"name"`
+ Image RuntimeImage `json:"image"`
+}
+
+type RuntimeImage struct {
+ Name string `json:"name"`
+ ID string `json:"id"`
+ Labels Labels `json:"labels,omitempty"`
+}
+
+// a type just to avoid a recursion during unmarshalling
+type podManifest PodManifest
+
+func (pm *PodManifest) UnmarshalJSON(data []byte) error {
+ p := podManifest(*pm)
+ err := json.Unmarshal(data, &p)
+ if err != nil {
+ return err
+ }
+ if p.ACKind != string(schema.PodManifestKind) {
+ return types.InvalidACKindError(schema.PodManifestKind)
+ }
+ *pm = PodManifest(p)
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/pod.go b/Godeps/_workspace/src/github.com/appc/spec/schema/pod.go
new file mode 100644
index 0000000..d0fd149
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/pod.go
@@ -0,0 +1,160 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+const PodManifestKind = types.ACKind("PodManifest")
+
+type PodManifest struct {
+ ACVersion types.SemVer `json:"acVersion"`
+ ACKind types.ACKind `json:"acKind"`
+ Apps AppList `json:"apps"`
+ Volumes []types.Volume `json:"volumes"`
+ Isolators []types.Isolator `json:"isolators"`
+ Annotations types.Annotations `json:"annotations"`
+ Ports []types.ExposedPort `json:"ports"`
+}
+
+// podManifest is a model to facilitate extra validation during the
+// unmarshalling of the PodManifest
+type podManifest PodManifest
+
+func BlankPodManifest() *PodManifest {
+ return &PodManifest{ACKind: PodManifestKind, ACVersion: AppContainerVersion}
+}
+
+func (pm *PodManifest) UnmarshalJSON(data []byte) error {
+ p := podManifest(*pm)
+ err := json.Unmarshal(data, &p)
+ if err != nil {
+ return err
+ }
+ npm := PodManifest(p)
+ if err := npm.assertValid(); err != nil {
+ return err
+ }
+ *pm = npm
+ return nil
+}
+
+func (pm PodManifest) MarshalJSON() ([]byte, error) {
+ if err := pm.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(podManifest(pm))
+}
+
+var pmKindError = types.InvalidACKindError(PodManifestKind)
+
+// assertValid performs extra assertions on a PodManifest to
+// ensure that fields are set appropriately, etc. It is used exclusively when
+// marshalling and unmarshalling a PodManifest. Most
+// field-specific validation is performed through the individual types being
+// marshalled; assertValid() should only deal with higher-level validation.
+func (pm *PodManifest) assertValid() error {
+ if pm.ACKind != PodManifestKind {
+ return pmKindError
+ }
+ return nil
+}
+
+type AppList []RuntimeApp
+
+type appList AppList
+
+func (al *AppList) UnmarshalJSON(data []byte) error {
+ a := appList{}
+ err := json.Unmarshal(data, &a)
+ if err != nil {
+ return err
+ }
+ nal := AppList(a)
+ if err := nal.assertValid(); err != nil {
+ return err
+ }
+ *al = nal
+ return nil
+}
+
+func (al AppList) MarshalJSON() ([]byte, error) {
+ if err := al.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(appList(al))
+}
+
+func (al AppList) assertValid() error {
+ seen := map[types.ACName]bool{}
+ for _, a := range al {
+ if _, ok := seen[a.Name]; ok {
+ return fmt.Errorf(`duplicate apps of name %q`, a.Name)
+ }
+ seen[a.Name] = true
+ }
+ return nil
+}
+
+// Get retrieves an app by the specified name from the AppList; if there is
+// no such app, nil is returned. The returned *RuntimeApp MUST be considered
+// read-only.
+func (al AppList) Get(name types.ACName) *RuntimeApp {
+ for _, a := range al {
+ if name.Equals(a.Name) {
+ aa := a
+ return &aa
+ }
+ }
+ return nil
+}
+
+// Mount describes the mapping between a volume and the path it is mounted
+// inside of an app's filesystem.
+type Mount struct {
+ Volume types.ACName `json:"volume"`
+ Path string `json:"path"`
+}
+
+func (r Mount) assertValid() error {
+ if r.Volume.Empty() {
+ return errors.New("volume must be set")
+ }
+ if r.Path == "" {
+ return errors.New("path must be set")
+ }
+ return nil
+}
+
+// RuntimeApp describes an application referenced in a PodManifest
+type RuntimeApp struct {
+ Name types.ACName `json:"name"`
+ Image RuntimeImage `json:"image"`
+ App *types.App `json:"app,omitempty"`
+ Mounts []Mount `json:"mounts,omitempty"`
+ Annotations types.Annotations `json:"annotations,omitempty"`
+}
+
+// RuntimeImage describes an image referenced in a RuntimeApp
+type RuntimeImage struct {
+ Name *types.ACIdentifier `json:"name,omitempty"`
+ ID types.Hash `json:"id"`
+ Labels types.Labels `json:"labels,omitempty"`
+}
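
BlankPodManifest pre-fills acKind and acVersion, and marshalling re-runs assertValid on the manifest and its app list. A hedged sketch of building and serializing a one-app pod manifest (the names and the placeholder image hash are illustrative, not part of the vendored file):

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	pm := schema.BlankPodManifest() // acKind and acVersion pre-filled

	pm.Apps = append(pm.Apps, schema.RuntimeApp{
		Name: *types.MustACName("web"),
		Image: schema.RuntimeImage{
			Name: types.MustACIdentifier("example.com/web"),
			// The ID would normally be the image's real hash; this placeholder
			// exists only so that Hash validation passes during marshalling.
			ID: *types.NewHashSHA512([]byte("placeholder")),
		},
	})

	b, err := json.Marshal(pm) // re-runs assertValid on the manifest and app list
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(b))

	if app := pm.Apps.Get(*types.MustACName("web")); app != nil {
		fmt.Println("found:", app.Name)
	}
}
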
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/acidentifier.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/acidentifier.go
new file mode 100644
index 0000000..904eda5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/acidentifier.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "regexp"
+ "strings"
+)
+
+var (
+ // ValidACIdentifier is a regular expression that defines a valid ACIdentifier
+ ValidACIdentifier = regexp.MustCompile("^[a-z0-9]+([-._~/][a-z0-9]+)*$")
+
+ invalidACIdentifierChars = regexp.MustCompile("[^a-z0-9-._~/]")
+ invalidACIdentifierEdges = regexp.MustCompile("(^[-._~/]+)|([-._~/]+$)")
+
+ ErrEmptyACIdentifier = ACIdentifierError("ACIdentifier cannot be empty")
+ ErrInvalidEdgeInACIdentifier = ACIdentifierError("ACIdentifier must start and end with only lower case " +
+ "alphanumeric characters")
+ ErrInvalidCharInACIdentifier = ACIdentifierError("ACIdentifier must contain only lower case " +
+ `alphanumeric characters plus "-._~/"`)
+)
+
+// ACIdentifier (an App-Container Identifier) is a format used by keys in image names
+// and image labels of the App Container Standard. An ACIdentifier is restricted to numeric
+// and lowercase URI unreserved characters defined in URI RFC[1]; all alphabetical characters
+// must be lowercase only. Furthermore, the first and last character ("edges") must be
+// alphanumeric, and an ACIdentifier cannot be empty. Programmatically, an ACIdentifier must
+// conform to the regular expression ValidACIdentifier.
+//
+// [1] http://tools.ietf.org/html/rfc3986#section-2.3
+type ACIdentifier string
+
+func (n ACIdentifier) String() string {
+ return string(n)
+}
+
+// Set sets the ACIdentifier to the given value, if it is valid; if not,
+// an error is returned.
+func (n *ACIdentifier) Set(s string) error {
+ nn, err := NewACIdentifier(s)
+ if err == nil {
+ *n = *nn
+ }
+ return err
+}
+
+// Equals checks whether a given ACIdentifier is equal to this one.
+func (n ACIdentifier) Equals(o ACIdentifier) bool {
+ return strings.ToLower(string(n)) == strings.ToLower(string(o))
+}
+
+// Empty returns a boolean indicating whether this ACIdentifier is empty.
+func (n ACIdentifier) Empty() bool {
+ return n.String() == ""
+}
+
+// NewACIdentifier generates a new ACIdentifier from a string. If the given string is
+// not a valid ACIdentifier, nil and an error are returned.
+func NewACIdentifier(s string) (*ACIdentifier, error) {
+ n := ACIdentifier(s)
+ if err := n.assertValid(); err != nil {
+ return nil, err
+ }
+ return &n, nil
+}
+
+// MustACIdentifier generates a new ACIdentifier from a string. If the given string is
+// not a valid ACIdentifier, it panics.
+func MustACIdentifier(s string) *ACIdentifier {
+ n, err := NewACIdentifier(s)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (n ACIdentifier) assertValid() error {
+ s := string(n)
+ if len(s) == 0 {
+ return ErrEmptyACIdentifier
+ }
+ if invalidACIdentifierChars.MatchString(s) {
+ return ErrInvalidCharInACIdentifier
+ }
+ if invalidACIdentifierEdges.MatchString(s) {
+ return ErrInvalidEdgeInACIdentifier
+ }
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (n *ACIdentifier) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ nn, err := NewACIdentifier(s)
+ if err != nil {
+ return err
+ }
+ *n = *nn
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (n ACIdentifier) MarshalJSON() ([]byte, error) {
+ if err := n.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(n.String())
+}
+
+// SanitizeACIdentifier replaces every invalid ACIdentifier character in s with an underscore
+// making it a legal ACIdentifier string. If the character is an upper case letter it
+// replaces it with its lower case equivalent. It also removes illegal edge characters
+// (hyphens, periods, underscores, tildes and slashes).
+//
+// This is a helper function and its algorithm is not part of the spec. It
+// should not be called without the user explicitly asking for a suggestion.
+func SanitizeACIdentifier(s string) (string, error) {
+ s = strings.ToLower(s)
+ s = invalidACIdentifierChars.ReplaceAllString(s, "_")
+ s = invalidACIdentifierEdges.ReplaceAllString(s, "")
+
+ if s == "" {
+ return "", errors.New("must contain at least one valid character")
+ }
+
+ return s, nil
+}
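
NewACIdentifier enforces the ValidACIdentifier expression strictly, while SanitizeACIdentifier only proposes a legal form when explicitly asked. A small sketch contrasting the two (the input strings are illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// Strict construction rejects uppercase and other invalid characters.
	if _, err := types.NewACIdentifier("Example.com/App"); err != nil {
		fmt.Println("rejected:", err)
	}

	// SanitizeACIdentifier lowercases, replaces invalid characters with "_",
	// and trims illegal edges; it should only run as a user-requested suggestion.
	if s, err := types.SanitizeACIdentifier("Example.com/App!"); err == nil {
		fmt.Println("suggested:", s) // expected: example.com/app
	}
}
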
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/ackind.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/ackind.go
new file mode 100644
index 0000000..1793ca8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/ackind.go
@@ -0,0 +1,67 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+var (
+ ErrNoACKind = ACKindError("ACKind must be set")
+)
+
+// ACKind wraps a string to define a field which must be set with one of
+// several ACKind values. If it is unset, or has an invalid value, the field
+// will refuse to marshal/unmarshal.
+type ACKind string
+
+func (a ACKind) String() string {
+ return string(a)
+}
+
+func (a ACKind) assertValid() error {
+ s := a.String()
+ switch s {
+ case "ImageManifest", "PodManifest":
+ return nil
+ case "":
+ return ErrNoACKind
+ default:
+ msg := fmt.Sprintf("bad ACKind: %s", s)
+ return ACKindError(msg)
+ }
+}
+
+func (a ACKind) MarshalJSON() ([]byte, error) {
+ if err := a.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(a.String())
+}
+
+func (a *ACKind) UnmarshalJSON(data []byte) error {
+ var s string
+ err := json.Unmarshal(data, &s)
+ if err != nil {
+ return err
+ }
+ na := ACKind(s)
+ if err := na.assertValid(); err != nil {
+ return err
+ }
+ *a = na
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/acname.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/acname.go
new file mode 100644
index 0000000..5ececff
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/acname.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "regexp"
+ "strings"
+)
+
+var (
+ // ValidACName is a regular expression that defines a valid ACName
+ ValidACName = regexp.MustCompile("^[a-z0-9]+([-][a-z0-9]+)*$")
+
+ invalidACNameChars = regexp.MustCompile("[^a-z0-9-]")
+ invalidACNameEdges = regexp.MustCompile("(^[-]+)|([-]+$)")
+
+ ErrEmptyACName = ACNameError("ACName cannot be empty")
+ ErrInvalidEdgeInACName = ACNameError("ACName must start and end with only lower case " +
+ "alphanumeric characters")
+ ErrInvalidCharInACName = ACNameError("ACName must contain only lower case " +
+ `alphanumeric characters plus "-"`)
+)
+
+// ACName (an App-Container Name) is a format used by keys in different formats
+// of the App Container Standard. An ACName is restricted to numeric and lowercase
+// characters accepted by the DNS RFC[1] plus "-"; all alphabetical characters must
+// be lowercase only. Furthermore, the first and last character ("edges") must be
+// alphanumeric, and an ACName cannot be empty. Programmatically, an ACName must
+// conform to the regular expression ValidACName.
+//
+// [1] http://tools.ietf.org/html/rfc1123#page-13
+type ACName string
+
+func (n ACName) String() string {
+ return string(n)
+}
+
+// Set sets the ACName to the given value, if it is valid; if not,
+// an error is returned.
+func (n *ACName) Set(s string) error {
+ nn, err := NewACName(s)
+ if err == nil {
+ *n = *nn
+ }
+ return err
+}
+
+// Equals checks whether a given ACName is equal to this one.
+func (n ACName) Equals(o ACName) bool {
+ return strings.ToLower(string(n)) == strings.ToLower(string(o))
+}
+
+// Empty returns a boolean indicating whether this ACName is empty.
+func (n ACName) Empty() bool {
+ return n.String() == ""
+}
+
+// NewACName generates a new ACName from a string. If the given string is
+// not a valid ACName, nil and an error are returned.
+func NewACName(s string) (*ACName, error) {
+ n := ACName(s)
+ if err := n.assertValid(); err != nil {
+ return nil, err
+ }
+ return &n, nil
+}
+
+// MustACName generates a new ACName from a string. If the given string is
+// not a valid ACName, it panics.
+func MustACName(s string) *ACName {
+ n, err := NewACName(s)
+ if err != nil {
+ panic(err)
+ }
+ return n
+}
+
+func (n ACName) assertValid() error {
+ s := string(n)
+ if len(s) == 0 {
+ return ErrEmptyACName
+ }
+ if invalidACNameChars.MatchString(s) {
+ return ErrInvalidCharInACName
+ }
+ if invalidACNameEdges.MatchString(s) {
+ return ErrInvalidEdgeInACName
+ }
+ return nil
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (n *ACName) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ nn, err := NewACName(s)
+ if err != nil {
+ return err
+ }
+ *n = *nn
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (n ACName) MarshalJSON() ([]byte, error) {
+ if err := n.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(n.String())
+}
+
+// SanitizeACName replaces every invalid ACName character in s with a dash
+// making it a legal ACName string. If the character is an upper case letter it
+// replaces it with its lower case equivalent. It also removes illegal edge characters
+// (hyphens).
+//
+// This is a helper function and its algorithm is not part of the spec. It
+// should not be called without the user explicitly asking for a suggestion.
+func SanitizeACName(s string) (string, error) {
+ s = strings.ToLower(s)
+ s = invalidACNameChars.ReplaceAllString(s, "-")
+ s = invalidACNameEdges.ReplaceAllString(s, "")
+
+ if s == "" {
+ return "", errors.New("must contain at least one valid character")
+ }
+
+ return s, nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/annotations.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/annotations.go
new file mode 100644
index 0000000..ce7743b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/annotations.go
@@ -0,0 +1,106 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+)
+
+type Annotations []Annotation
+
+type annotations Annotations
+
+type Annotation struct {
+ Name ACIdentifier `json:"name"`
+ Value string `json:"value"`
+}
+
+func (a Annotations) assertValid() error {
+ seen := map[ACIdentifier]string{}
+ for _, anno := range a {
+ _, ok := seen[anno.Name]
+ if ok {
+ return fmt.Errorf(`duplicate annotations of name %q`, anno.Name)
+ }
+ seen[anno.Name] = anno.Value
+ }
+ if c, ok := seen["created"]; ok {
+ if _, err := NewDate(c); err != nil {
+ return err
+ }
+ }
+ if h, ok := seen["homepage"]; ok {
+ if _, err := NewURL(h); err != nil {
+ return err
+ }
+ }
+ if d, ok := seen["documentation"]; ok {
+ if _, err := NewURL(d); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (a Annotations) MarshalJSON() ([]byte, error) {
+ if err := a.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(annotations(a))
+}
+
+func (a *Annotations) UnmarshalJSON(data []byte) error {
+ var ja annotations
+ if err := json.Unmarshal(data, &ja); err != nil {
+ return err
+ }
+ na := Annotations(ja)
+ if err := na.assertValid(); err != nil {
+ return err
+ }
+ *a = na
+ return nil
+}
+
+// Retrieve the value of an annotation by the given name from Annotations, if
+// it exists.
+func (a Annotations) Get(name string) (val string, ok bool) {
+ for _, anno := range a {
+ if anno.Name.String() == name {
+ return anno.Value, true
+ }
+ }
+ return "", false
+}
+
+// Set sets the value of an annotation by the given name, overwriting if one already exists.
+func (a *Annotations) Set(name ACIdentifier, value string) {
+ for i, anno := range *a {
+ if anno.Name.Equals(name) {
+ (*a)[i] = Annotation{
+ Name: name,
+ Value: value,
+ }
+ return
+ }
+ }
+ anno := Annotation{
+ Name: name,
+ Value: value,
+ }
+ *a = append(*a, anno)
+}
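
Set either overwrites an existing annotation of the same name or appends a new one, and marshalling re-validates well-known names such as "created". A short sketch of that flow (the annotation names and values are illustrative, not part of the vendored file):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	var a types.Annotations

	// Set overwrites an existing annotation of the same name or appends a new one.
	a.Set(*types.MustACIdentifier("created"), "2015-10-21T07:28:00Z")
	a.Set(*types.MustACIdentifier("authors"), "Example Maintainers")

	if v, ok := a.Get("created"); ok {
		fmt.Println("created:", v)
	}

	// Marshalling re-runs assertValid, so a malformed "created" date or a
	// duplicate annotation name would be reported here.
	b, err := json.Marshal(a)
	fmt.Println(string(b), err)
}
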
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/app.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/app.go
new file mode 100644
index 0000000..df13bf1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/app.go
@@ -0,0 +1,90 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "path"
+)
+
+type App struct {
+ Exec Exec `json:"exec"`
+ EventHandlers []EventHandler `json:"eventHandlers,omitempty"`
+ User string `json:"user"`
+ Group string `json:"group"`
+ SupplementaryGIDs []int `json:"supplementaryGIDs,omitempty"`
+ WorkingDirectory string `json:"workingDirectory,omitempty"`
+ Environment Environment `json:"environment,omitempty"`
+ MountPoints []MountPoint `json:"mountPoints,omitempty"`
+ Ports []Port `json:"ports,omitempty"`
+ Isolators Isolators `json:"isolators,omitempty"`
+}
+
+// app is a model to facilitate extra validation during the
+// unmarshalling of the App
+type app App
+
+func (a *App) UnmarshalJSON(data []byte) error {
+ ja := app(*a)
+ err := json.Unmarshal(data, &ja)
+ if err != nil {
+ return err
+ }
+ na := App(ja)
+ if err := na.assertValid(); err != nil {
+ return err
+ }
+ if na.Environment == nil {
+ na.Environment = make(Environment, 0)
+ }
+ *a = na
+ return nil
+}
+
+func (a App) MarshalJSON() ([]byte, error) {
+ if err := a.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(app(a))
+}
+
+func (a *App) assertValid() error {
+ if err := a.Exec.assertValid(); err != nil {
+ return err
+ }
+ if a.User == "" {
+ return errors.New(`user is required`)
+ }
+ if a.Group == "" {
+ return errors.New(`group is required`)
+ }
+ if !path.IsAbs(a.WorkingDirectory) && a.WorkingDirectory != "" {
+ return errors.New("workingDirectory must be an absolute path")
+ }
+ eh := make(map[string]bool)
+ for _, e := range a.EventHandlers {
+ name := e.Name
+ if eh[name] {
+ return fmt.Errorf("Only one eventHandler of name %q allowed", name)
+ }
+ eh[name] = true
+ }
+ if err := a.Environment.assertValid(); err != nil {
+ return err
+ }
+ return nil
+}
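
App.UnmarshalJSON rejects manifests whose exec path is relative or whose user or group is unset. A quick sketch showing one accepted and one rejected payload (both JSON blobs are illustrative, not part of the vendored file):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// exec[0] is absolute and user/group are set, so this parses.
	good := []byte(`{"exec":["/bin/sh","-c","sleep 60"],"user":"0","group":"0"}`)
	var app types.App
	fmt.Println("valid app:", json.Unmarshal(good, &app)) // expected: <nil>

	// Dropping "user" makes UnmarshalJSON fail via assertValid.
	bad := []byte(`{"exec":["/bin/sh"],"group":"0"}`)
	var broken types.App
	fmt.Println("invalid app:", json.Unmarshal(bad, &broken)) // expected: user is required
}
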
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/date.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/date.go
new file mode 100644
index 0000000..4458bf4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/date.go
@@ -0,0 +1,60 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+)
+
+// Date wraps time.Time to marshal/unmarshal to/from JSON strings in strict
+// accordance with RFC3339
+// TODO(jonboulle): golang's implementation seems slightly buggy here;
+// according to http://tools.ietf.org/html/rfc3339#section-5.6 , applications
+// may choose to separate the date and time with a space instead of a T
+// character (for example, `date --rfc-3339` on GNU coreutils) - but this is
+// considered an error by go's parser. File a bug?
+type Date time.Time
+
+func NewDate(s string) (*Date, error) {
+ t, err := time.Parse(time.RFC3339, s)
+ if err != nil {
+ return nil, fmt.Errorf("bad Date: %v", err)
+ }
+ d := Date(t)
+ return &d, nil
+}
+
+func (d Date) String() string {
+ return time.Time(d).Format(time.RFC3339)
+}
+
+func (d *Date) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ nd, err := NewDate(s)
+ if err != nil {
+ return err
+ }
+ *d = *nd
+ return nil
+}
+
+func (d Date) MarshalJSON() ([]byte, error) {
+ return json.Marshal(d.String())
+}
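
NewDate accepts strict RFC3339 only; the space-separated variant mentioned in the TODO above is rejected by Go's parser. A brief sketch (the timestamps are illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// Strict RFC3339 with a 'T' separator parses.
	if d, err := types.NewDate("2015-10-21T07:28:00Z"); err == nil {
		fmt.Println(d) // re-formatted as RFC3339 by Date.String
	}

	// A space separator is not accepted by Go's RFC3339 parser (see TODO above).
	if _, err := types.NewDate("2015-10-21 07:28:00Z"); err != nil {
		fmt.Println("rejected:", err)
	}
}
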
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/dependencies.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/dependencies.go
new file mode 100644
index 0000000..fb399e4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/dependencies.go
@@ -0,0 +1,58 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+type Dependencies []Dependency
+
+type Dependency struct {
+ ImageName ACIdentifier `json:"imageName"`
+ ImageID *Hash `json:"imageID,omitempty"`
+ Labels Labels `json:"labels,omitempty"`
+ Size uint `json:"size,omitempty"`
+}
+
+type dependency Dependency
+
+func (d Dependency) assertValid() error {
+ if len(d.ImageName) < 1 {
+ return errors.New(`imageName cannot be empty`)
+ }
+ return nil
+}
+
+func (d Dependency) MarshalJSON() ([]byte, error) {
+ if err := d.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(dependency(d))
+}
+
+func (d *Dependency) UnmarshalJSON(data []byte) error {
+ var jd dependency
+ if err := json.Unmarshal(data, &jd); err != nil {
+ return err
+ }
+ nd := Dependency(jd)
+ if err := nd.assertValid(); err != nil {
+ return err
+ }
+ *d = nd
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/doc.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/doc.go
new file mode 100644
index 0000000..9c54085
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/doc.go
@@ -0,0 +1,18 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types contains structs representing the various types in the app
+// container specification. It is used by the [schema manifest types](../)
+// to enforce validation.
+package types
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/environment.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/environment.go
new file mode 100644
index 0000000..f152a6b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/environment.go
@@ -0,0 +1,110 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "regexp"
+)
+
+var (
+ envPattern = regexp.MustCompile("^[A-Za-z_][A-Za-z_0-9]*$")
+)
+
+type Environment []EnvironmentVariable
+
+type environment Environment
+
+type EnvironmentVariable struct {
+ Name string `json:"name"`
+ Value string `json:"value"`
+}
+
+func (ev EnvironmentVariable) assertValid() error {
+ if len(ev.Name) == 0 {
+ return fmt.Errorf(`environment variable name must not be empty`)
+ }
+ if !envPattern.MatchString(ev.Name) {
+ return fmt.Errorf(`environment variable does not have valid identifier %q`, ev.Name)
+ }
+ return nil
+}
+
+func (e Environment) assertValid() error {
+ seen := map[string]bool{}
+ for _, env := range e {
+ if err := env.assertValid(); err != nil {
+ return err
+ }
+ _, ok := seen[env.Name]
+ if ok {
+ return fmt.Errorf(`duplicate environment variable of name %q`, env.Name)
+ }
+ seen[env.Name] = true
+ }
+
+ return nil
+}
+
+func (e Environment) MarshalJSON() ([]byte, error) {
+ if err := e.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(environment(e))
+}
+
+func (e *Environment) UnmarshalJSON(data []byte) error {
+ var je environment
+ if err := json.Unmarshal(data, &je); err != nil {
+ return err
+ }
+ ne := Environment(je)
+ if err := ne.assertValid(); err != nil {
+ return err
+ }
+ *e = ne
+ return nil
+}
+
+// Retrieve the value of an environment variable by the given name from
+// Environment, if it exists.
+func (e Environment) Get(name string) (value string, ok bool) {
+ for _, env := range e {
+ if env.Name == name {
+ return env.Value, true
+ }
+ }
+ return "", false
+}
+
+// Set sets the value of an environment variable by the given name,
+// overwriting if one already exists.
+func (e *Environment) Set(name string, value string) {
+ for i, env := range *e {
+ if env.Name == name {
+ (*e)[i] = EnvironmentVariable{
+ Name: name,
+ Value: value,
+ }
+ return
+ }
+ }
+ env := EnvironmentVariable{
+ Name: name,
+ Value: value,
+ }
+ *e = append(*e, env)
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/errors.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/errors.go
new file mode 100644
index 0000000..bb46515
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/errors.go
@@ -0,0 +1,49 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import "fmt"
+
+// An ACKindError is returned when the wrong ACKind is set in a manifest
+type ACKindError string
+
+func (e ACKindError) Error() string {
+ return string(e)
+}
+
+func InvalidACKindError(kind ACKind) ACKindError {
+ return ACKindError(fmt.Sprintf("missing or bad ACKind (must be %#v)", kind))
+}
+
+// An ACVersionError is returned when a bad ACVersion is set in a manifest
+type ACVersionError string
+
+func (e ACVersionError) Error() string {
+ return string(e)
+}
+
+// An ACIdentifierError is returned when a bad value is used for an ACIdentifier
+type ACIdentifierError string
+
+func (e ACIdentifierError) Error() string {
+ return string(e)
+}
+
+// An ACNameError is returned when a bad value is used for an ACName
+type ACNameError string
+
+func (e ACNameError) Error() string {
+ return string(e)
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/event_handler.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/event_handler.go
new file mode 100644
index 0000000..f40c642
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/event_handler.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+)
+
+type EventHandler struct {
+ Name string `json:"name"`
+ Exec Exec `json:"exec"`
+}
+
+type eventHandler EventHandler
+
+func (e EventHandler) assertValid() error {
+ s := e.Name
+ switch s {
+ case "pre-start", "post-stop":
+ return nil
+ case "":
+ return errors.New(`eventHandler "name" cannot be empty`)
+ default:
+ return fmt.Errorf(`bad eventHandler "name": %q`, s)
+ }
+}
+
+func (e EventHandler) MarshalJSON() ([]byte, error) {
+ if err := e.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(eventHandler(e))
+}
+
+func (e *EventHandler) UnmarshalJSON(data []byte) error {
+ var je eventHandler
+ err := json.Unmarshal(data, &je)
+ if err != nil {
+ return err
+ }
+ ne := EventHandler(je)
+ if err := ne.assertValid(); err != nil {
+ return err
+ }
+ *e = ne
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/exec.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/exec.go
new file mode 100644
index 0000000..b05f672
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/exec.go
@@ -0,0 +1,53 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "path/filepath"
+)
+
+type Exec []string
+
+type exec Exec
+
+func (e Exec) assertValid() error {
+ if len(e) > 0 && !filepath.IsAbs(e[0]) {
+ return errors.New(`exec[0] must be absolute path`)
+ }
+ return nil
+}
+
+func (e Exec) MarshalJSON() ([]byte, error) {
+ if err := e.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(exec(e))
+}
+
+func (e *Exec) UnmarshalJSON(data []byte) error {
+ var je exec
+ err := json.Unmarshal(data, &je)
+ if err != nil {
+ return err
+ }
+ ne := Exec(je)
+ if err := ne.assertValid(); err != nil {
+ return err
+ }
+ *e = ne
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/hash.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/hash.go
new file mode 100644
index 0000000..1c060a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/hash.go
@@ -0,0 +1,118 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "crypto/sha512"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+const (
+ maxHashSize = (sha512.Size / 2) + len("sha512-")
+)
+
+// Hash encodes a hash specified in a string of the form:
+// "<type>-<value>"
+// for example
+// "sha512-06c733b1838136838e6d2d3e8fa5aea4c7905e92[...]"
+// Valid types are currently:
+// * sha512
+type Hash struct {
+ typ string
+ Val string
+}
+
+func NewHash(s string) (*Hash, error) {
+ elems := strings.Split(s, "-")
+ if len(elems) != 2 {
+ return nil, errors.New("badly formatted hash string")
+ }
+ nh := Hash{
+ typ: elems[0],
+ Val: elems[1],
+ }
+ if err := nh.assertValid(); err != nil {
+ return nil, err
+ }
+ return &nh, nil
+}
+
+func (h Hash) String() string {
+ return fmt.Sprintf("%s-%s", h.typ, h.Val)
+}
+
+func (h *Hash) Set(s string) error {
+ nh, err := NewHash(s)
+ if err == nil {
+ *h = *nh
+ }
+ return err
+}
+
+func (h Hash) Empty() bool {
+ return reflect.DeepEqual(h, Hash{})
+}
+
+func (h Hash) assertValid() error {
+ switch h.typ {
+ case "sha512":
+ case "":
+ return fmt.Errorf("unexpected empty hash type")
+ default:
+ return fmt.Errorf("unrecognized hash type: %v", h.typ)
+ }
+ if h.Val == "" {
+ return fmt.Errorf("unexpected empty hash value")
+ }
+ return nil
+}
+
+func (h *Hash) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ nh, err := NewHash(s)
+ if err != nil {
+ return err
+ }
+ *h = *nh
+ return nil
+}
+
+func (h Hash) MarshalJSON() ([]byte, error) {
+ if err := h.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(h.String())
+}
+
+func NewHashSHA512(b []byte) *Hash {
+ h := sha512.New()
+ h.Write(b)
+ nh, _ := NewHash(fmt.Sprintf("sha512-%x", h.Sum(nil)))
+ return nh
+}
+
+func ShortHash(hash string) string {
+ if len(hash) > maxHashSize {
+ return hash[:maxHashSize]
+ }
+ return hash
+}
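
NewHashSHA512 derives a sha512 Hash from raw bytes and ShortHash trims the string form for display. A small sketch (the payload bytes are illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// Compute a sha512 image ID from raw bytes.
	h := types.NewHashSHA512([]byte("example ACI payload"))
	fmt.Println("full: ", h)

	// ShortHash trims an over-long hash string to a fixed display length.
	fmt.Println("short:", types.ShortHash(h.String()))

	// Strings that do not follow the "<type>-<value>" form are rejected.
	if _, err := types.NewHash("not-a-real-hash-type"); err != nil {
		fmt.Println("rejected:", err)
	}
}
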
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator.go
new file mode 100644
index 0000000..ecdab00
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+)
+
+var (
+ isolatorMap map[ACIdentifier]IsolatorValueConstructor
+)
+
+func init() {
+ isolatorMap = make(map[ACIdentifier]IsolatorValueConstructor)
+}
+
+type IsolatorValueConstructor func() IsolatorValue
+
+func AddIsolatorValueConstructor(n ACIdentifier, i IsolatorValueConstructor) {
+ isolatorMap[n] = i
+}
+
+func AddIsolatorName(n ACIdentifier, ns map[ACIdentifier]struct{}) {
+ ns[n] = struct{}{}
+}
+
+// Isolators encapsulates a list of individual Isolators for the ImageManifest
+// and PodManifest schemas.
+type Isolators []Isolator
+
+// GetByName returns the last isolator in the list by the given name.
+func (is *Isolators) GetByName(name ACIdentifier) *Isolator {
+ var i Isolator
+ for j := len(*is) - 1; j >= 0; j-- {
+ i = []Isolator(*is)[j]
+ if i.Name == name {
+ return &i
+ }
+ }
+ return nil
+}
+
+// Unrecognized returns a set of isolators that are not recognized.
+// An isolator is not recognized if it has not had an associated
+// constructor registered with AddIsolatorValueConstructor.
+func (is *Isolators) Unrecognized() Isolators {
+ u := Isolators{}
+ for _, i := range *is {
+ if i.value == nil {
+ u = append(u, i)
+ }
+ }
+ return u
+}
+
+// IsolatorValue encapsulates the actual value of an Isolator which may be
+// serialized as any arbitrary JSON blob. Specific Isolator types should
+// implement this interface to facilitate unmarshalling and validation.
+type IsolatorValue interface {
+ UnmarshalJSON(b []byte) error
+ AssertValid() error
+}
+
+// Isolator is a model for unmarshalling isolator types from their JSON-encoded
+// representation.
+type Isolator struct {
+ // Name is the name of the Isolator type as defined in the specification.
+ Name ACIdentifier `json:"name"`
+ // ValueRaw captures the raw JSON value of an Isolator that was
+ // unmarshalled. This field is used for unmarshalling only. It MUST NOT
+ // be referenced by external users of the Isolator struct. It is
+ // exported only to satisfy Go's unfortunate requirement that fields
+ // must be capitalized to be unmarshalled successfully.
+ ValueRaw *json.RawMessage `json:"value"`
+ // value captures the "true" value of the isolator.
+ value IsolatorValue
+}
+
+// isolator is a shadow type used for unmarshalling.
+type isolator Isolator
+
+// Value returns the raw Value of this Isolator. Users should perform a type
+// switch/assertion on this value to extract the underlying isolator type.
+func (i *Isolator) Value() IsolatorValue {
+ return i.value
+}
+
+// UnmarshalJSON populates this Isolator from a JSON-encoded representation. To
+// unmarshal the Value of the Isolator, it will use the appropriate constructor
+// as registered by AddIsolatorValueConstructor.
+func (i *Isolator) UnmarshalJSON(b []byte) error {
+ var ii isolator
+ err := json.Unmarshal(b, &ii)
+ if err != nil {
+ return err
+ }
+
+ var dst IsolatorValue
+ con, ok := isolatorMap[ii.Name]
+ if ok {
+ dst = con()
+ err = dst.UnmarshalJSON(*ii.ValueRaw)
+ if err != nil {
+ return err
+ }
+ err = dst.AssertValid()
+ if err != nil {
+ return err
+ }
+ }
+
+ i.value = dst
+ i.ValueRaw = ii.ValueRaw
+ i.Name = ii.Name
+
+ return nil
+}
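
Isolator.UnmarshalJSON decodes the value field through whatever constructor was registered for the isolator's name; unregistered names are left with a nil value and show up in Unrecognized(). A hypothetical custom isolator illustrating the registry (the name "example/noop" and the noopValue type are invented for this sketch and are not part of the vendored file):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

// noopValue is a hypothetical IsolatorValue that accepts any JSON value.
type noopValue struct {
	raw json.RawMessage
}

func (n *noopValue) UnmarshalJSON(b []byte) error {
	n.raw = append(json.RawMessage(nil), b...)
	return nil
}

func (n *noopValue) AssertValid() error { return nil }

func init() {
	// Register a constructor so Isolator.UnmarshalJSON can decode this name.
	types.AddIsolatorValueConstructor(*types.MustACIdentifier("example/noop"), func() types.IsolatorValue {
		return &noopValue{}
	})
}

func main() {
	var iso types.Isolator
	err := json.Unmarshal([]byte(`{"name":"example/noop","value":{"anything":true}}`), &iso)
	fmt.Println(err, iso.Value() != nil) // with the constructor registered, Value() is non-nil
}
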
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator_linux_specific.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator_linux_specific.go
new file mode 100644
index 0000000..ae4bfee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator_linux_specific.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+)
+
+const (
+ LinuxCapabilitiesRetainSetName = "os/linux/capabilities-retain-set"
+ LinuxCapabilitiesRevokeSetName = "os/linux/capabilities-remove-set"
+)
+
+var LinuxIsolatorNames = make(map[ACIdentifier]struct{})
+
+func init() {
+ for name, con := range map[ACIdentifier]IsolatorValueConstructor{
+ LinuxCapabilitiesRevokeSetName: func() IsolatorValue { return &LinuxCapabilitiesRevokeSet{} },
+ LinuxCapabilitiesRetainSetName: func() IsolatorValue { return &LinuxCapabilitiesRetainSet{} },
+ } {
+ AddIsolatorName(name, LinuxIsolatorNames)
+ AddIsolatorValueConstructor(name, con)
+ }
+}
+
+type LinuxCapabilitiesSet interface {
+ Set() []LinuxCapability
+ AssertValid() error
+}
+
+type LinuxCapability string
+
+type linuxCapabilitiesSetValue struct {
+ Set []LinuxCapability `json:"set"`
+}
+
+type linuxCapabilitiesSetBase struct {
+ val linuxCapabilitiesSetValue
+}
+
+func (l linuxCapabilitiesSetBase) AssertValid() error {
+ if len(l.val.Set) == 0 {
+ return errors.New("set must be non-empty")
+ }
+ return nil
+}
+
+func (l *linuxCapabilitiesSetBase) UnmarshalJSON(b []byte) error {
+ var v linuxCapabilitiesSetValue
+ err := json.Unmarshal(b, &v)
+ if err != nil {
+ return err
+ }
+
+ l.val = v
+
+ return err
+}
+
+func (l linuxCapabilitiesSetBase) Set() []LinuxCapability {
+ return l.val.Set
+}
+
+type LinuxCapabilitiesRetainSet struct {
+ linuxCapabilitiesSetBase
+}
+
+func NewLinuxCapabilitiesRetainSet(caps ...string) (*LinuxCapabilitiesRetainSet, error) {
+ l := LinuxCapabilitiesRetainSet{
+ linuxCapabilitiesSetBase{
+ linuxCapabilitiesSetValue{
+ make([]LinuxCapability, len(caps)),
+ },
+ },
+ }
+ for i, c := range caps {
+ l.linuxCapabilitiesSetBase.val.Set[i] = LinuxCapability(c)
+ }
+ if err := l.AssertValid(); err != nil {
+ return nil, err
+ }
+ return &l, nil
+}
+
+func (l LinuxCapabilitiesRetainSet) AsIsolator() Isolator {
+ b, err := json.Marshal(l)
+ if err != nil {
+ panic(err)
+ }
+ rm := json.RawMessage(b)
+ return Isolator{
+ Name: LinuxCapabilitiesRetainSetName,
+ ValueRaw: &rm,
+ value: &l,
+ }
+}
+
+type LinuxCapabilitiesRevokeSet struct {
+ linuxCapabilitiesSetBase
+}
+
+func NewLinuxCapabilitiesRevokeSet(caps ...string) (*LinuxCapabilitiesRevokeSet, error) {
+ l := LinuxCapabilitiesRevokeSet{
+ linuxCapabilitiesSetBase{
+ linuxCapabilitiesSetValue{
+ make([]LinuxCapability, len(caps)),
+ },
+ },
+ }
+ for i, c := range caps {
+ l.linuxCapabilitiesSetBase.val.Set[i] = LinuxCapability(c)
+ }
+ if err := l.AssertValid(); err != nil {
+ return nil, err
+ }
+ return &l, nil
+}
+
+func (l LinuxCapabilitiesRevokeSet) AsIsolator() Isolator {
+ b, err := json.Marshal(l)
+ if err != nil {
+ panic(err)
+ }
+ rm := json.RawMessage(b)
+ return Isolator{
+ Name: LinuxCapabilitiesRevokeSetName,
+ ValueRaw: &rm,
+ value: &l,
+ }
+}
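
The Linux capability isolators are built through their constructors and then wrapped with AsIsolator for inclusion in a manifest. A short sketch (the capability names are illustrative, not part of the vendored file):

package main

import (
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// An empty capability list would fail AssertValid inside the constructor.
	retain, err := types.NewLinuxCapabilitiesRetainSet("CAP_NET_BIND_SERVICE", "CAP_SYS_TIME")
	if err != nil {
		log.Fatal(err)
	}

	// AsIsolator wraps the set so it can be appended to an Isolators slice.
	iso := retain.AsIsolator()
	fmt.Println(iso.Name, len(retain.Set()))
}
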
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator_resources.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator_resources.go
new file mode 100644
index 0000000..fff4abd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/isolator_resources.go
@@ -0,0 +1,206 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource"
+)
+
+var (
+ ErrDefaultTrue = errors.New("default must be false")
+ ErrDefaultRequired = errors.New("default must be true")
+ ErrRequestNonEmpty = errors.New("request not supported by this resource, must be empty")
+
+ ResourceIsolatorNames = make(map[ACIdentifier]struct{})
+)
+
+const (
+ ResourceBlockBandwidthName = "resource/block-bandwidth"
+ ResourceBlockIOPSName = "resource/block-iops"
+ ResourceCPUName = "resource/cpu"
+ ResourceMemoryName = "resource/memory"
+ ResourceNetworkBandwidthName = "resource/network-bandwidth"
+)
+
+func init() {
+ for name, con := range map[ACIdentifier]IsolatorValueConstructor{
+ ResourceBlockBandwidthName: func() IsolatorValue { return &ResourceBlockBandwidth{} },
+ ResourceBlockIOPSName: func() IsolatorValue { return &ResourceBlockIOPS{} },
+ ResourceCPUName: func() IsolatorValue { return &ResourceCPU{} },
+ ResourceMemoryName: func() IsolatorValue { return &ResourceMemory{} },
+ ResourceNetworkBandwidthName: func() IsolatorValue { return &ResourceNetworkBandwidth{} },
+ } {
+ AddIsolatorName(name, ResourceIsolatorNames)
+ AddIsolatorValueConstructor(name, con)
+ }
+}
+
+type Resource interface {
+ Limit() *resource.Quantity
+ Request() *resource.Quantity
+ Default() bool
+}
+
+type ResourceBase struct {
+ val resourceValue
+}
+
+type resourceValue struct {
+ Default bool `json:"default"`
+ Request *resource.Quantity `json:"request"`
+ Limit *resource.Quantity `json:"limit"`
+}
+
+func (r ResourceBase) Limit() *resource.Quantity {
+ return r.val.Limit
+}
+func (r ResourceBase) Request() *resource.Quantity {
+ return r.val.Request
+}
+func (r ResourceBase) Default() bool {
+ return r.val.Default
+}
+
+func (r *ResourceBase) UnmarshalJSON(b []byte) error {
+ return json.Unmarshal(b, &r.val)
+}
+
+func (r ResourceBase) AssertValid() error {
+ return nil
+}
+
+type ResourceBlockBandwidth struct {
+ ResourceBase
+}
+
+func (r ResourceBlockBandwidth) AssertValid() error {
+ if r.Default() != true {
+ return ErrDefaultRequired
+ }
+ if r.Request() != nil {
+ return ErrRequestNonEmpty
+ }
+ return nil
+}
+
+type ResourceBlockIOPS struct {
+ ResourceBase
+}
+
+func (r ResourceBlockIOPS) AssertValid() error {
+ if r.Default() != true {
+ return ErrDefaultRequired
+ }
+ if r.Request() != nil {
+ return ErrRequestNonEmpty
+ }
+ return nil
+}
+
+type ResourceCPU struct {
+ ResourceBase
+}
+
+func (r ResourceCPU) String() string {
+ return fmt.Sprintf("ResourceCPU(request=%s, limit=%s)", r.Request(), r.Limit())
+}
+
+func (r ResourceCPU) AssertValid() error {
+ if r.Default() != false {
+ return ErrDefaultTrue
+ }
+ return nil
+}
+
+func NewResourceCPUIsolator(request, limit string) (*ResourceCPU, error) {
+ req, err := resource.ParseQuantity(request)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing request: %v", err)
+ }
+ lim, err := resource.ParseQuantity(limit)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing limit: %v", err)
+ }
+ res := &ResourceCPU{
+ ResourceBase{
+ resourceValue{
+ Request: req,
+ Limit: lim,
+ },
+ },
+ }
+ if err := res.AssertValid(); err != nil {
+ // should never happen
+ return nil, err
+ }
+ return res, nil
+}
+
+type ResourceMemory struct {
+ ResourceBase
+}
+
+func (r ResourceMemory) String() string {
+ return fmt.Sprintf("ResourceMemory(request=%s, limit=%s)", r.Request(), r.Limit())
+}
+
+func (r ResourceMemory) AssertValid() error {
+ if r.Default() != false {
+ return ErrDefaultTrue
+ }
+ return nil
+}
+
+func NewResourceMemoryIsolator(request, limit string) (*ResourceMemory, error) {
+ req, err := resource.ParseQuantity(request)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing request: %v", err)
+ }
+ lim, err := resource.ParseQuantity(limit)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing limit: %v", err)
+ }
+ res := &ResourceMemory{
+ ResourceBase{
+ resourceValue{
+ Request: req,
+ Limit: lim,
+ },
+ },
+ }
+ if err := res.AssertValid(); err != nil {
+ // should never happen
+ return nil, err
+ }
+ return res, nil
+}
+
+type ResourceNetworkBandwidth struct {
+ ResourceBase
+}
+
+func (r ResourceNetworkBandwidth) AssertValid() error {
+ if r.Default() != true {
+ return ErrDefaultRequired
+ }
+ if r.Request() != nil {
+ return ErrRequestNonEmpty
+ }
+ return nil
+}
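
The resource isolators parse their request and limit through the vendored Kubernetes resource package. A brief sketch using the CPU and memory constructors (the quantities are illustrative, not part of the vendored file):

package main

import (
	"fmt"
	"log"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// Request and limit are parsed as Kubernetes-style resource quantities.
	cpu, err := types.NewResourceCPUIsolator("500m", "1")
	if err != nil {
		log.Fatal(err)
	}
	mem, err := types.NewResourceMemoryIsolator("128Mi", "256Mi")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cpu, mem) // both types print their parsed values via String()
}
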
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/labels.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/labels.go
new file mode 100644
index 0000000..ebd2bb1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/labels.go
@@ -0,0 +1,134 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "sort"
+)
+
+var ValidOSArch = map[string][]string{
+ "linux": {"amd64", "i386", "aarch64", "aarch64_be", "armv6l", "armv7l", "armv7b"},
+ "freebsd": {"amd64", "i386", "arm"},
+ "darwin": {"x86_64", "i386"},
+}
+
+type Labels []Label
+
+type labels Labels
+
+type Label struct {
+ Name ACIdentifier `json:"name"`
+ Value string `json:"value"`
+}
+
+// IsValidOSArch checks if an OS-architecture combination is valid given a map
+// of valid OS-architectures
+func IsValidOSArch(labels map[ACIdentifier]string, validOSArch map[string][]string) error {
+ if os, ok := labels["os"]; ok {
+ if validArchs, ok := validOSArch[os]; !ok {
+ // Not a whitelisted OS. TODO: how to warn rather than fail?
+ validOses := make([]string, 0, len(validOSArch))
+ for validOs := range validOSArch {
+ validOses = append(validOses, validOs)
+ }
+ sort.Strings(validOses)
+ return fmt.Errorf(`bad os %#v (must be one of: %v)`, os, validOses)
+ } else {
+ // Whitelisted OS. We check arch here, as arch makes sense only
+ // when os is defined.
+ if arch, ok := labels["arch"]; ok {
+ found := false
+ for _, validArch := range validArchs {
+ if arch == validArch {
+ found = true
+ break
+ }
+ }
+ if !found {
+ return fmt.Errorf(`bad arch %#v for %v (must be one of: %v)`, arch, os, validArchs)
+ }
+ }
+ }
+ }
+ return nil
+}
+
+func (l Labels) assertValid() error {
+ seen := map[ACIdentifier]string{}
+ for _, lbl := range l {
+ if lbl.Name == "name" {
+ return fmt.Errorf(`invalid label name: "name"`)
+ }
+ _, ok := seen[lbl.Name]
+ if ok {
+ return fmt.Errorf(`duplicate labels of name %q`, lbl.Name)
+ }
+ seen[lbl.Name] = lbl.Value
+ }
+ return IsValidOSArch(seen, ValidOSArch)
+}
+
+func (l Labels) MarshalJSON() ([]byte, error) {
+ if err := l.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(labels(l))
+}
+
+func (l *Labels) UnmarshalJSON(data []byte) error {
+ var jl labels
+ if err := json.Unmarshal(data, &jl); err != nil {
+ return err
+ }
+ nl := Labels(jl)
+ if err := nl.assertValid(); err != nil {
+ return err
+ }
+ *l = nl
+ return nil
+}
+
+// Get retrieves the value of the label by the given name from Labels, if it exists
+func (l Labels) Get(name string) (val string, ok bool) {
+ for _, lbl := range l {
+ if lbl.Name.String() == name {
+ return lbl.Value, true
+ }
+ }
+ return "", false
+}
+
+// ToMap creates a map[ACIdentifier]string.
+func (l Labels) ToMap() map[ACIdentifier]string {
+ labelsMap := make(map[ACIdentifier]string)
+ for _, lbl := range l {
+ labelsMap[lbl.Name] = lbl.Value
+ }
+ return labelsMap
+}
+
+// LabelsFromMap creates Labels from a map[ACIdentifier]string
+func LabelsFromMap(labelsMap map[ACIdentifier]string) (Labels, error) {
+ labels := Labels{}
+ for n, v := range labelsMap {
+ labels = append(labels, Label{Name: n, Value: v})
+ }
+ if err := labels.assertValid(); err != nil {
+ return nil, err
+ }
+ return labels, nil
+}
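
LabelsFromMap builds Labels from a map and re-runs assertValid, including the os/arch whitelist check above. A short sketch showing one accepted and one rejected combination (the label values are illustrative, not part of the vendored file):

package main

import (
	"fmt"

	"github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
)

func main() {
	// A whitelisted os/arch pair passes validation.
	labels, err := types.LabelsFromMap(map[types.ACIdentifier]string{
		"os":      "linux",
		"arch":    "amd64",
		"version": "1.0.0",
	})
	fmt.Println(labels, err)

	// An arch that is not whitelisted for the given os is rejected.
	_, err = types.LabelsFromMap(map[types.ACIdentifier]string{
		"os":   "linux",
		"arch": "sparc",
	})
	fmt.Println("rejected:", err)
}
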
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/mountpoint.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/mountpoint.go
new file mode 100644
index 0000000..69ff114
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/mountpoint.go
@@ -0,0 +1,85 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+type MountPoint struct {
+ Name ACName `json:"name"`
+ Path string `json:"path"`
+ ReadOnly bool `json:"readOnly,omitempty"`
+}
+
+func (mount MountPoint) assertValid() error {
+ if mount.Name.Empty() {
+ return errors.New("name must be set")
+ }
+ if len(mount.Path) == 0 {
+ return errors.New("path must be set")
+ }
+ return nil
+}
+
+// MountPointFromString takes a command line mountpoint parameter and returns a mountpoint
+//
+// It is useful for actool patch-manifest --mounts
+//
+// Example mountpoint parameters:
+// database,path=/tmp,readOnly=true
+func MountPointFromString(mp string) (*MountPoint, error) {
+ var mount MountPoint
+
+ mp = "name=" + mp
+ v, err := url.ParseQuery(strings.Replace(mp, ",", "&", -1))
+ if err != nil {
+ return nil, err
+ }
+ for key, val := range v {
+ if len(val) > 1 {
+ return nil, fmt.Errorf("label %s with multiple values %q", key, val)
+ }
+
+ switch key {
+ case "name":
+ acn, err := NewACName(val[0])
+ if err != nil {
+ return nil, err
+ }
+ mount.Name = *acn
+ case "path":
+ mount.Path = val[0]
+ case "readOnly":
+ ro, err := strconv.ParseBool(val[0])
+ if err != nil {
+ return nil, err
+ }
+ mount.ReadOnly = ro
+ default:
+ return nil, fmt.Errorf("unknown mountpoint parameter %q", key)
+ }
+ }
+ err = mount.assertValid()
+ if err != nil {
+ return nil, err
+ }
+
+ return &mount, nil
+}
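+
+// Illustrative call (not part of the upstream source):
+//
+// mp, err := MountPointFromString("database,path=/tmp,readOnly=true")
+// // mp.Name == "database", mp.Path == "/tmp", mp.ReadOnly == true, err == nil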
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/port.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/port.go
new file mode 100644
index 0000000..519025e
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/port.go
@@ -0,0 +1,133 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+type Port struct {
+ Name ACName `json:"name"`
+ Protocol string `json:"protocol"`
+ Port uint `json:"port"`
+ Count uint `json:"count"`
+ SocketActivated bool `json:"socketActivated"`
+}
+
+type ExposedPort struct {
+ Name ACName `json:"name"`
+ HostPort uint `json:"hostPort"`
+}
+
+type port Port
+
+func (p *Port) UnmarshalJSON(data []byte) error {
+ var pp port
+ if err := json.Unmarshal(data, &pp); err != nil {
+ return err
+ }
+ np := Port(pp)
+ if err := np.assertValid(); err != nil {
+ return err
+ }
+ if np.Count == 0 {
+ np.Count = 1
+ }
+ *p = np
+ return nil
+}
+
+func (p Port) MarshalJSON() ([]byte, error) {
+ if err := p.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(port(p))
+}
+
+func (p Port) assertValid() error {
+ // Although there are no guarantees, most (if not all)
+ // transport protocols use 16 bit ports
+ if p.Port > 65535 || p.Port < 1 {
+ return errors.New("port must be in 1-65535 range")
+ }
+ if p.Port+p.Count > 65536 {
+ return errors.New("end of port range must be in 1-65535 range")
+ }
+ return nil
+}
+
+// PortFromString takes a command line port parameter and returns a port
+//
+// It is useful for actool patch-manifest --ports
+//
+// Example port parameters:
+// health-check,protocol=udp,port=8000
+// query,protocol=tcp,port=8080,count=1,socketActivated=true
+func PortFromString(pt string) (*Port, error) {
+ var port Port
+
+ pt = "name=" + pt
+ v, err := url.ParseQuery(strings.Replace(pt, ",", "&", -1))
+ if err != nil {
+ return nil, err
+ }
+ for key, val := range v {
+ if len(val) > 1 {
+ return nil, fmt.Errorf("label %s with multiple values %q", key, val)
+ }
+
+ switch key {
+ case "name":
+ acn, err := NewACName(val[0])
+ if err != nil {
+ return nil, err
+ }
+ port.Name = *acn
+ case "protocol":
+ port.Protocol = val[0]
+ case "port":
+ p, err := strconv.ParseUint(val[0], 10, 16)
+ if err != nil {
+ return nil, err
+ }
+ port.Port = uint(p)
+ case "count":
+ cnt, err := strconv.ParseUint(val[0], 10, 16)
+ if err != nil {
+ return nil, err
+ }
+ port.Count = uint(cnt)
+ case "socketActivated":
+ sa, err := strconv.ParseBool(val[0])
+ if err != nil {
+ return nil, err
+ }
+ port.SocketActivated = sa
+ default:
+ return nil, fmt.Errorf("unknown port parameter %q", key)
+ }
+ }
+ err = port.assertValid()
+ if err != nil {
+ return nil, err
+ }
+
+ return &port, nil
+}
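+
+// Illustrative call (not part of the upstream source):
+//
+// p, err := PortFromString("health-check,protocol=udp,port=8000")
+// // p.Name == "health-check", p.Protocol == "udp", p.Port == 8000, err == nil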
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/semver.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/semver.go
new file mode 100644
index 0000000..fbe21de
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/semver.go
@@ -0,0 +1,91 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/coreos/go-semver/semver"
+)
+
+var (
+ ErrNoZeroSemVer = ACVersionError("SemVer cannot be zero")
+ ErrBadSemVer = ACVersionError("SemVer is bad")
+)
+
+// SemVer implements the Unmarshaler interface to define a field that must be
+// a semantic version string
+// TODO(jonboulle): extend upstream instead of wrapping?
+type SemVer semver.Version
+
+// NewSemVer generates a new SemVer from a string. If the given string does
+// not represent a valid SemVer, nil and an error are returned.
+func NewSemVer(s string) (*SemVer, error) {
+ nsv, err := semver.NewVersion(s)
+ if err != nil {
+ return nil, ErrBadSemVer
+ }
+ v := SemVer(*nsv)
+ if v.Empty() {
+ return nil, ErrNoZeroSemVer
+ }
+ return &v, nil
+}
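+
+// Illustrative calls (not part of the upstream source):
+//
+// v, err := NewSemVer("0.7.1+git") // v.String() == "0.7.1+git", err == nil
+// _, err = NewSemVer("0.0.0")      // err == ErrNoZeroSemVer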
+
+func (sv SemVer) LessThanMajor(versionB SemVer) bool {
+ return semver.Version(sv).Major < semver.Version(versionB).Major
+}
+
+func (sv SemVer) LessThanExact(versionB SemVer) bool {
+ vA := semver.Version(sv)
+ vB := semver.Version(versionB)
+ return vA.LessThan(vB)
+}
+
+func (sv SemVer) String() string {
+ s := semver.Version(sv)
+ return s.String()
+}
+
+func (sv SemVer) Empty() bool {
+ return semver.Version(sv) == semver.Version{}
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (sv *SemVer) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ v, err := NewSemVer(s)
+ if err != nil {
+ return err
+ }
+ *sv = *v
+ return nil
+}
+
+// MarshalJSON implements the json.Marshaler interface
+func (sv SemVer) MarshalJSON() ([]byte, error) {
+ if sv.Empty() {
+ return nil, ErrNoZeroSemVer
+ }
+ return json.Marshal(sv.String())
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/url.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/url.go
new file mode 100644
index 0000000..d4f8f33
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/url.go
@@ -0,0 +1,71 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "fmt"
+ "net/url"
+)
+
+// URL wraps url.URL to marshal/unmarshal to/from JSON strings and enforce
+// that the scheme is HTTP/HTTPS only
+type URL url.URL
+
+func NewURL(s string) (*URL, error) {
+ uu, err := url.Parse(s)
+ if err != nil {
+ return nil, fmt.Errorf("bad URL: %v", err)
+ }
+ nu := URL(*uu)
+ if err := nu.assertValidScheme(); err != nil {
+ return nil, err
+ }
+ return &nu, nil
+}
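+
+// Illustrative calls (not part of the upstream source):
+//
+// u, err := NewURL("https://example.com/hello.aci") // err == nil
+// _, err = NewURL("ftp://example.com/hello.aci")    // "bad URL scheme" error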
+
+func (u URL) String() string {
+ uu := url.URL(u)
+ return uu.String()
+}
+
+func (u URL) assertValidScheme() error {
+ switch u.Scheme {
+ case "http", "https":
+ return nil
+ default:
+ return fmt.Errorf("bad URL scheme, must be http/https")
+ }
+}
+
+func (u *URL) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ nu, err := NewURL(s)
+ if err != nil {
+ return err
+ }
+ *u = *nu
+ return nil
+}
+
+func (u URL) MarshalJSON() ([]byte, error) {
+ if err := u.assertValidScheme(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(u.String())
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/uuid.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/uuid.go
new file mode 100644
index 0000000..4925b76
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/uuid.go
@@ -0,0 +1,92 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/hex"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+)
+
+var (
+ ErrNoEmptyUUID = errors.New("UUID cannot be empty")
+)
+
+// UUID encodes an RFC4122-compliant UUID, marshaled to/from a string
+// TODO(jonboulle): vendor a package for this?
+// TODO(jonboulle): consider more flexibility in input string formats.
+// Right now, we only accept:
+// "6733C088-A507-4694-AABF-EDBE4FC5266F"
+// "6733C088A5074694AABFEDBE4FC5266F"
+type UUID [16]byte
+
+func (u UUID) String() string {
+ return fmt.Sprintf("%x-%x-%x-%x-%x", u[0:4], u[4:6], u[6:8], u[8:10], u[10:16])
+}
+
+func (u *UUID) Set(s string) error {
+ nu, err := NewUUID(s)
+ if err == nil {
+ *u = *nu
+ }
+ return err
+}
+
+// NewUUID generates a new UUID from the given string. If the string does not
+// represent a valid UUID, nil and an error are returned.
+func NewUUID(s string) (*UUID, error) {
+ s = strings.Replace(s, "-", "", -1)
+ if len(s) != 32 {
+ return nil, errors.New("bad UUID length != 32")
+ }
+ dec, err := hex.DecodeString(s)
+ if err != nil {
+ return nil, err
+ }
+ var u UUID
+ for i, b := range dec {
+ u[i] = b
+ }
+ return &u, nil
+}
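+
+// Illustrative call (not part of the upstream source):
+//
+// u, err := NewUUID("6733C088-A507-4694-AABF-EDBE4FC5266F")
+// // err == nil, u.String() == "6733c088-a507-4694-aabf-edbe4fc5266f"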
+
+func (u UUID) Empty() bool {
+ return reflect.DeepEqual(u, UUID{})
+}
+
+func (u *UUID) UnmarshalJSON(data []byte) error {
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ uu, err := NewUUID(s)
+ // check the parse error before using uu to avoid dereferencing a nil pointer
+ if err != nil {
+ return err
+ }
+ if uu.Empty() {
+ return ErrNoEmptyUUID
+ }
+ *u = *uu
+ return nil
+}
+
+func (u UUID) MarshalJSON() ([]byte, error) {
+ if u.Empty() {
+ return nil, ErrNoEmptyUUID
+ }
+ return json.Marshal(u.String())
+}
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/types/volume.go b/Godeps/_workspace/src/github.com/appc/spec/schema/types/volume.go
new file mode 100644
index 0000000..eeee3a6
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/types/volume.go
@@ -0,0 +1,137 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/url"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// Volume encapsulates a volume which should be mounted into the filesystem
+// of all apps in a PodManifest
+type Volume struct {
+ Name ACName `json:"name"`
+ Kind string `json:"kind"`
+
+ // currently used only by "host"
+ // TODO(jonboulle): factor out?
+ Source string `json:"source,omitempty"`
+ ReadOnly *bool `json:"readOnly,omitempty"`
+}
+
+type volume Volume
+
+func (v Volume) assertValid() error {
+ if v.Name.Empty() {
+ return errors.New("name must be set")
+ }
+
+ switch v.Kind {
+ case "empty":
+ if v.Source != "" {
+ return errors.New("source for empty volume must be empty")
+ }
+ return nil
+ case "host":
+ if v.Source == "" {
+ return errors.New("source for host volume cannot be empty")
+ }
+ if !filepath.IsAbs(v.Source) {
+ return errors.New("source for host volume must be absolute path")
+ }
+ return nil
+ default:
+ return errors.New(`unrecognized volume kind: should be one of "empty", "host"`)
+ }
+}
+
+func (v *Volume) UnmarshalJSON(data []byte) error {
+ var vv volume
+ if err := json.Unmarshal(data, &vv); err != nil {
+ return err
+ }
+ nv := Volume(vv)
+ if err := nv.assertValid(); err != nil {
+ return err
+ }
+ *v = nv
+ return nil
+}
+
+func (v Volume) MarshalJSON() ([]byte, error) {
+ if err := v.assertValid(); err != nil {
+ return nil, err
+ }
+ return json.Marshal(volume(v))
+}
+
+func (v Volume) String() string {
+ // ReadOnly may be nil (it is a *bool); default to false rather than dereferencing it
+ readOnly := false
+ if v.ReadOnly != nil {
+ readOnly = *v.ReadOnly
+ }
+ s := fmt.Sprintf("%s,kind=%s,readOnly=%t", v.Name, v.Kind, readOnly)
+ if v.Source != "" {
+ s = s + fmt.Sprintf(",source=%s", v.Source)
+ }
+ return s
+}
+
+// VolumeFromString takes a command line volume parameter and returns a volume
+//
+// Example volume parameters:
+// database,kind=host,source=/tmp,readOnly=true
+func VolumeFromString(vp string) (*Volume, error) {
+ var vol Volume
+
+ vp = "name=" + vp
+ v, err := url.ParseQuery(strings.Replace(vp, ",", "&", -1))
+ if err != nil {
+ return nil, err
+ }
+ for key, val := range v {
+ if len(val) > 1 {
+ return nil, fmt.Errorf("label %s with multiple values %q", key, val)
+ }
+
+ switch key {
+ case "name":
+ acn, err := NewACName(val[0])
+ if err != nil {
+ return nil, err
+ }
+ vol.Name = *acn
+ case "kind":
+ vol.Kind = val[0]
+ case "source":
+ vol.Source = val[0]
+ case "readOnly":
+ ro, err := strconv.ParseBool(val[0])
+ if err != nil {
+ return nil, err
+ }
+ vol.ReadOnly = &ro
+ default:
+ return nil, fmt.Errorf("unknown volume parameter %q", key)
+ }
+ }
+ err = vol.assertValid()
+ if err != nil {
+ return nil, err
+ }
+
+ return &vol, nil
+}
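+
+// Illustrative call (not part of the upstream source):
+//
+// vol, err := VolumeFromString("database,kind=host,source=/tmp,readOnly=true")
+// // vol.Name == "database", vol.Kind == "host", vol.Source == "/tmp",
+// // *vol.ReadOnly == true, err == nil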
diff --git a/Godeps/_workspace/src/github.com/appc/spec/schema/version.go b/Godeps/_workspace/src/github.com/appc/spec/schema/version.go
new file mode 100644
index 0000000..ae95af9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/appc/spec/schema/version.go
@@ -0,0 +1,39 @@
+// Copyright 2015 The appc Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package schema
+
+import (
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+const (
+ // version represents the canonical version of the appc spec and tooling.
+ // For now, the schema and tooling is coupled with the spec itself, so
+ // this must be kept in sync with the VERSION file in the root of the repo.
+ version string = "0.7.1+git"
+)
+
+var (
+ // AppContainerVersion is the SemVer representation of version
+ AppContainerVersion types.SemVer
+)
+
+func init() {
+ v, err := types.NewSemVer(version)
+ if err != nil {
+ panic(err)
+ }
+ AppContainerVersion = *v
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-semver/semver/semver.go b/Godeps/_workspace/src/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 0000000..db0b988
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,244 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Semantic Versions http://semver.org
+package semver
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+type Version struct {
+ Major int64
+ Minor int64
+ Patch int64
+ PreRelease PreRelease
+ Metadata string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+ parts := strings.SplitN(*input, delim, 2)
+
+ if len(parts) == 2 {
+ *input = parts[0]
+ val = parts[1]
+ }
+
+ return val
+}
+
+func NewVersion(version string) (*Version, error) {
+ v := Version{}
+
+ v.Metadata = splitOff(&version, "+")
+ v.PreRelease = PreRelease(splitOff(&version, "-"))
+
+ dotParts := strings.SplitN(version, ".", 3)
+
+ if len(dotParts) != 3 {
+ return nil, fmt.Errorf("%s is not in dotted-tri format", version)
+ }
+
+ parsed := make([]int64, 3, 3)
+
+ for i, v := range dotParts[:3] {
+ val, err := strconv.ParseInt(v, 10, 64)
+ parsed[i] = val
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ v.Major = parsed[0]
+ v.Minor = parsed[1]
+ v.Patch = parsed[2]
+
+ return &v, nil
+}
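+
+// Illustrative call (not part of the upstream source):
+//
+// v, err := NewVersion("1.2.3-alpha.1+build5")
+// // v.Major == 1, v.Minor == 2, v.Patch == 3,
+// // v.PreRelease == "alpha.1", v.Metadata == "build5", err == nil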
+
+func Must(v *Version, err error) *Version {
+ if err != nil {
+ panic(err)
+ }
+ return v
+}
+
+func (v *Version) String() string {
+ var buffer bytes.Buffer
+
+ fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+ if v.PreRelease != "" {
+ fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+ }
+
+ if v.Metadata != "" {
+ fmt.Fprintf(&buffer, "+%s", v.Metadata)
+ }
+
+ return buffer.String()
+}
+
+func (v *Version) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + v.String() + `"`), nil
+}
+
+func (v *Version) UnmarshalJSON(data []byte) error {
+ l := len(data)
+ if l == 0 || string(data) == `""` {
+ return nil
+ }
+ if l < 2 || data[0] != '"' || data[l-1] != '"' {
+ return errors.New("invalid semver string")
+ }
+ vv, err := NewVersion(string(data[1 : l-1]))
+ if err != nil {
+ return err
+ }
+ *v = *vv
+ return nil
+}
+
+func (v *Version) LessThan(versionB Version) bool {
+ versionA := *v
+ cmp := recursiveCompare(versionA.Slice(), versionB.Slice())
+
+ if cmp == 0 {
+ cmp = preReleaseCompare(versionA, versionB)
+ }
+
+ if cmp == -1 {
+ return true
+ }
+
+ return false
+}
+
+/* Slice converts the comparable parts of the semver into a slice of integers */
+func (v *Version) Slice() []int64 {
+ return []int64{v.Major, v.Minor, v.Patch}
+}
+
+func (p *PreRelease) Slice() []string {
+ preRelease := string(*p)
+ return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+ a := versionA.PreRelease
+ b := versionB.PreRelease
+
+ /* Handle the case where if two versions are otherwise equal it is the
+ * one without a PreRelease that is greater */
+ if len(a) == 0 && (len(b) > 0) {
+ return 1
+ } else if len(b) == 0 && (len(a) > 0) {
+ return -1
+ }
+
+ // If there is a prelease, check and compare each part.
+ return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
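+
+// For example (illustrative; not part of the upstream source):
+//
+// Must(NewVersion("1.0.0-rc.1")).LessThan(*Must(NewVersion("1.0.0")))      // true
+// Must(NewVersion("1.0.0-rc.1")).LessThan(*Must(NewVersion("1.0.0-rc.2"))) // true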
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+ if len(versionA) == 0 {
+ return 0
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+ // Handle slice length disparity.
+ if len(versionA) == 0 {
+ // Nothing to compare to, so we return 0
+ return 0
+ } else if len(versionB) == 0 {
+ // We're longer than versionB so return 1.
+ return 1
+ }
+
+ a := versionA[0]
+ b := versionB[0]
+
+ aInt := false
+ bInt := false
+
+ aI, err := strconv.Atoi(versionA[0])
+ if err == nil {
+ aInt = true
+ }
+
+ bI, err := strconv.Atoi(versionB[0])
+ if err == nil {
+ bInt = true
+ }
+
+ // Handle Integer Comparison
+ if aInt && bInt {
+ if aI > bI {
+ return 1
+ } else if aI < bI {
+ return -1
+ }
+ }
+
+ // Handle String Comparison
+ if a > b {
+ return 1
+ } else if a < b {
+ return -1
+ }
+
+ return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// BumpMajor increments the Major field by 1 and resets all other fields to their default values
+func (v *Version) BumpMajor() {
+ v.Major += 1
+ v.Minor = 0
+ v.Patch = 0
+ v.PreRelease = PreRelease("")
+ v.Metadata = ""
+}
+
+// BumpMinor increments the Minor field by 1 and resets the Patch, PreRelease and Metadata fields to their default values
+func (v *Version) BumpMinor() {
+ v.Minor += 1
+ v.Patch = 0
+ v.PreRelease = PreRelease("")
+ v.Metadata = ""
+}
+
+// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
+func (v *Version) BumpPatch() {
+ v.Patch += 1
+ v.PreRelease = PreRelease("")
+ v.Metadata = ""
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/go-semver/semver/sort.go b/Godeps/_workspace/src/github.com/coreos/go-semver/semver/sort.go
new file mode 100644
index 0000000..e256b41
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/go-semver/semver/sort.go
@@ -0,0 +1,38 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+ "sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+ return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+ s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+ return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of Version
+func Sort(versions []*Version) {
+ sort.Sort(Versions(versions))
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/ioprogress/LICENSE b/Godeps/_workspace/src/github.com/coreos/ioprogress/LICENSE
new file mode 100644
index 0000000..2298515
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/ioprogress/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/Godeps/_workspace/src/github.com/coreos/ioprogress/README.md b/Godeps/_workspace/src/github.com/coreos/ioprogress/README.md
new file mode 100644
index 0000000..3d291e9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/ioprogress/README.md
@@ -0,0 +1,42 @@
+# ioprogress
+
+ioprogress is a Go (golang) library with implementations of `io.Reader`
+and `io.Writer` that draw progress bars. The primary use case for these
+is CLI applications, but alternate progress bar writers can be supplied
+for alternate environments.
+
+## Example
+
+
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/ioprogress
+```
+
+## Usage
+
+Here is an example of outputting a basic progress bar to the CLI as
+we're "downloading" from some other `io.Reader` (perhaps from a network
+connection):
+
+```go
+// Imagine this came from some external source, such as a network connection,
+// and that we have the full size of it, such as from a Content-Length HTTP
+// header.
+var r io.Reader
+
+// Create the progress reader
+progressR := &ioprogress.Reader{
+ Reader: r,
+ Size: rSize,
+}
+
+// Copy all of the reader to some local file f. As it copies, the
+// progressR will write progress to the terminal on os.Stdout. This is
+// customizable.
+io.Copy(f, progressR)
+```
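+
+A minimal sketch of customizing the drawing (assuming the same `r`, `rSize`,
+and `f` as above):
+
+```go
+// Draw a 20-character bar plus byte counts, updating at most once per second.
+bar := ioprogress.DrawTextFormatBar(20)
+progressR := &ioprogress.Reader{
+ Reader: r,
+ Size: rSize,
+ DrawInterval: time.Second,
+ DrawFunc: ioprogress.DrawTerminalf(os.Stdout, func(progress, total int64) string {
+ return fmt.Sprintf("%s %s", bar(progress, total), ioprogress.DrawTextFormatBytes(progress, total))
+ }),
+}
+io.Copy(f, progressR)
+```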
diff --git a/Godeps/_workspace/src/github.com/coreos/ioprogress/draw.go b/Godeps/_workspace/src/github.com/coreos/ioprogress/draw.go
new file mode 100644
index 0000000..3cf4243
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/ioprogress/draw.go
@@ -0,0 +1,135 @@
+package ioprogress
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/golang.org/x/crypto/ssh/terminal"
+)
+
+// DrawFunc is the callback type for drawing progress.
+type DrawFunc func(int64, int64) error
+
+// DrawTextFormatFunc is a callback used by DrawFuncs that draw text in
+// order to format the text into some more human friendly format.
+type DrawTextFormatFunc func(int64, int64) string
+
+var defaultDrawFunc DrawFunc
+
+func init() {
+ defaultDrawFunc = DrawTerminal(os.Stdout)
+}
+
+// isTerminal returns true when w is connected to a tty, and false otherwise.
+func isTerminal(w io.Writer) bool {
+ if f, ok := w.(*os.File); ok {
+ return terminal.IsTerminal(int(f.Fd()))
+ }
+ return false
+}
+
+// DrawTerminal returns a DrawFunc that draws a progress bar to an io.Writer
+// that is assumed to be a terminal (and therefore respects carriage returns).
+func DrawTerminal(w io.Writer) DrawFunc {
+ return DrawTerminalf(w, func(progress, total int64) string {
+ return fmt.Sprintf("%d/%d", progress, total)
+ })
+}
+
+// DrawTerminalf returns a DrawFunc that draws a progress bar to an io.Writer
+// that is formatted with the given formatting function.
+func DrawTerminalf(w io.Writer, f DrawTextFormatFunc) DrawFunc {
+ var maxLength int
+
+ return func(progress, total int64) error {
+ if progress == -1 && total == -1 {
+ _, err := fmt.Fprintf(w, "\n")
+ return err
+ }
+
+ // Make sure we pad it to the max length we've ever drawn so that
+ // we don't have trailing characters.
+ line := f(progress, total)
+ if len(line) < maxLength {
+ line = fmt.Sprintf(
+ "%s%s",
+ line,
+ strings.Repeat(" ", maxLength-len(line)))
+ }
+ maxLength = len(line)
+
+ terminate := "\r"
+ if !isTerminal(w) {
+ terminate = "\n"
+ }
+ _, err := fmt.Fprint(w, line+terminate)
+ return err
+ }
+}
+
+var byteUnits = []string{"B", "KB", "MB", "GB", "TB", "PB"}
+
+// DrawTextFormatBytes is a DrawTextFormatFunc that formats the progress
+// and total into human-friendly byte formats.
+func DrawTextFormatBytes(progress, total int64) string {
+ return fmt.Sprintf("%s/%s", ByteUnitStr(progress), ByteUnitStr(total))
+}
+
+// DrawTextFormatBar returns a DrawTextFormatFunc that draws a progress
+// bar with the given width (in characters). This can be used in conjunction
+// with another DrawTextFormatFunc to create a progress bar with bytes, for
+// example:
+//
+// bar := DrawTextFormatBar(20)
+// func(progress, total int64) string {
+// return fmt.Sprintf(
+// "%s %s",
+// bar(progress, total),
+// DrawTextFormatBytes(progress, total))
+// }
+//
+func DrawTextFormatBar(width int64) DrawTextFormatFunc {
+ return DrawTextFormatBarForW(width, nil)
+}
+
+// DrawTextFormatBarForW returns a DrawTextFormatFunc as described in the docs
+// for DrawTextFormatBar; however, if the io.Writer passed in is not a tty, the
+// returned function always returns "".
+func DrawTextFormatBarForW(width int64, w io.Writer) DrawTextFormatFunc {
+ if w != nil && !isTerminal(w) {
+ return func(progress, total int64) string {
+ return ""
+ }
+ }
+
+ width -= 2
+
+ return func(progress, total int64) string {
+ current := int64((float64(progress) / float64(total)) * float64(width))
+ if current < 0 || current > width {
+ return fmt.Sprintf("[%s]", strings.Repeat(" ", int(width)))
+ }
+ return fmt.Sprintf(
+ "[%s%s]",
+ strings.Repeat("=", int(current)),
+ strings.Repeat(" ", int(width-current)))
+ }
+}
+
+// ByteUnitStr pretty prints a number of bytes.
+func ByteUnitStr(n int64) string {
+ var unit string
+ size := float64(n)
+ for i := 1; i < len(byteUnits); i++ {
+ if size < 1000 {
+ unit = byteUnits[i-1]
+ break
+ }
+
+ size = size / 1000
+ }
+
+ return fmt.Sprintf("%.3g %s", size, unit)
+}
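+
+// For example (illustrative; not part of the upstream source):
+//
+// ByteUnitStr(1500000) == "1.5 MB"
+// ByteUnitStr(42)      == "42 B"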
diff --git a/Godeps/_workspace/src/github.com/coreos/ioprogress/reader.go b/Godeps/_workspace/src/github.com/coreos/ioprogress/reader.go
new file mode 100644
index 0000000..7d52731
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/ioprogress/reader.go
@@ -0,0 +1,107 @@
+package ioprogress
+
+import (
+ "io"
+ "time"
+)
+
+// Reader is an implementation of io.Reader that draws the progress of
+// reading some data.
+type Reader struct {
+ // Reader is the underlying reader to read from
+ Reader io.Reader
+
+ // Size is the total size of the data coming out of the reader.
+ Size int64
+
+ // DrawFunc is the callback to invoke to draw the progress bar. By
+ // default, this will be DrawTerminal(os.Stdout).
+ //
+ // DrawInterval is the minimum time to wait between reads to update the
+ // progress bar.
+ DrawFunc DrawFunc
+ DrawInterval time.Duration
+
+ progress int64
+ lastDraw time.Time
+}
+
+// Read reads from the underlying reader and invokes the DrawFunc if
+// appropriate. The DrawFunc is executed when there is data that is
+// read (progress is made) and at least DrawInterval time has passed.
+func (r *Reader) Read(p []byte) (int, error) {
+ // If we haven't drawn before, initialize the progress bar
+ if r.lastDraw.IsZero() {
+ r.initProgress()
+ }
+
+ // Read from the underlying source
+ n, err := r.Reader.Read(p)
+
+ // Always increment the progress even if there was an error
+ r.progress += int64(n)
+
+ // If we don't have any errors, then draw the progress. If we are
+ // at the end of the data, then finish the progress.
+ if err == nil {
+ // Only draw if we read data or we've never read data before (to
+ // initialize the progress bar).
+ if n > 0 {
+ r.drawProgress()
+ }
+ }
+ if err == io.EOF {
+ r.finishProgress()
+ }
+
+ return n, err
+}
+
+func (r *Reader) drawProgress() {
+ // If we've drawn before, then make sure that the draw interval
+ // has passed before we draw again.
+ interval := r.DrawInterval
+ if interval == 0 {
+ interval = time.Second
+ }
+ if !r.lastDraw.IsZero() {
+ nextDraw := r.lastDraw.Add(interval)
+ if time.Now().Before(nextDraw) {
+ return
+ }
+ }
+
+ // Draw
+ f := r.drawFunc()
+ f(r.progress, r.Size)
+
+ // Record this draw so that we don't draw again really quickly
+ r.lastDraw = time.Now()
+}
+
+func (r *Reader) finishProgress() {
+ f := r.drawFunc()
+ f(r.progress, r.Size)
+
+ // Print a newline
+ f(-1, -1)
+
+ // Reset lastDraw so we don't finish again
+ var zeroDraw time.Time
+ r.lastDraw = zeroDraw
+}
+
+func (r *Reader) initProgress() {
+ var zeroDraw time.Time
+ r.lastDraw = zeroDraw
+ r.drawProgress()
+ r.lastDraw = zeroDraw
+}
+
+func (r *Reader) drawFunc() DrawFunc {
+ if r.DrawFunc == nil {
+ return defaultDrawFunc
+ }
+
+ return r.DrawFunc
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/common/apps/apps.go b/Godeps/_workspace/src/github.com/coreos/rkt/common/apps/apps.go
new file mode 100644
index 0000000..97416ea
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/common/apps/apps.go
@@ -0,0 +1,137 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//+build linux
+
+package apps
+
+import (
+ "fmt"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema"
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+type App struct {
+ Image string // the image reference as supplied by the user on the cli
+ Args []string // any arguments the user supplied for this app
+ Asc string // signature file override for image verification (if fetching occurs)
+ Exec string // exec override for image
+ Mounts []schema.Mount // mounts for this app (superseding any mounts in rktApps.mounts of same MountPoint)
+
+ // TODO(jonboulle): These images are partially-populated hashes, this should be clarified.
+ ImageID types.Hash // resolved image identifier
+}
+
+type Apps struct {
+ apps []App
+ Mounts []schema.Mount // global mounts applied to all apps
+ Volumes []types.Volume // volumes available to all apps
+}
+
+// Reset creates a new slice for al.apps, needed by tests
+func (al *Apps) Reset() {
+ al.apps = make([]App, 0)
+}
+
+// Count returns the number of apps in al
+func (al *Apps) Count() int {
+ return len(al.apps)
+}
+
+// Create adds a new app with the given image reference to al
+func (al *Apps) Create(img string) {
+ al.apps = append(al.apps, App{Image: img})
+}
+
+// Last returns a pointer to the top app in al
+func (al *Apps) Last() *App {
+ if len(al.apps) == 0 {
+ return nil
+ }
+ return &al.apps[len(al.apps)-1]
+}
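+
+// Illustrative usage (not part of the upstream source; the image name is
+// hypothetical):
+//
+// var al Apps
+// al.Create("example.com/worker")
+// al.Last().Args = []string{"--debug"}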
+
+// Validate validates al for things like referential integrity of mounts<->volumes.
+func (al *Apps) Validate() error {
+ vs := map[types.ACName]struct{}{}
+ for _, v := range al.Volumes {
+ vs[v.Name] = struct{}{}
+ }
+
+ f := func(mnts []schema.Mount) error {
+ for _, m := range mnts {
+ if _, ok := vs[m.Volume]; !ok {
+ return fmt.Errorf("dangling mount point %q: volume %q not found", m.Path, m.Volume)
+ }
+ }
+ return nil
+ }
+
+ if err := f(al.Mounts); err != nil {
+ return err
+ }
+
+ err := al.Walk(func(app *App) error {
+ return f(app.Mounts)
+ })
+
+ /* TODO(vc): in debug/verbose mode say something about unused volumes? */
+ return err
+}
+
+// Walk iterates over al.apps, calling f for each app.
+// Walking stops at the first error returned by f, and that error is returned.
+func (al *Apps) Walk(f func(*App) error) error {
+ for i := range al.apps {
+ // XXX(vc): note we supply f() with a pointer to the app instance in al.apps to enable modification by f()
+ if err := f(&al.apps[i]); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// these convenience functions return typed lists containing just the named member
+// TODO(vc): these probably go away when we just pass Apps to stage0
+
+// GetImages returns a list of the images in al, one per app.
+// The order reflects the app order in al.
+func (al *Apps) GetImages() []string {
+ var il []string
+ for _, a := range al.apps {
+ il = append(il, a.Image)
+ }
+ return il
+}
+
+// GetArgs returns a list of lists of arguments in al, one list of args per app.
+// The order reflects the app order in al.
+func (al *Apps) GetArgs() [][]string {
+ var aal [][]string
+ for _, a := range al.apps {
+ aal = append(aal, a.Args)
+ }
+ return aal
+}
+
+// GetImageIDs returns a list of the imageIDs in al, one per app.
+// The order reflects the app order in al.
+func (al *Apps) GetImageIDs() []types.Hash {
+ var hl []types.Hash
+ for _, a := range al.apps {
+ hl = append(hl, a.ImageID)
+ }
+ return hl
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/common/cgroup/cgroup.go b/Godeps/_workspace/src/github.com/coreos/rkt/common/cgroup/cgroup.go
new file mode 100644
index 0000000..ef4897d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/common/cgroup/cgroup.go
@@ -0,0 +1,412 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//+build linux
+
+package cgroup
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+ "syscall"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/k8s.io/kubernetes/pkg/api/resource"
+ "github.com/coreos/go-systemd/unit"
+)
+
+type addIsolatorFunc func(opts []*unit.UnitOption, limit *resource.Quantity) ([]*unit.UnitOption, error)
+
+var (
+ isolatorFuncs = map[string]addIsolatorFunc{
+ "cpu": addCpuLimit,
+ "memory": addMemoryLimit,
+ }
+ cgroupControllerRWFiles = map[string][]string{
+ "memory": []string{"memory.limit_in_bytes"},
+ "cpu": []string{"cpu.cfs_quota_us"},
+ }
+)
+
+func addCpuLimit(opts []*unit.UnitOption, limit *resource.Quantity) ([]*unit.UnitOption, error) {
+ if limit.Value() > resource.MaxMilliValue {
+ return nil, fmt.Errorf("cpu limit exceeds the maximum millivalue: %v", limit.String())
+ }
+ quota := strconv.Itoa(int(limit.MilliValue()/10)) + "%"
+ opts = append(opts, unit.NewUnitOption("Service", "CPUQuota", quota))
+ return opts, nil
+}
+
+func addMemoryLimit(opts []*unit.UnitOption, limit *resource.Quantity) ([]*unit.UnitOption, error) {
+ opts = append(opts, unit.NewUnitOption("Service", "MemoryLimit", strconv.Itoa(int(limit.Value()))))
+ return opts, nil
+}
+
+// MaybeAddIsolator considers the given isolator; if the type is known
+// (i.e. IsIsolatorSupported is true) and the limit is non-nil, the supplied
+// opts will be extended with an appropriate option implementing the desired
+// isolation.
+func MaybeAddIsolator(opts []*unit.UnitOption, isolator string, limit *resource.Quantity) ([]*unit.UnitOption, error) {
+ var err error
+ if limit == nil {
+ return opts, nil
+ }
+ if IsIsolatorSupported(isolator) {
+ opts, err = isolatorFuncs[isolator](opts, limit)
+ if err != nil {
+ return nil, err
+ }
+ } else {
+ fmt.Fprintf(os.Stderr, "warning: resource/%s isolator set but support disabled in the kernel, skipping\n", isolator)
+ }
+ return opts, nil
+}
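+
+// Illustrative call (not part of the upstream source):
+//
+// limit := resource.MustParse("512Mi")
+// opts, err := MaybeAddIsolator(nil, "memory", &limit)
+// // on a kernel with the memory controller enabled, opts now holds a
+// // single "MemoryLimit=536870912" unit option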
+
+// IsIsolatorSupported returns whether an isolator is supported in the kernel
+func IsIsolatorSupported(isolator string) bool {
+ if files, ok := cgroupControllerRWFiles[isolator]; ok {
+ for _, f := range files {
+ isolatorPath := filepath.Join("/sys/fs/cgroup/", isolator, f)
+ if _, err := os.Stat(isolatorPath); os.IsNotExist(err) {
+ return false
+ }
+ }
+ return true
+ }
+ return false
+}
+
+func parseCgroups(f io.Reader) (map[int][]string, error) {
+ sc := bufio.NewScanner(f)
+
+ // skip first line since it is a comment
+ sc.Scan()
+
+ cgroups := make(map[int][]string)
+ for sc.Scan() {
+ var controller string
+ var hierarchy int
+ var num int
+ var enabled int
+ fmt.Sscanf(sc.Text(), "%s %d %d %d", &controller, &hierarchy, &num, &enabled)
+
+ if enabled == 1 {
+ if _, ok := cgroups[hierarchy]; !ok {
+ cgroups[hierarchy] = []string{controller}
+ } else {
+ cgroups[hierarchy] = append(cgroups[hierarchy], controller)
+ }
+ }
+ }
+
+ if err := sc.Err(); err != nil {
+ return nil, err
+ }
+
+ return cgroups, nil
+}
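+
+// For example (illustrative; not part of the upstream source), given the input:
+//
+// #subsys_name hierarchy num_cgroups enabled
+// cpu 3 64 1
+// cpuacct 3 64 1
+// memory 4 64 0
+//
+// parseCgroups returns map[int][]string{3: {"cpu", "cpuacct"}}.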
+
+// GetEnabledCgroups returns a map with the enabled cgroup controllers grouped by
+// hierarchy
+func GetEnabledCgroups() (map[int][]string, error) {
+ cgroupsFile, err := os.Open("/proc/cgroups")
+ if err != nil {
+ return nil, err
+ }
+ defer cgroupsFile.Close()
+
+ cgroups, err := parseCgroups(cgroupsFile)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing /proc/cgroups: %v", err)
+ }
+
+ return cgroups, nil
+}
+
+// GetControllerDirs takes a map with the enabled cgroup controllers grouped by
+// hierarchy and returns the directory names as they should be in
+// /sys/fs/cgroup
+func GetControllerDirs(cgroups map[int][]string) []string {
+ var controllers []string
+ for _, cs := range cgroups {
+ controllers = append(controllers, strings.Join(cs, ","))
+ }
+
+ return controllers
+}
+
+func getControllerSymlinks(cgroups map[int][]string) map[string]string {
+ symlinks := make(map[string]string)
+
+ for _, cs := range cgroups {
+ if len(cs) > 1 {
+ tgt := strings.Join(cs, ",")
+ for _, ln := range cs {
+ symlinks[ln] = tgt
+ }
+ }
+ }
+
+ return symlinks
+}
+
+func getControllerRWFiles(controller string) []string {
+ parts := strings.Split(controller, ",")
+ for _, p := range parts {
+ if files, ok := cgroupControllerRWFiles[p]; ok {
+ // cgroup.procs always needs to be RW for allowing systemd to add
+ // processes to the controller
+ files = append(files, "cgroup.procs")
+ return files
+ }
+ }
+
+ return nil
+}
+
+func parseOwnCgroupController(controller string) ([]string, error) {
+ cgroupPath := "/proc/self/cgroup"
+ cg, err := os.Open(cgroupPath)
+ if err != nil {
+ return nil, fmt.Errorf("error opening /proc/self/cgroup: %v", err)
+ }
+ defer cg.Close()
+
+ s := bufio.NewScanner(cg)
+ for s.Scan() {
+ parts := strings.SplitN(s.Text(), ":", 3)
+ if len(parts) < 3 {
+ return nil, fmt.Errorf("error parsing /proc/self/cgroup")
+ }
+ controllerParts := strings.Split(parts[1], ",")
+ for _, c := range controllerParts {
+ if c == controller {
+ return parts, nil
+ }
+ }
+ }
+
+ return nil, fmt.Errorf("controller %q not found", controller)
+}
+
+// GetOwnCgroupPath returns the cgroup path of this process in controller
+// hierarchy
+func GetOwnCgroupPath(controller string) (string, error) {
+ parts, err := parseOwnCgroupController(controller)
+ if err != nil {
+ return "", err
+ }
+ return parts[2], nil
+}
+
+// JoinSubcgroup makes the calling process join the given subcgroup in the
+// hierarchy of a particular controller
+func JoinSubcgroup(controller string, subcgroup string) error {
+ subcgroupPath := filepath.Join("/sys/fs/cgroup", controller, subcgroup)
+ if err := os.MkdirAll(subcgroupPath, 0600); err != nil {
+ return fmt.Errorf("error creating %q subcgroup: %v", subcgroup, err)
+ }
+ pidBytes := []byte(strconv.Itoa(os.Getpid()))
+ if err := ioutil.WriteFile(filepath.Join(subcgroupPath, "cgroup.procs"), pidBytes, 0600); err != nil {
+ return fmt.Errorf("error adding ourselves to the %q subcgroup: %v", subcgroup, err)
+ }
+
+ return nil
+}
+
+// If /system.slice does not exist in the cpuset controller, create it and
+// configure it.
+// Since this is a workaround, we ignore errors
+func fixCpusetKnobs(cpusetPath string) {
+ cgroupPathFix := filepath.Join(cpusetPath, "system.slice")
+ _ = os.MkdirAll(cgroupPathFix, 0755)
+ knobs := []string{"cpuset.mems", "cpuset.cpus"}
+ for _, knob := range knobs {
+ parentFile := filepath.Join(filepath.Dir(cgroupPathFix), knob)
+ childFile := filepath.Join(cgroupPathFix, knob)
+
+ data, err := ioutil.ReadFile(childFile)
+ if err != nil {
+ continue
+ }
+ // If the file is already configured, don't change it
+ if strings.TrimSpace(string(data)) != "" {
+ continue
+ }
+
+ data, err = ioutil.ReadFile(parentFile)
+ if err == nil {
+ // Workaround: just write twice to workaround the kernel bug fixed by this commit:
+ // https://github.com/torvalds/linux/commit/24ee3cf89bef04e8bc23788aca4e029a3f0f06d9
+ ioutil.WriteFile(childFile, data, 0644)
+ ioutil.WriteFile(childFile, data, 0644)
+ }
+ }
+}
+
+// IsControllerMounted returns whether a controller is mounted by checking that
+// cgroup.procs is accessible
+func IsControllerMounted(c string) bool {
+ cgroupProcsPath := filepath.Join("/sys/fs/cgroup", c, "cgroup.procs")
+ if _, err := os.Stat(cgroupProcsPath); err != nil {
+ return false
+ }
+
+ return true
+}
+
+// CreateCgroups mounts the cgroup controllers hierarchy in /sys/fs/cgroup
+// under root
+func CreateCgroups(root string, enabledCgroups map[int][]string) error {
+ controllers := GetControllerDirs(enabledCgroups)
+ var flags uintptr
+
+ sys := filepath.Join(root, "/sys")
+ if err := os.MkdirAll(sys, 0700); err != nil {
+ return err
+ }
+ flags = syscall.MS_NOSUID |
+ syscall.MS_NOEXEC |
+ syscall.MS_NODEV
+ // If we're mounting the host cgroups, /sys is probably mounted so we
+ // ignore EBUSY
+ if err := syscall.Mount("sysfs", sys, "sysfs", flags, ""); err != nil && err != syscall.EBUSY {
+ return fmt.Errorf("error mounting %q: %v", sys, err)
+ }
+
+ cgroupTmpfs := filepath.Join(root, "/sys/fs/cgroup")
+ if err := os.MkdirAll(cgroupTmpfs, 0700); err != nil {
+ return err
+ }
+ flags = syscall.MS_NOSUID |
+ syscall.MS_NOEXEC |
+ syscall.MS_NODEV |
+ syscall.MS_STRICTATIME
+ if err := syscall.Mount("tmpfs", cgroupTmpfs, "tmpfs", flags, "mode=755"); err != nil {
+ return fmt.Errorf("error mounting %q: %v", cgroupTmpfs, err)
+ }
+
+ // Mount controllers
+ for _, c := range controllers {
+ cPath := filepath.Join(root, "/sys/fs/cgroup", c)
+ if err := os.MkdirAll(cPath, 0700); err != nil {
+ return err
+ }
+
+ flags = syscall.MS_NOSUID |
+ syscall.MS_NOEXEC |
+ syscall.MS_NODEV
+ if err := syscall.Mount("cgroup", cPath, "cgroup", flags, c); err != nil {
+ return fmt.Errorf("error mounting %q: %v", cPath, err)
+ }
+ }
+
+ // Create symlinks for combined controllers
+ symlinks := getControllerSymlinks(enabledCgroups)
+ for ln, tgt := range symlinks {
+ lnPath := filepath.Join(cgroupTmpfs, ln)
+ if err := os.Symlink(tgt, lnPath); err != nil {
+ return fmt.Errorf("error creating symlink: %v", err)
+ }
+ }
+
+ systemdControllerPath := filepath.Join(root, "/sys/fs/cgroup/systemd")
+ if err := os.MkdirAll(systemdControllerPath, 0700); err != nil {
+ return err
+ }
+
+ // Bind-mount cgroup tmpfs filesystem read-only
+ flags = syscall.MS_BIND |
+ syscall.MS_REMOUNT |
+ syscall.MS_NOSUID |
+ syscall.MS_NOEXEC |
+ syscall.MS_NODEV |
+ syscall.MS_RDONLY
+ if err := syscall.Mount(cgroupTmpfs, cgroupTmpfs, "", flags, ""); err != nil {
+ return fmt.Errorf("error remounting RO %q: %v", cgroupTmpfs, err)
+ }
+
+ return nil
+}
+
+// RemountCgroupsRO remounts the cgroup hierarchy under root read-only, leaving
+// the needed knobs in the subcgroup for each app read-write so the systemd
+// inside stage1 can apply isolators to them
+func RemountCgroupsRO(root string, enabledCgroups map[int][]string, subcgroup string, serviceNames []string) error {
+ controllers := GetControllerDirs(enabledCgroups)
+ cgroupTmpfs := filepath.Join(root, "/sys/fs/cgroup")
+ sysPath := filepath.Join(root, "/sys")
+
+ var flags uintptr
+
+ // Mount RW knobs we need to make the enabled isolators work
+ for _, c := range controllers {
+ cPath := filepath.Join(cgroupTmpfs, c)
+ subcgroupPath := filepath.Join(cPath, subcgroup)
+
+ // Workaround for https://github.com/coreos/rkt/issues/1210
+ if c == "cpuset" {
+ fixCpusetKnobs(cPath)
+ }
+
+ // Create cgroup directories and mount the files we need over
+ // themselves so they stay read-write
+ for _, serviceName := range serviceNames {
+ appCgroup := filepath.Join(subcgroupPath, serviceName)
+ if err := os.MkdirAll(appCgroup, 0755); err != nil {
+ return err
+ }
+ for _, f := range getControllerRWFiles(c) {
+ cgroupFilePath := filepath.Join(appCgroup, f)
+ // the file may not be there if kernel doesn't support the
+ // feature, skip it in that case
+ if _, err := os.Stat(cgroupFilePath); os.IsNotExist(err) {
+ continue
+ }
+ if err := syscall.Mount(cgroupFilePath, cgroupFilePath, "", syscall.MS_BIND, ""); err != nil {
+ return fmt.Errorf("error bind mounting %q: %v", cgroupFilePath, err)
+ }
+ }
+ }
+
+ // Re-mount controller read-only to prevent the container from modifying host controllers
+ flags = syscall.MS_BIND |
+ syscall.MS_REMOUNT |
+ syscall.MS_NOSUID |
+ syscall.MS_NOEXEC |
+ syscall.MS_NODEV |
+ syscall.MS_RDONLY
+ if err := syscall.Mount(cPath, cPath, "", flags, ""); err != nil {
+ return fmt.Errorf("error remounting RO %q: %v", cPath, err)
+ }
+ }
+
+ // Bind-mount sys filesystem read-only
+ flags = syscall.MS_BIND |
+ syscall.MS_REMOUNT |
+ syscall.MS_NOSUID |
+ syscall.MS_NOEXEC |
+ syscall.MS_NODEV |
+ syscall.MS_RDONLY
+ if err := syscall.Mount(sysPath, sysPath, "", flags, ""); err != nil {
+ return fmt.Errorf("error remounting RO %q: %v", sysPath, err)
+ }
+
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/common/cgroup_util.go b/Godeps/_workspace/src/github.com/coreos/rkt/common/cgroup_util.go
new file mode 100644
index 0000000..7a00276
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/common/cgroup_util.go
@@ -0,0 +1,183 @@
+// Copyright 2014 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//+build linux
+
+package common
+
+// adapted from systemd/src/shared/cgroup-util.c
+// TODO this should be moved to go-systemd
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+)
+
+const (
+ unitNameMax = 256
+)
+
+var (
+ validChars = regexp.MustCompile(`[a-zA-Z0-9:\-_\.\\]+`)
+)
+
+// cgEscape implements very minimal escaping for names to be used as file names
+// in the cgroup tree: any name which might conflict with a kernel name or is
+// prefixed with '_' is prefixed with a '_'. That way, when reading cgroup
+// names it is sufficient to remove a single prefixing underscore if there is
+// one.
+func cgEscape(p string) string {
+ needPrefix := false
+
+ switch {
+ case strings.HasPrefix(p, "_"):
+ fallthrough
+ case strings.HasPrefix(p, "."):
+ fallthrough
+ case p == "notify_on_release":
+ fallthrough
+ case p == "release_agent":
+ fallthrough
+ case p == "tasks":
+ needPrefix = true
+ case strings.Contains(p, "."):
+ sp := strings.Split(p, ".")
+ if sp[0] == "cgroup" {
+ needPrefix = true
+ } else {
+ n := sp[0]
+ if checkHierarchy(n) {
+ needPrefix = true
+ }
+ }
+ }
+
+ if needPrefix {
+ return "_" + p
+ }
+
+ return p
+}
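+
+// For example (illustrative; not part of the upstream source):
+//
+// cgEscape("tasks")     == "_tasks"
+// cgEscape("foo.slice") == "foo.slice" (assuming no "foo" controller is mounted)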
+
+func filenameIsValid(p string) bool {
+ switch {
+ case p == "", p == ".", p == "..", strings.Contains(p, "/"):
+ return false
+ default:
+ return true
+ }
+}
+
+func checkHierarchy(p string) bool {
+ if !filenameIsValid(p) {
+ return true
+ }
+
+ cc := filepath.Join("/sys/fs/cgroup", p)
+ if _, err := os.Stat(cc); os.IsNotExist(err) {
+ return false
+ }
+
+ return true
+}
+
+func cgUnescape(p string) string {
+ if p[0] == '_' {
+ return p[1:]
+ }
+
+ return p
+}
+
+func sliceNameIsValid(n string) bool {
+ if n == "" {
+ return false
+ }
+
+ if len(n) >= unitNameMax {
+ return false
+ }
+
+ if !strings.Contains(n, ".") {
+ return false
+ }
+
+ if validChars.FindString(n) != n {
+ return false
+ }
+
+ if strings.Contains(n, "@") {
+ return false
+ }
+
+ return true
+}
+
+// SliceToPath explodes a slice name to its corresponding path in the cgroup
+// hierarchy. For example, a slice named "foo-bar-baz.slice" corresponds to the
+// path "foo.slice/foo-bar.slice/foo-bar-baz.slice". See systemd.slice(5)
+func SliceToPath(unit string) (string, error) {
+ if unit == "-.slice" {
+ return "", nil
+ }
+
+ if !strings.HasSuffix(unit, ".slice") {
+ return "", fmt.Errorf("not a slice")
+ }
+
+ if !sliceNameIsValid(unit) {
+ return "", fmt.Errorf("invalid slice name")
+ }
+
+ prefix := unitnameToPrefix(unit)
+
+ // don't allow initial dashes
+ if prefix[0] == '-' {
+ return "", fmt.Errorf("initial dash")
+ }
+
+ prefixParts := strings.Split(prefix, "-")
+
+ var curSlice string
+ var slicePath string
+ for _, slicePart := range prefixParts {
+ if slicePart == "" {
+ return "", fmt.Errorf("trailing or double dash")
+ }
+
+ if curSlice != "" {
+ curSlice = curSlice + "-"
+ }
+ curSlice = curSlice + slicePart
+
+ curSliceDir := curSlice + ".slice"
+ escaped := cgEscape(curSliceDir)
+
+ slicePath = filepath.Join(slicePath, escaped)
+ }
+
+ return slicePath, nil
+}
+
+func unitnameToPrefix(unit string) string {
+ idx := strings.Index(unit, "@")
+ if idx == -1 {
+ idx = strings.LastIndex(unit, ".")
+ }
+
+ return unit[:idx]
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/common/common.go b/Godeps/_workspace/src/github.com/coreos/rkt/common/common.go
new file mode 100644
index 0000000..916e602
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/common/common.go
@@ -0,0 +1,271 @@
+// Copyright 2014 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package common defines values shared by different parts
+// of rkt (e.g. stage0 and stage1)
+package common
+
+import (
+ "bufio"
+ "fmt"
+ "net"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/aci"
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/appc/spec/schema/types"
+)
+
+const (
+ sharedVolumesDir = "/sharedVolumes"
+ stage1Dir = "/stage1"
+ stage2Dir = "/opt/stage2"
+ AppsInfoDir = "/appsinfo"
+
+ EnvLockFd = "RKT_LOCK_FD"
+ EnvSELinuxContext = "RKT_SELINUX_CONTEXT"
+ Stage1TreeStoreIDFilename = "stage1TreeStoreID"
+ AppTreeStoreIDFilename = "treeStoreID"
+ OverlayPreparedFilename = "overlay-prepared"
+ PrivateUsersPreparedFilename = "private-users-prepared"
+
+ PrepareLock = "prepareLock"
+
+ MetadataServicePort = 18112
+ MetadataServiceRegSock = "/run/rkt/metadata-svc.sock"
+
+ APIServiceListenClientURL = "localhost:15441"
+
+ DefaultLocalConfigDir = "/etc/rkt"
+ DefaultSystemConfigDir = "/usr/lib/rkt"
+)
+
+// Stage1ImagePath returns the path where the stage1 app image (unpacked ACI) is rooted,
+// (i.e. where its contents are extracted during stage0).
+func Stage1ImagePath(root string) string {
+ return filepath.Join(root, stage1Dir)
+}
+
+// Stage1RootfsPath returns the path to the stage1 rootfs
+func Stage1RootfsPath(root string) string {
+ return filepath.Join(Stage1ImagePath(root), aci.RootfsDir)
+}
+
+// Stage1ManifestPath returns the path to the stage1's manifest file inside the expanded ACI.
+func Stage1ManifestPath(root string) string {
+ return filepath.Join(Stage1ImagePath(root), aci.ManifestFile)
+}
+
+// PodManifestPath returns the path in root to the Pod Manifest
+func PodManifestPath(root string) string {
+ return filepath.Join(root, "pod")
+}
+
+// AppsPath returns the path where the apps within a pod live.
+func AppsPath(root string) string {
+ return filepath.Join(Stage1RootfsPath(root), stage2Dir)
+}
+
+// AppPath returns the path to an app's rootfs.
+func AppPath(root string, appName types.ACName) string {
+ return filepath.Join(AppsPath(root), appName.String())
+}
+
+// AppRootfsPath returns the path to an app's rootfs.
+func AppRootfsPath(root string, appName types.ACName) string {
+ return filepath.Join(AppPath(root, appName), aci.RootfsDir)
+}
+
+// RelAppPath returns the path of an app relative to the stage1 chroot.
+func RelAppPath(appName types.ACName) string {
+ return filepath.Join(stage2Dir, appName.String())
+}
+
+// RelAppRootfsPath returns the path of an app's rootfs relative to the stage1 chroot.
+func RelAppRootfsPath(appName types.ACName) string {
+ return filepath.Join(RelAppPath(appName), aci.RootfsDir)
+}
+
+// ImageManifestPath returns the path to the app's manifest file of a pod.
+func ImageManifestPath(root string, appName types.ACName) string {
+ return filepath.Join(AppPath(root, appName), aci.ManifestFile)
+}
+
+// AppsInfoPath returns the path to the appsinfo directory of a pod.
+func AppsInfoPath(root string) string {
+ return filepath.Join(root, AppsInfoDir)
+}
+
+// AppInfoPath returns the path to the app's appsinfo directory of a pod.
+func AppInfoPath(root string, appName types.ACName) string {
+ return filepath.Join(AppsInfoPath(root), appName.String())
+}
+
+// AppTreeStoreIDPath returns the path to the app's treeStoreID file of a pod.
+func AppTreeStoreIDPath(root string, appName types.ACName) string {
+ return filepath.Join(AppInfoPath(root, appName), AppTreeStoreIDFilename)
+}
+
+// SharedVolumesPath returns the path to the shared (empty) volumes of a pod.
+func SharedVolumesPath(root string) string {
+ return filepath.Join(root, sharedVolumesDir)
+}
+
+// MetadataServicePublicURL returns the public URL used to host the metadata service
+func MetadataServicePublicURL(ip net.IP, token string) string {
+ return fmt.Sprintf("http://%v:%v/%v", ip, MetadataServicePort, token)
+}
+
+func GetRktLockFD() (int, error) {
+ if v := os.Getenv(EnvLockFd); v != "" {
+ fd, err := strconv.ParseUint(v, 10, 32)
+ if err != nil {
+ return -1, err
+ }
+ return int(fd), nil
+ }
+ return -1, fmt.Errorf("%v env var is not set", EnvLockFd)
+}
+
+// SupportsOverlay returns whether the system supports overlay filesystem
+func SupportsOverlay() bool {
+ exec.Command("modprobe", "overlay").Run()
+
+ f, err := os.Open("/proc/filesystems")
+ if err != nil {
+ fmt.Println("error opening /proc/filesystems")
+ return false
+ }
+ defer f.Close()
+
+ s := bufio.NewScanner(f)
+ for s.Scan() {
+ if s.Text() == "nodev\toverlay" {
+ return true
+ }
+ }
+ return false
+}
+
+// SupportsUserNS returns whether the kernel has CONFIG_USER_NS set
+func SupportsUserNS() bool {
+ if _, err := os.Stat("/proc/self/uid_map"); err == nil {
+ return true
+ }
+
+ return false
+}
+
+// NetList implements the flag.Value interface to allow specification of --net with and without values
+// Example: --net="all,net1:k1=v1;k2=v2,net2:l1=w1"
+type NetList struct {
+ mapping map[string]string
+}
+
+func (l *NetList) String() string {
+ return strings.Join(l.Strings(), ",")
+}
+
+func (l *NetList) Set(value string) error {
+ if l.mapping == nil {
+ l.mapping = make(map[string]string)
+ }
+ for _, s := range strings.Split(value, ",") {
+ netArgsPair := strings.Split(s, ":")
+ netName := netArgsPair[0]
+
+ if netName == "" {
+ return fmt.Errorf("netname must not be empty")
+ }
+
+ if _, duplicate := l.mapping[netName]; duplicate {
+ return fmt.Errorf("found duplicate netname %q", netName)
+ }
+
+ switch {
+ case len(netArgsPair) == 1:
+ l.mapping[netName] = ""
+ case len(netArgsPair) == 2:
+ if netName == "all" ||
+ netName == "host" {
+ return fmt.Errorf("arguments are not supported by special netname %q", netName)
+ }
+ l.mapping[netName] = netArgsPair[1]
+ case len(netArgsPair) > 2:
+ return fmt.Errorf("network %q provided with invalid arguments: %v", netName, netArgsPair[1:])
+ default:
+ return fmt.Errorf("unexpected case when processing network %q", s)
+ }
+ }
+ return nil
+}
+
+func (l *NetList) Type() string {
+ return "netList"
+}
+
+func (l *NetList) Strings() []string {
+ if len(l.mapping) == 0 {
+ return []string{"default"}
+ }
+
+ var list []string
+ for k, v := range l.mapping {
+ if v == "" {
+ list = append(list, k)
+ } else {
+ list = append(list, fmt.Sprintf("%s:%s", k, v))
+ }
+ }
+ return list
+}
+
+func (l *NetList) StringsOnlyNames() []string {
+ var list []string
+ for k := range l.mapping {
+ list = append(list, k)
+ }
+ return list
+}
+
+// Check if host networking has been requested
+func (l *NetList) Host() bool {
+ return l.Specific("host")
+}
+
+// Check if 'none' (loopback only) networking has been requested
+func (l *NetList) None() bool {
+ return l.Specific("none")
+}
+
+// Check if the container needs to be put in a separate network namespace
+func (l *NetList) Contained() bool {
+ return !l.Host() && len(l.mapping) > 0
+}
+
+func (l *NetList) Specific(net string) bool {
+ _, exists := l.mapping[net]
+ return exists
+}
+
+func (l *NetList) SpecificArgs(net string) string {
+ return l.mapping[net]
+}
+
+func (l *NetList) All() bool {
+ return l.Specific("all")
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/common/group.go b/Godeps/_workspace/src/github.com/coreos/rkt/common/group.go
new file mode 100644
index 0000000..d3e74d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/common/group.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package common
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+const (
+ groupFilePath = "/etc/group"
+ RktGroup = "rkt"
+)
+
+// Group represents an entry in the group file.
+type Group struct {
+ Name string
+ Pass string
+ Gid int
+ Users []string
+}
+
+// LookupGid reads the group file and returns the gid of the group
+// specified by groupName.
+func LookupGid(groupName string) (gid int, err error) {
+ groups, err := parseGroupFile(groupFilePath)
+ if err != nil {
+ return -1, fmt.Errorf("error parsing %q file: %v", groupFilePath, err)
+ }
+
+ group, ok := groups[groupName]
+ if !ok {
+ return -1, fmt.Errorf("%q group not found", groupName)
+ }
+
+ return group.Gid, nil
+}
+
+func parseGroupFile(path string) (group map[string]Group, err error) {
+ groupFile, err := os.Open(path)
+ if err != nil {
+ return nil, err
+ }
+ defer groupFile.Close()
+
+ return parseGroups(groupFile)
+}
+
+func parseGroups(r io.Reader) (group map[string]Group, err error) {
+ s := bufio.NewScanner(r)
+ out := make(map[string]Group)
+
+ for s.Scan() {
+ if err := s.Err(); err != nil {
+ return nil, err
+ }
+
+ text := s.Text()
+ if text == "" {
+ continue
+ }
+
+ p := Group{}
+ parseGroupLine(text, &p)
+
+ out[p.Name] = p
+ }
+
+ return out, nil
+}
+
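+// parseGroupLine fills group from a single colon-separated /etc/group entry
+// of the form "name:password:gid:user1,user2" (for example "rkt:x:133:alice,bob").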
+func parseGroupLine(line string, group *Group) {
+ const (
+ NameIdx = iota
+ PassIdx
+ GidIdx
+ UsersIdx
+ )
+
+ if line == "" {
+ return
+ }
+
+ splits := strings.Split(line, ":")
+ if len(splits) < 4 {
+ return
+ }
+
+ group.Name = splits[NameIdx]
+ group.Pass = splits[PassIdx]
+ group.Gid, _ = strconv.Atoi(splits[GidIdx])
+
+ u := splits[UsersIdx]
+ if u != "" {
+ group.Users = strings.Split(u, ",")
+ } else {
+ group.Users = []string{}
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/rkt/config/auth.go b/Godeps/_workspace/src/github.com/coreos/rkt/rkt/config/auth.go
new file mode 100644
index 0000000..c0cce1b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/rkt/config/auth.go
@@ -0,0 +1,180 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "net/http"
+)
+
+const (
+ authHeader string = "Authorization"
+)
+
+type authV1JsonParser struct{}
+
+type authV1 struct {
+ Domains []string `json:"domains"`
+ Type string `json:"type"`
+ Credentials json.RawMessage `json:"credentials"`
+}
+
+type basicV1 struct {
+ User string `json:"user"`
+ Password string `json:"password"`
+}
+
+type oauthV1 struct {
+ Token string `json:"token"`
+}
+
+type dockerAuthV1JsonParser struct{}
+
+type dockerAuthV1 struct {
+ Registries []string `json:"registries"`
+ Credentials basicV1 `json:"credentials"`
+}
+
+func init() {
+ addParser("auth", "v1", &authV1JsonParser{})
+ addParser("dockerAuth", "v1", &dockerAuthV1JsonParser{})
+ registerSubDir("auth.d", []string{"auth", "dockerAuth"})
+}
+
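+// For illustration only (a hypothetical file, not part of rkt itself): a JSON
+// file dropped into an auth.d directory that this parser accepts would look
+// roughly like
+//
+//   {
+//       "rktKind": "auth",
+//       "rktVersion": "v1",
+//       "domains": ["coreos.com"],
+//       "type": "basic",
+//       "credentials": {"user": "bob", "password": "secret"}
+//   }
+//
+// with "type": "oauth" plus {"token": "..."} as the other credential form, and
+// an analogous "dockerAuth" kind that uses "registries" instead of "domains".
+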
+type basicAuthHeaderer struct {
+ user string
+ password string
+}
+
+func (h *basicAuthHeaderer) Header() http.Header {
+ headers := make(http.Header)
+ creds := []byte(fmt.Sprintf("%s:%s", h.user, h.password))
+ encodedCreds := base64.StdEncoding.EncodeToString(creds)
+ headers.Add(authHeader, "Basic "+encodedCreds)
+
+ return headers
+}
+
+type oAuthBearerTokenHeaderer struct {
+ token string
+}
+
+func (h *oAuthBearerTokenHeaderer) Header() http.Header {
+ headers := make(http.Header)
+ headers.Add(authHeader, "Bearer "+h.token)
+
+ return headers
+}
+
+func (p *authV1JsonParser) parse(config *Config, raw []byte) error {
+ var auth authV1
+ if err := json.Unmarshal(raw, &auth); err != nil {
+ return err
+ }
+ if len(auth.Domains) == 0 {
+ return fmt.Errorf("no domains specified")
+ }
+ if len(auth.Type) == 0 {
+ return fmt.Errorf("no auth type specified")
+ }
+ var (
+ err error
+ headerer Headerer
+ )
+ switch auth.Type {
+ case "basic":
+ headerer, err = p.getBasicV1Headerer(auth.Credentials)
+ case "oauth":
+ headerer, err = p.getOAuthV1Headerer(auth.Credentials)
+ default:
+ err = fmt.Errorf("unknown auth type: %q", auth.Type)
+ }
+ if err != nil {
+ return err
+ }
+ for _, domain := range auth.Domains {
+ if _, ok := config.AuthPerHost[domain]; ok {
+ return fmt.Errorf("auth for domain %q is already specified", domain)
+ }
+ config.AuthPerHost[domain] = headerer
+ }
+ return nil
+}
+
+func (p *authV1JsonParser) getBasicV1Headerer(raw json.RawMessage) (Headerer, error) {
+ var basic basicV1
+ if err := json.Unmarshal(raw, &basic); err != nil {
+ return nil, err
+ }
+ if err := validateBasicV1(&basic); err != nil {
+ return nil, err
+ }
+ return &basicAuthHeaderer{
+ user: basic.User,
+ password: basic.Password,
+ }, nil
+}
+
+func (p *authV1JsonParser) getOAuthV1Headerer(raw json.RawMessage) (Headerer, error) {
+ var oauth oauthV1
+ if err := json.Unmarshal(raw, &oauth); err != nil {
+ return nil, err
+ }
+ if len(oauth.Token) == 0 {
+ return nil, fmt.Errorf("no oauth bearer token specified")
+ }
+ return &oAuthBearerTokenHeaderer{
+ token: oauth.Token,
+ }, nil
+}
+
+func (p *dockerAuthV1JsonParser) parse(config *Config, raw []byte) error {
+ var auth dockerAuthV1
+ if err := json.Unmarshal(raw, &auth); err != nil {
+ return err
+ }
+ if len(auth.Registries) == 0 {
+ return fmt.Errorf("no registries specified")
+ }
+ if err := validateBasicV1(&auth.Credentials); err != nil {
+ return err
+ }
+ basic := BasicCredentials{
+ User: auth.Credentials.User,
+ Password: auth.Credentials.Password,
+ }
+ for _, registry := range auth.Registries {
+ if _, ok := config.DockerCredentialsPerRegistry[registry]; ok {
+ return fmt.Errorf("credentials for docker registry %q are already specified", registry)
+ }
+ config.DockerCredentialsPerRegistry[registry] = basic
+ }
+ return nil
+}
+
+func validateBasicV1(basic *basicV1) error {
+ if basic == nil {
+ return fmt.Errorf("no credentials")
+ }
+ if len(basic.User) == 0 {
+ return fmt.Errorf("user not specified")
+ }
+ if len(basic.Password) == 0 {
+ return fmt.Errorf("password not specified")
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/coreos/rkt/rkt/config/config.go b/Godeps/_workspace/src/github.com/coreos/rkt/rkt/config/config.go
new file mode 100644
index 0000000..d979aa0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/coreos/rkt/rkt/config/config.go
@@ -0,0 +1,282 @@
+// Copyright 2015 The rkt Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/coreos/rkt/common"
+)
+
+// Headerer is an interface for getting additional HTTP headers to use
+// when downloading data (images, signatures).
+type Headerer interface {
+ Header() http.Header
+}
+
+type BasicCredentials struct {
+ User string
+ Password string
+}
+
+// Config is a single place where the configuration needed by the rkt
+// frontend resides.
+type Config struct {
+ AuthPerHost map[string]Headerer
+ DockerCredentialsPerRegistry map[string]BasicCredentials
+}
+
+type configParser interface {
+ parse(config *Config, raw []byte) error
+}
+
+var (
+ // configSubDirs is a map saying what kinds of configuration
+ // (values) are acceptable in a config subdirectory (key)
+ configSubDirs = make(map[string][]string)
+ parsersForKind = make(map[string]map[string]configParser)
+)
+
+func addParser(kind, version string, parser configParser) {
+ if len(kind) == 0 {
+ panic("empty kind string when registering a config parser")
+ }
+ if len(version) == 0 {
+ panic("empty version string when registering a config parser")
+ }
+ if parser == nil {
+ panic("trying to register a nil parser")
+ }
+ if _, err := getParser(kind, version); err == nil {
+ panic(fmt.Sprintf("A parser for kind %q and version %q already exist", kind, version))
+ }
+ if _, ok := parsersForKind[kind]; !ok {
+ parsersForKind[kind] = make(map[string]configParser)
+ }
+ parsersForKind[kind][version] = parser
+}
+
+func registerSubDir(dir string, kinds []string) {
+ if len(dir) == 0 {
+ panic("trying to register empty config subdirectory")
+ }
+ if len(kinds) == 0 {
+ panic("kinds array cannot be empty when registering config subdir")
+ }
+ allKinds := toArray(toSet(append(configSubDirs[dir], kinds...)))
+ sort.Strings(allKinds)
+ configSubDirs[dir] = allKinds
+}
+
+func toSet(a []string) map[string]struct{} {
+ s := make(map[string]struct{})
+ for _, v := range a {
+ s[v] = struct{}{}
+ }
+ return s
+}
+
+func toArray(s map[string]struct{}) []string {
+ a := make([]string, 0, len(s))
+ for k := range s {
+ a = append(a, k)
+ }
+ return a
+}
+
+// GetConfig gets the Config instance with configuration taken from
+// default system path (see common.DefaultSystemConfigDir) overridden
+// with configuration from default local path (see
+// common.DefaultLocalConfigDir).
+func GetConfig() (*Config, error) {
+ return GetConfigFrom(common.DefaultSystemConfigDir, common.DefaultLocalConfigDir)
+}
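+
+// For illustration: with the defaults this reads files such as
+// /usr/lib/rkt/auth.d/*.json first and /etc/rkt/auth.d/*.json second, so
+// entries from the local tree override the system-wide ones.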
+
+// GetConfigFrom gets the Config instance with configuration taken
+// from given system path overridden with configuration from given
+// local path.
+func GetConfigFrom(system, local string) (*Config, error) {
+ cfg := newConfig()
+ for _, cd := range []string{system, local} {
+ subcfg, err := GetConfigFromDir(cd)
+ if err != nil {
+ return nil, err
+ }
+ mergeConfigs(cfg, subcfg)
+ }
+ return cfg, nil
+}
+
+// GetConfigFromDir gets the Config instance with configuration taken
+// from given directory.
+func GetConfigFromDir(dir string) (*Config, error) {
+ subcfg := newConfig()
+ if valid, err := validDir(dir); err != nil {
+ return nil, err
+ } else if !valid {
+ return subcfg, nil
+ }
+ if err := readConfigDir(subcfg, dir); err != nil {
+ return nil, err
+ }
+ return subcfg, nil
+}
+
+func newConfig() *Config {
+ return &Config{
+ AuthPerHost: make(map[string]Headerer),
+ DockerCredentialsPerRegistry: make(map[string]BasicCredentials),
+ }
+}
+
+func readConfigDir(config *Config, dir string) error {
+ for csd, kinds := range configSubDirs {
+ d := filepath.Join(dir, csd)
+ if valid, err := validDir(d); err != nil {
+ return err
+ } else if !valid {
+ continue
+ }
+ configWalker := getConfigWalker(config, kinds, d)
+ if err := filepath.Walk(d, configWalker); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func validDir(path string) (bool, error) {
+ fi, err := os.Stat(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return false, nil
+ }
+ return false, err
+ }
+ if !fi.IsDir() {
+ return false, fmt.Errorf("expected %q to be a directory", path)
+ }
+ return true, nil
+}
+
+func getConfigWalker(config *Config, kinds []string, root string) filepath.WalkFunc {
+ return func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if path == root {
+ return nil
+ }
+ return readFile(config, info, path, kinds)
+ }
+}
+
+func readFile(config *Config, info os.FileInfo, path string, kinds []string) error {
+ if valid, err := validConfigFile(info); err != nil {
+ return err
+ } else if !valid {
+ return nil
+ }
+ if err := parseConfigFile(config, path, kinds); err != nil {
+ return err
+ }
+ return nil
+}
+
+func validConfigFile(info os.FileInfo) (bool, error) {
+ mode := info.Mode()
+ switch {
+ case mode.IsDir():
+ return false, filepath.SkipDir
+ case mode.IsRegular():
+ return filepath.Ext(info.Name()) == ".json", nil
+ case mode&os.ModeSymlink == os.ModeSymlink:
+ // TODO: support symlinks?
+ return false, nil
+ default:
+ return false, nil
+ }
+}
+
+type configHeader struct {
+ RktVersion string `json:"rktVersion"`
+ RktKind string `json:"rktKind"`
+}
+
+func parseConfigFile(config *Config, path string, kinds []string) error {
+ raw, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ var header configHeader
+ if err := json.Unmarshal(raw, &header); err != nil {
+ return err
+ }
+ if len(header.RktKind) == 0 {
+ return fmt.Errorf("no rktKind specified in %q", path)
+ }
+ if len(header.RktVersion) == 0 {
+ return fmt.Errorf("no rktVersion specified in %q", path)
+ }
+ kindOk := false
+ for _, kind := range kinds {
+ if header.RktKind == kind {
+ kindOk = true
+ break
+ }
+ }
+ if !kindOk {
+ dir := filepath.Dir(path)
+ base := filepath.Base(path)
+ kindsStr := strings.Join(kinds, `", "`)
+ return fmt.Errorf("the configuration directory %q expects to have configuration files of kinds %q, but %q has kind of %q", dir, kindsStr, base, header.RktKind)
+ }
+ parser, err := getParser(header.RktKind, header.RktVersion)
+ if err != nil {
+ return err
+ }
+ if err := parser.parse(config, raw); err != nil {
+ return fmt.Errorf("failed to parse %q: %v", path, err)
+ }
+ return nil
+}
+
+func getParser(kind, version string) (configParser, error) {
+ parsers, ok := parsersForKind[kind]
+ if !ok {
+ return nil, fmt.Errorf("no parser available for configuration of kind %q", kind)
+ }
+ parser, ok := parsers[version]
+ if !ok {
+ return nil, fmt.Errorf("no parser available for configuration of kind %q and version %q", kind, version)
+ }
+ return parser, nil
+}
+
+func mergeConfigs(config *Config, subconfig *Config) {
+ for host, headerer := range subconfig.AuthPerHost {
+ config.AuthPerHost[host] = headerer
+ }
+ for registry, creds := range subconfig.DockerCredentialsPerRegistry {
+ config.DockerCredentialsPerRegistry[registry] = creds
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man/md2man.go b/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man/md2man.go
new file mode 100644
index 0000000..77249d7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man/md2man.go
@@ -0,0 +1,19 @@
+package md2man
+
+import (
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/russross/blackfriday"
+)
+
+func Render(doc []byte) []byte {
+ renderer := RoffRenderer(0)
+ extensions := 0
+ extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
+ extensions |= blackfriday.EXTENSION_TABLES
+ extensions |= blackfriday.EXTENSION_FENCED_CODE
+ extensions |= blackfriday.EXTENSION_AUTOLINK
+ extensions |= blackfriday.EXTENSION_SPACE_HEADERS
+ extensions |= blackfriday.EXTENSION_FOOTNOTES
+ extensions |= blackfriday.EXTENSION_TITLEBLOCK
+
+ return blackfriday.Markdown(doc, renderer, extensions)
+}
diff --git a/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man/roff.go b/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man/roff.go
new file mode 100644
index 0000000..d2cfcb8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/cpuguy83/go-md2man/md2man/roff.go
@@ -0,0 +1,269 @@
+package md2man
+
+import (
+ "bytes"
+ "fmt"
+ "html"
+ "strings"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/russross/blackfriday"
+)
+
+type roffRenderer struct{}
+
+func RoffRenderer(flags int) blackfriday.Renderer {
+ return &roffRenderer{}
+}
+
+func (r *roffRenderer) GetFlags() int {
+ return 0
+}
+
+func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
+ out.WriteString(".TH ")
+
+ splitText := bytes.Split(text, []byte("\n"))
+ for i, line := range splitText {
+ line = bytes.TrimPrefix(line, []byte("% "))
+ if i == 0 {
+ line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
+ line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
+ }
+ line = append([]byte("\""), line...)
+ line = append(line, []byte("\" ")...)
+ out.Write(line)
+ }
+
+ out.WriteString(" \"\"\n")
+}
+
+func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
+ out.WriteString("\n.PP\n.RS\n\n.nf\n")
+ escapeSpecialChars(out, text)
+ out.WriteString("\n.fi\n.RE\n")
+}
+
+func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
+ out.WriteString("\n.PP\n.RS\n")
+ out.Write(text)
+ out.WriteString("\n.RE\n")
+}
+
+func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) {
+ out.Write(text)
+}
+
+func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
+ marker := out.Len()
+
+ switch {
+ case marker == 0:
+ // This is the doc header
+ out.WriteString(".TH ")
+ case level == 1:
+ out.WriteString("\n\n.SH ")
+ case level == 2:
+ out.WriteString("\n.SH ")
+ default:
+ out.WriteString("\n.SS ")
+ }
+
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+}
+
+func (r *roffRenderer) HRule(out *bytes.Buffer) {
+ out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
+}
+
+func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
+ marker := out.Len()
+ out.WriteString(".IP ")
+ if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
+ out.WriteString("\\(bu 2")
+ } else {
+ out.WriteString("\\n+[step" + string(flags) + "]")
+ }
+ out.WriteString("\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+
+}
+
+func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
+ out.WriteString("\n\\item ")
+ out.Write(text)
+}
+
+func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
+ marker := out.Len()
+ out.WriteString("\n.PP\n")
+ if !text() {
+ out.Truncate(marker)
+ return
+ }
+ if marker != 0 {
+ out.WriteString("\n")
+ }
+}
+
+// TODO: This might not work
+func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
+ out.WriteString(".TS\nallbox;\n")
+
+ out.Write(header)
+ out.Write(body)
+ out.WriteString("\n.TE\n")
+}
+
+func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
+ if out.Len() > 0 {
+ out.WriteString("\n")
+ }
+ out.Write(text)
+ out.WriteString("\n")
+}
+
+func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString(" ")
+ }
+ out.Write(text)
+ out.WriteString(" ")
+}
+
+// TODO: This is probably broken
+func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
+ if out.Len() > 0 {
+ out.WriteString("\t")
+ }
+ out.Write(text)
+ out.WriteString("\t")
+}
+
+func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
+
+}
+
+func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
+
+}
+
+func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
+ out.WriteString("\n\\[la]")
+ out.Write(link)
+ out.WriteString("\\[ra]")
+}
+
+func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB\\fC")
+ escapeSpecialChars(out, text)
+ out.WriteString("\\fR")
+}
+
+func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fB")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\fI")
+ out.Write(text)
+ out.WriteString("\\fP")
+}
+
+func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
+}
+
+func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
+ out.WriteString("\n.br\n")
+}
+
+func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
+ r.AutoLink(out, link, 0)
+}
+
+func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {
+ out.Write(tag)
+}
+
+func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
+ out.WriteString("\\s+2")
+ out.Write(text)
+ out.WriteString("\\s-2")
+}
+
+func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
+}
+
+func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
+
+}
+
+func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
+ out.WriteString(html.UnescapeString(string(entity)))
+}
+
+func processFooterText(text []byte) []byte {
+ text = bytes.TrimPrefix(text, []byte("% "))
+ newText := []byte{}
+ textArr := strings.Split(string(text), ") ")
+
+ for i, w := range textArr {
+ if i == 0 {
+ w = strings.Replace(w, "(", "\" \"", 1)
+ w = fmt.Sprintf("\"%s\"", w)
+ } else {
+ w = fmt.Sprintf(" \"%s\"", w)
+ }
+ newText = append(newText, []byte(w)...)
+ }
+ newText = append(newText, []byte(" \"\"")...)
+
+ return newText
+}
+
+func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
+ escapeSpecialChars(out, text)
+}
+
+func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
+}
+
+func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
+}
+
+func needsBackslash(c byte) bool {
+ for _, r := range []byte("-_&\\~") {
+ if c == r {
+ return true
+ }
+ }
+ return false
+}
+
+func escapeSpecialChars(out *bytes.Buffer, text []byte) {
+ for i := 0; i < len(text); i++ {
+ // directly copy normal characters
+ org := i
+
+ for i < len(text) && !needsBackslash(text[i]) {
+ i++
+ }
+ if i > org {
+ out.Write(text[org:i])
+ }
+
+ // escape a character
+ if i >= len(text) {
+ break
+ }
+ out.WriteByte('\\')
+ out.WriteByte(text[i])
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/LICENSE b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/LICENSE
new file mode 100644
index 0000000..5f0d1fb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/LICENSE
@@ -0,0 +1,13 @@
+Copyright 2014 Alan Shreve
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/README.md b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/README.md
new file mode 100644
index 0000000..7a950d1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/README.md
@@ -0,0 +1,23 @@
+# mousetrap
+
+mousetrap is a tiny library that answers a single question.
+
+On a Windows machine, was the process invoked by someone double clicking on
+the executable file while browsing in explorer?
+
+### Motivation
+
+Windows developers unfamiliar with command line tools will often "double-click"
+the executable for a tool. Because most CLI tools print the help and then exit
+when invoked without arguments, this is often very frustrating for those users.
+
+mousetrap provides a way to detect these invocations so that you can provide
+more helpful behavior and instructions on how to run the CLI tool. To see what
+this looks like, both from an organizational and a technical perspective, see
+https://inconshreveable.com/09-09-2014/sweat-the-small-stuff/
+
+### The interface
+
+The library exposes a single interface:
+
+ func StartedByExplorer() (bool)
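+
+A minimal usage sketch (illustrative only; everything here other than
+`StartedByExplorer` is hypothetical):
+
+    package main
+
+    import (
+        "fmt"
+        "os"
+        "time"
+
+        "github.com/inconshreveable/mousetrap"
+    )
+
+    func main() {
+        if mousetrap.StartedByExplorer() {
+            fmt.Println("This is a command line tool; please run it from a terminal.")
+            // keep the console window open long enough for the message to be read
+            time.Sleep(5 * time.Second)
+            os.Exit(1)
+        }
+        // ... normal CLI behavior ...
+    }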
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_others.go b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_others.go
new file mode 100644
index 0000000..9d2d8a4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_others.go
@@ -0,0 +1,15 @@
+// +build !windows
+
+package mousetrap
+
+// StartedByExplorer returns true if the program was invoked by the user
+// double-clicking on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+//
+// On non-Windows platforms, it always returns false.
+func StartedByExplorer() bool {
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows.go b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows.go
new file mode 100644
index 0000000..336142a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows.go
@@ -0,0 +1,98 @@
+// +build windows
+// +build !go1.4
+
+package mousetrap
+
+import (
+ "fmt"
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+const (
+ // defined by the Win32 API
+ th32cs_snapprocess uintptr = 0x2
+)
+
+var (
+ kernel = syscall.MustLoadDLL("kernel32.dll")
+ CreateToolhelp32Snapshot = kernel.MustFindProc("CreateToolhelp32Snapshot")
+ Process32First = kernel.MustFindProc("Process32FirstW")
+ Process32Next = kernel.MustFindProc("Process32NextW")
+)
+
+// ProcessEntry32 structure defined by the Win32 API
+type processEntry32 struct {
+ dwSize uint32
+ cntUsage uint32
+ th32ProcessID uint32
+ th32DefaultHeapID int
+ th32ModuleID uint32
+ cntThreads uint32
+ th32ParentProcessID uint32
+ pcPriClassBase int32
+ dwFlags uint32
+ szExeFile [syscall.MAX_PATH]uint16
+}
+
+func getProcessEntry(pid int) (pe *processEntry32, err error) {
+ snapshot, _, e1 := CreateToolhelp32Snapshot.Call(th32cs_snapprocess, uintptr(0))
+ if snapshot == uintptr(syscall.InvalidHandle) {
+ err = fmt.Errorf("CreateToolhelp32Snapshot: %v", e1)
+ return
+ }
+ defer syscall.CloseHandle(syscall.Handle(snapshot))
+
+ var processEntry processEntry32
+ processEntry.dwSize = uint32(unsafe.Sizeof(processEntry))
+ ok, _, e1 := Process32First.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32First: %v", e1)
+ return
+ }
+
+ for {
+ if processEntry.th32ProcessID == uint32(pid) {
+ pe = &processEntry
+ return
+ }
+
+ ok, _, e1 = Process32Next.Call(snapshot, uintptr(unsafe.Pointer(&processEntry)))
+ if ok == 0 {
+ err = fmt.Errorf("Process32Next: %v", e1)
+ return
+ }
+ }
+}
+
+func getppid() (pid int, err error) {
+ pe, err := getProcessEntry(os.Getpid())
+ if err != nil {
+ return
+ }
+
+ pid = int(pe.th32ParentProcessID)
+ return
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ ppid, err := getppid()
+ if err != nil {
+ return false
+ }
+
+ pe, err := getProcessEntry(ppid)
+ if err != nil {
+ return false
+ }
+
+ name := syscall.UTF16ToString(pe.szExeFile[:])
+ return name == "explorer.exe"
+}
diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
new file mode 100644
index 0000000..9a28e57
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
@@ -0,0 +1,46 @@
+// +build windows
+// +build go1.4
+
+package mousetrap
+
+import (
+ "os"
+ "syscall"
+ "unsafe"
+)
+
+func getProcessEntry(pid int) (*syscall.ProcessEntry32, error) {
+ snapshot, err := syscall.CreateToolhelp32Snapshot(syscall.TH32CS_SNAPPROCESS, 0)
+ if err != nil {
+ return nil, err
+ }
+ defer syscall.CloseHandle(snapshot)
+ var procEntry syscall.ProcessEntry32
+ procEntry.Size = uint32(unsafe.Sizeof(procEntry))
+ if err = syscall.Process32First(snapshot, &procEntry); err != nil {
+ return nil, err
+ }
+ for {
+ if procEntry.ProcessID == uint32(pid) {
+ return &procEntry, nil
+ }
+ err = syscall.Process32Next(snapshot, &procEntry)
+ if err != nil {
+ return nil, err
+ }
+ }
+}
+
+// StartedByExplorer returns true if the program was invoked by the user double-clicking
+// on the executable from explorer.exe
+//
+// It is conservative and returns false if any of the internal calls fail.
+// It does not guarantee that the program was run from a terminal. It only can tell you
+// whether it was launched from explorer.exe
+func StartedByExplorer() bool {
+ pe, err := getProcessEntry(os.Getppid())
+ if err != nil {
+ return false
+ }
+ return "explorer.exe" == syscall.UTF16ToString(pe.ExeFile[:])
+}
diff --git a/Godeps/_workspace/src/github.com/russross/blackfriday/.gitignore b/Godeps/_workspace/src/github.com/russross/blackfriday/.gitignore
new file mode 100644
index 0000000..75623dc
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/russross/blackfriday/.gitignore
@@ -0,0 +1,8 @@
+*.out
+*.swp
+*.8
+*.6
+_obj
+_test*
+markdown
+tags
diff --git a/Godeps/_workspace/src/github.com/russross/blackfriday/.travis.yml b/Godeps/_workspace/src/github.com/russross/blackfriday/.travis.yml
new file mode 100644
index 0000000..208fd25
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/russross/blackfriday/.travis.yml
@@ -0,0 +1,18 @@
+# Travis CI (http://travis-ci.org/) is a continuous integration service for
+# open source projects. This file configures it to run unit tests for
+# blackfriday.
+
+language: go
+
+go:
+ - 1.2
+ - 1.3
+ - 1.4
+ - 1.5
+
+install:
+ - go get -d -t -v ./...
+ - go build -v ./...
+
+script:
+ - go test -v ./...
diff --git a/Godeps/_workspace/src/github.com/russross/blackfriday/LICENSE.txt b/Godeps/_workspace/src/github.com/russross/blackfriday/LICENSE.txt
new file mode 100644
index 0000000..2885af3
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/russross/blackfriday/LICENSE.txt
@@ -0,0 +1,29 @@
+Blackfriday is distributed under the Simplified BSD License:
+
+> Copyright © 2011 Russ Ross
+> All rights reserved.
+>
+> Redistribution and use in source and binary forms, with or without
+> modification, are permitted provided that the following conditions
+> are met:
+>
+> 1. Redistributions of source code must retain the above copyright
+> notice, this list of conditions and the following disclaimer.
+>
+> 2. Redistributions in binary form must reproduce the above
+> copyright notice, this list of conditions and the following
+> disclaimer in the documentation and/or other materials provided with
+> the distribution.
+>
+> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/github.com/russross/blackfriday/README.md b/Godeps/_workspace/src/github.com/russross/blackfriday/README.md
new file mode 100644
index 0000000..52e3b25
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/russross/blackfriday/README.md
@@ -0,0 +1,246 @@
+Blackfriday [Build Status](https://travis-ci.org/russross/blackfriday)
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions. An experimental LaTeX output engine is also included.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with Go 1. If you are using an older
+release of Go, consider using v1.1 of blackfriday, which was based
+on the last stable release of Go prior to Go 1. You can find it as a
+tagged commit on github.
+
+With Go 1 and git installed:
+
+ go get github.com/russross/blackfriday
+
+will download, compile, and install the package into your `$GOPATH`
+directory hierarchy. Alternatively, you can achieve the same if you
+import it into a project:
+
+ import "github.com/russross/blackfriday"
+
+and `go get` without parameters.
+
+Usage
+-----
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+ output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+ output := blackfriday.MarkdownCommon(input)
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running blackfriday's output
+through an HTML sanitizer such as
+[Bluemonday](https://github.com/microcosm-cc/bluemonday).
+
+Here's an example of simple usage of blackfriday together with bluemonday:
+
+``` go
+import (
+ "github.com/microcosm-cc/bluemonday"
+ "github.com/russross/blackfriday"
+)
+
+// ...
+unsafe := blackfriday.MarkdownCommon(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options
+
+If you want to customize the set of options, first get a renderer
+(currently either the HTML or LaTeX output engines), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
+
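+A minimal sketch of that flow (the flag and extension choices below are just
+illustrative, not a recommended set):
+
+``` go
+renderer := blackfriday.HtmlRenderer(blackfriday.HTML_USE_SMARTYPANTS, "", "")
+extensions := blackfriday.EXTENSION_TABLES |
+    blackfriday.EXTENSION_FENCED_CODE |
+    blackfriday.EXTENSION_AUTOLINK
+output := blackfriday.Markdown(input, renderer, extensions)
+```
+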
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+ go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <https://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+ the `--tidy` option. Without `--tidy`, the differences are
+ mostly in whitespace and entity escaping, where blackfriday is
+ more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+ blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+ to feed untrusted user input without fear of bad things
+ happening. The test suite stress tests this and there are no
+ known inputs that make it crash. If you find one, please let me
+ know and send me the input that does it.
+
+ NOTE: "safety" in this context means *runtime safety only*. In order to
+ protect yourself against JavaScript injection in untrusted content, see
+ [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+ most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+ goroutines without ill effect. There is no dependence on global
+ shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+ library packages in Go. The source code is pretty
+ self-contained, so it is easy to add to any project, including
+ Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+ W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+ commonly used inside words when discussing code, so having
+ markdown interpret it as an emphasis command is usually the
+ wrong thing. Blackfriday lets you treat all emphasis markers as
+ normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+ using a simple syntax:
+
+ ```
+ Name | Age
+ --------|------
+ Bob | 27
+ Alice | 23
+ ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+ indentation to mark code blocks, you can explicitly mark them
+ and supply a language (to make syntax highlighting simple). Just
+ mark it like this:
+
+ ``` go
+ func getTrue() bool {
+ return true
+ }
+ ```
+
+ You can use 3 or more backticks to mark the beginning of the
+ block, and the same number to mark the end of the block.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+ explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+ should be crossed out.
+
+* **Hard line breaks**. With this extension enabled (it is off by
+ default in the `MarkdownBasic` and `MarkdownCommon` convenience
+ functions), newlines in the input translate into line breaks in
+ the output.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+ supported, turning normal double- and single-quote marks into
+ curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+ is translated into `–`, and `---` is translated into
+ `—`. This differs from most smartypants processors, which
+ turn a single hyphen into an ndash and a double hyphen into an
+ mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+ is translated into suitable HTML (instead of just a few special
+ cases like most smartypants processors). For example, `4/5`
+ becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+ 4⁄5.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+ provides a GitHub Flavored Markdown renderer with fenced code block
+ highlighting, clickable header anchor links.
+
+ It's not customizable, and its goal is to produce HTML output
+ equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+ except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+ but for markdown.
+
+* LaTeX output: renders output as LaTeX. This is currently part of the
+ main Blackfriday repository, but may be split into its own project
+ in the future. If you are interested in owning and maintaining the
+ LaTeX output component, please be in touch.
+
+ It renders some basic documents, but is only experimental at this
+ point. In particular, it does not do any inline escaping, so input
+ that happens to look like LaTeX code will be passed through without
+ modification.
+
+
+Todo
+----
+
+* More unit testing
+* Improve unicode support. It does not understand all unicode
+ rules (about what constitutes a letter, a punctuation symbol,
+ etc.), so it may fail to detect word boundaries correctly in
+ some instances. It is safe on all utf-8 input.
+
+
+License
+-------
+
+[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt)
+
+
+ [1]: http://daringfireball.net/projects/markdown/ "Markdown"
+ [2]: http://golang.org/ "Go Language"
+ [3]: https://github.com/vmg/sundown "Sundown"
diff --git a/Godeps/_workspace/src/github.com/russross/blackfriday/block.go b/Godeps/_workspace/src/github.com/russross/blackfriday/block.go
new file mode 100644
index 0000000..fb88bb1
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/russross/blackfriday/block.go
@@ -0,0 +1,1372 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross .
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+// Functions to parse block-level elements.
+//
+
+package blackfriday
+
+import (
+ "bytes"
+
+ "github.com/appc/acpush/Godeps/_workspace/src/github.com/shurcooL/sanitized_anchor_name"
+)
+
+// Parse block-level data.
+// Note: this function and many that it calls assume that
+// the input buffer ends with a newline.
+func (p *parser) block(out *bytes.Buffer, data []byte) {
+ if len(data) == 0 || data[len(data)-1] != '\n' {
+ panic("block input is missing terminating newline")
+ }
+
+ // this is called recursively: enforce a maximum depth
+ if p.nesting >= p.maxNesting {
+ return
+ }
+ p.nesting++
+
+ // parse out one block-level construct at a time
+ for len(data) > 0 {
+ // prefixed header:
+ //
+ // # Header 1
+ // ## Header 2
+ // ...
+ // ###### Header 6
+ if p.isPrefixHeader(data) {
+ data = data[p.prefixHeader(out, data):]
+ continue
+ }
+
+ // block of preformatted HTML:
+ //
+ // <div>
+ //     ...
+ // </div>
+ if data[0] == '<' {
+ if i := p.html(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // title block
+ //
+ // % stuff
+ // % more stuff
+ // % even more stuff
+ if p.flags&EXTENSION_TITLEBLOCK != 0 {
+ if data[0] == '%' {
+ if i := p.titleBlock(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+ }
+
+ // blank lines. note: returns the # of bytes to skip
+ if i := p.isEmpty(data); i > 0 {
+ data = data[i:]
+ continue
+ }
+
+ // indented code block:
+ //
+ // func max(a, b int) int {
+ // if a > b {
+ // return a
+ // }
+ // return b
+ // }
+ if p.codePrefix(data) > 0 {
+ data = data[p.code(out, data):]
+ continue
+ }
+
+ // fenced code block:
+ //
+ // ``` go
+ // func fact(n int) int {
+ // if n <= 1 {
+ // return n
+ // }
+ // return n * fact(n-1)
+ // }
+ // ```
+ if p.flags&EXTENSION_FENCED_CODE != 0 {
+ if i := p.fencedCode(out, data, true); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // horizontal rule:
+ //
+ // ------
+ // or
+ // ******
+ // or
+ // ______
+ if p.isHRule(data) {
+ p.r.HRule(out)
+ var i int
+ for i = 0; data[i] != '\n'; i++ {
+ }
+ data = data[i:]
+ continue
+ }
+
+ // block quote:
+ //
+ // > A big quote I found somewhere
+ // > on the web
+ if p.quotePrefix(data) > 0 {
+ data = data[p.quote(out, data):]
+ continue
+ }
+
+ // table:
+ //
+ // Name | Age | Phone
+ // ------|-----|---------
+ // Bob | 31 | 555-1234
+ // Alice | 27 | 555-4321
+ if p.flags&EXTENSION_TABLES != 0 {
+ if i := p.table(out, data); i > 0 {
+ data = data[i:]
+ continue
+ }
+ }
+
+ // an itemized/unordered list:
+ //
+ // * Item 1
+ // * Item 2
+ //
+ // also works with + or -
+ if p.uliPrefix(data) > 0 {
+ data = data[p.list(out, data, 0):]
+ continue
+ }
+
+ // a numbered/ordered list:
+ //
+ // 1. Item 1
+ // 2. Item 2
+ if p.oliPrefix(data) > 0 {
+ data = data[p.list(out, data, LIST_TYPE_ORDERED):]
+ continue
+ }
+
+ // definition lists:
+ //
+ // Term 1
+ // : Definition a
+ // : Definition b
+ //
+ // Term 2
+ // : Definition c
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if p.dliPrefix(data) > 0 {
+ data = data[p.list(out, data, LIST_TYPE_DEFINITION):]
+ continue
+ }
+ }
+
+ // anything else must look like a normal paragraph
+ // note: this finds underlined headers, too
+ data = data[p.paragraph(out, data):]
+ }
+
+ p.nesting--
+}
+
+func (p *parser) isPrefixHeader(data []byte) bool {
+ if data[0] != '#' {
+ return false
+ }
+
+ if p.flags&EXTENSION_SPACE_HEADERS != 0 {
+ level := 0
+ for level < 6 && data[level] == '#' {
+ level++
+ }
+ if data[level] != ' ' {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int {
+ level := 0
+ for level < 6 && data[level] == '#' {
+ level++
+ }
+ i := skipChar(data, level, ' ')
+ end := skipUntilChar(data, i, '\n')
+ skip := end
+ id := ""
+ if p.flags&EXTENSION_HEADER_IDS != 0 {
+ j, k := 0, 0
+ // find start/end of header id
+ for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ {
+ }
+ for k = j + 1; k < end && data[k] != '}'; k++ {
+ }
+ // extract header id iff found
+ if j < end && k < end {
+ id = string(data[j+2 : k])
+ end = j
+ skip = k + 1
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ }
+ }
+ for end > 0 && data[end-1] == '#' {
+ if isBackslashEscaped(data, end-1) {
+ break
+ }
+ end--
+ }
+ for end > 0 && data[end-1] == ' ' {
+ end--
+ }
+ if end > i {
+ if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+ id = sanitized_anchor_name.Create(string(data[i:end]))
+ }
+ work := func() bool {
+ p.inline(out, data[i:end])
+ return true
+ }
+ p.r.Header(out, work, level, id)
+ }
+ return skip
+}
+
+func (p *parser) isUnderlinedHeader(data []byte) int {
+ // test of level 1 header
+ if data[0] == '=' {
+ i := skipChar(data, 1, '=')
+ i = skipChar(data, i, ' ')
+ if data[i] == '\n' {
+ return 1
+ } else {
+ return 0
+ }
+ }
+
+ // test of level 2 header
+ if data[0] == '-' {
+ i := skipChar(data, 1, '-')
+ i = skipChar(data, i, ' ')
+ if data[i] == '\n' {
+ return 2
+ } else {
+ return 0
+ }
+ }
+
+ return 0
+}
+
+func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '%' {
+ return 0
+ }
+ splitData := bytes.Split(data, []byte("\n"))
+ var i int
+ for idx, b := range splitData {
+ if !bytes.HasPrefix(b, []byte("%")) {
+ i = idx // - 1
+ break
+ }
+ }
+
+ data = bytes.Join(splitData[0:i], []byte("\n"))
+ p.r.TitleBlock(out, data)
+
+ return len(data)
+}
+
+func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int {
+ var i, j int
+
+ // identify the opening tag
+ if data[0] != '<' {
+ return 0
+ }
+ curtag, tagfound := p.htmlFindTag(data[1:])
+
+ // handle special cases
+ if !tagfound {
+ // check for an HTML comment
+ if size := p.htmlComment(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // check for an <hr> tag
+ if size := p.htmlHr(out, data, doRender); size > 0 {
+ return size
+ }
+
+ // no special case recognized
+ return 0
+ }
+
+ // look for an unindented matching closing tag
+ // followed by a blank line
+ found := false
+ /*
+ closetag := []byte("\n" + curtag + ">")
+ j = len(curtag) + 1
+ for !found {
+ // scan for a closing tag at the beginning of a line
+ if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+ j += skip + len(closetag)
+ } else {
+ break
+ }
+
+ // see if it is the only thing on the line
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ // see if it is followed by a blank line/eof
+ j += skip
+ if j >= len(data) {
+ found = true
+ i = j
+ } else {
+ if skip := p.isEmpty(data[j:]); skip > 0 {
+ j += skip
+ found = true
+ i = j
+ }
+ }
+ }
+ }
+ */
+
+ // if not found, try a second pass looking for indented match
+ // but not if tag is "ins" or "del" (following original Markdown.pl)
+ if !found && curtag != "ins" && curtag != "del" {
+ i = 1
+ for i < len(data) {
+ i++
+ for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+ i++
+ }
+
+ if i+2+len(curtag) >= len(data) {
+ break
+ }
+
+ j = p.htmlFindEnd(curtag, data[i-1:])
+
+ if j > 0 {
+ i += j - 1
+ found = true
+ break
+ }
+ }
+ }
+
+ if !found {
+ return 0
+ }
+
+ // the end of the block has been found
+ if doRender {
+ // trim newlines
+ end := i
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+
+ return i
+}
+
+// HTML comment, lax form
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+ i := p.inlineHtmlComment(out, data)
+ // needs to end with a blank line
+ if j := p.isEmpty(data[i:]); j > 0 {
+ size := i + j
+ if doRender {
+ // trim trailing newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+ return size
+ }
+ return 0
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+ if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+ return 0
+ }
+ if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+ // not an <hr> tag after all; at least not a valid one
+ return 0
+ }
+
+ i := 3
+ for data[i] != '>' && data[i] != '\n' {
+ i++
+ }
+
+ if data[i] == '>' {
+ i++
+ if j := p.isEmpty(data[i:]); j > 0 {
+ size := i + j
+ if doRender {
+ // trim newlines
+ end := size
+ for end > 0 && data[end-1] == '\n' {
+ end--
+ }
+ p.r.BlockHtml(out, data[:end])
+ }
+ return size
+ }
+ }
+
+ return 0
+}
+
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+ i := 0
+ for isalnum(data[i]) {
+ i++
+ }
+ key := string(data[:i])
+ if blockTags[key] {
+ return key, true
+ }
+ return "", false
+}
+
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+ // assume data[0] == '<' && data[1] == '/' already tested
+
+ // check if tag is a match
+ closetag := []byte("</" + tag + ">")
+ if !bytes.HasPrefix(data, closetag) {
+ return 0
+ }
+ i := len(closetag)
+
+ // check that the rest of the line is blank
+ skip := 0
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ return 0
+ }
+ i += skip
+ skip = 0
+
+ if i >= len(data) {
+ return i
+ }
+
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ return i
+ }
+ if skip = p.isEmpty(data[i:]); skip == 0 {
+ // following line must be blank
+ return 0
+ }
+
+ return i + skip
+}
+
+func (p *parser) isEmpty(data []byte) int {
+ // it is okay to call isEmpty on an empty buffer
+ if len(data) == 0 {
+ return 0
+ }
+
+ var i int
+ for i = 0; i < len(data) && data[i] != '\n'; i++ {
+ if data[i] != ' ' && data[i] != '\t' {
+ return 0
+ }
+ }
+ return i + 1
+}
+
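+// isHRule reports whether the line is a horizontal rule: up to three leading
+// spaces, then at least three '*', '-', or '_' characters separated only by
+// spaces, e.g. "***\n" or "- - -\n".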
+func (p *parser) isHRule(data []byte) bool {
+ i := 0
+
+ // skip up to three spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // look at the hrule char
+ if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+ return false
+ }
+ c := data[i]
+
+ // the whole line must be the char or whitespace
+ n := 0
+ for data[i] != '\n' {
+ switch {
+ case data[i] == c:
+ n++
+ case data[i] != ' ':
+ return false
+ }
+ i++
+ }
+
+ return n >= 3
+}
+
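+// isFencedCode checks whether data opens (or, when oldmarker is non-empty,
+// closes) a fenced code block: at least three '~' or '`' characters, an
+// optional language tag, then a newline. For example, "```go\n" yields
+// skip=6 and marker="```", with the syntax pointer set to "go".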
+func (p *parser) isFencedCode(data []byte, syntax **string, oldmarker string) (skip int, marker string) {
+ i, size := 0, 0
+ skip = 0
+
+ // skip up to three spaces
+ for i < len(data) && i < 3 && data[i] == ' ' {
+ i++
+ }
+ if i >= len(data) {
+ return
+ }
+
+ // check for the marker characters: ~ or `
+ if data[i] != '~' && data[i] != '`' {
+ return
+ }
+
+ c := data[i]
+
+ // the whole line must be the same char or whitespace
+ for i < len(data) && data[i] == c {
+ size++
+ i++
+ }
+
+ if i >= len(data) {
+ return
+ }
+
+ // the marker char must occur at least 3 times
+ if size < 3 {
+ return
+ }
+ marker = string(data[i-size : i])
+
+ // if this is the end marker, it must match the beginning marker
+ if oldmarker != "" && marker != oldmarker {
+ return
+ }
+
+ if syntax != nil {
+ syn := 0
+ i = skipChar(data, i, ' ')
+
+ if i >= len(data) {
+ return
+ }
+
+ syntaxStart := i
+
+ if data[i] == '{' {
+ i++
+ syntaxStart++
+
+ for i < len(data) && data[i] != '}' && data[i] != '\n' {
+ syn++
+ i++
+ }
+
+ if i >= len(data) || data[i] != '}' {
+ return
+ }
+
+ // strip all whitespace at the beginning and the end
+ // of the {} block
+ for syn > 0 && isspace(data[syntaxStart]) {
+ syntaxStart++
+ syn--
+ }
+
+ for syn > 0 && isspace(data[syntaxStart+syn-1]) {
+ syn--
+ }
+
+ i++
+ } else {
+ for i < len(data) && !isspace(data[i]) {
+ syn++
+ i++
+ }
+ }
+
+ language := string(data[syntaxStart : syntaxStart+syn])
+ *syntax = &language
+ }
+
+ i = skipChar(data, i, ' ')
+ if i >= len(data) || data[i] != '\n' {
+ return
+ }
+
+ skip = i + 1
+ return
+}
+
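+// fencedCode copies everything between an opening fence and a matching
+// closing fence verbatim and emits it through BlockCode together with the
+// declared language, if any; an unterminated fence yields 0.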
+func (p *parser) fencedCode(out *bytes.Buffer, data []byte, doRender bool) int {
+ var lang *string
+ beg, marker := p.isFencedCode(data, &lang, "")
+ if beg == 0 || beg >= len(data) {
+ return 0
+ }
+
+ var work bytes.Buffer
+
+ for {
+ // safe to assume beg < len(data)
+
+ // check for the end of the code block
+ fenceEnd, _ := p.isFencedCode(data[beg:], nil, marker)
+ if fenceEnd != 0 {
+ beg += fenceEnd
+ break
+ }
+
+ // copy the current line
+ end := skipUntilChar(data, beg, '\n') + 1
+
+ // did we reach the end of the buffer without a closing marker?
+ if end >= len(data) {
+ return 0
+ }
+
+ // verbatim copy to the working buffer
+ if doRender {
+ work.Write(data[beg:end])
+ }
+ beg = end
+ }
+
+ syntax := ""
+ if lang != nil {
+ syntax = *lang
+ }
+
+ if doRender {
+ p.r.BlockCode(out, work.Bytes(), syntax)
+ }
+
+ return beg
+}
+
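+// table parses a header row plus its alignment underline, then consumes body
+// rows until it reaches a line without any pipe character.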
+func (p *parser) table(out *bytes.Buffer, data []byte) int {
+ var header bytes.Buffer
+ i, columns := p.tableHeader(&header, data)
+ if i == 0 {
+ return 0
+ }
+
+ var body bytes.Buffer
+
+ for i < len(data) {
+ pipes, rowStart := 0, i
+ for ; data[i] != '\n'; i++ {
+ if data[i] == '|' {
+ pipes++
+ }
+ }
+
+ if pipes == 0 {
+ i = rowStart
+ break
+ }
+
+ // include the newline in data sent to tableRow
+ i++
+ p.tableRow(&body, data[rowStart:i], columns, false)
+ }
+
+ p.r.Table(out, header.Bytes(), body.Bytes(), columns)
+
+ return i
+}
+
+// check if the specified position is preceded by an odd number of backslashes
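+// For example, the '|' at index 2 of `a\|b` is escaped (one preceding
+// backslash), while the '|' at index 3 of `a\\|b` is not (two backslashes).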
+func isBackslashEscaped(data []byte, i int) bool {
+ backslashes := 0
+ for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' {
+ backslashes++
+ }
+ return backslashes&1 == 1
+}
+
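+// tableHeader parses the header line and the underline row that fixes the
+// column count and per-column alignment; size stays 0 when the input does
+// not form a valid table header.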
+func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) {
+ i := 0
+ colCount := 1
+ for i = 0; data[i] != '\n'; i++ {
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ colCount++
+ }
+ }
+
+ // doesn't look like a table header
+ if colCount == 1 {
+ return
+ }
+
+ // include the newline in the data sent to tableRow
+ header := data[:i+1]
+
+ // column count ignores pipes at beginning or end of line
+ if data[0] == '|' {
+ colCount--
+ }
+ if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) {
+ colCount--
+ }
+
+ columns = make([]int, colCount)
+
+ // move on to the header underline
+ i++
+ if i >= len(data) {
+ return
+ }
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+ i = skipChar(data, i, ' ')
+
+ // each column header is of form: / *:?-+:? *|/ with # dashes + # colons >= 3
+ // and trailing | optional on last column
+ col := 0
+ for data[i] != '\n' {
+ dashes := 0
+
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_LEFT
+ dashes++
+ }
+ for data[i] == '-' {
+ i++
+ dashes++
+ }
+ if data[i] == ':' {
+ i++
+ columns[col] |= TABLE_ALIGNMENT_RIGHT
+ dashes++
+ }
+ for data[i] == ' ' {
+ i++
+ }
+
+ // end of column test is messy
+ switch {
+ case dashes < 3:
+ // not a valid column
+ return
+
+ case data[i] == '|' && !isBackslashEscaped(data, i):
+ // marker found, now skip past trailing whitespace
+ col++
+ i++
+ for data[i] == ' ' {
+ i++
+ }
+
+ // trailing junk found after last column
+ if col >= colCount && data[i] != '\n' {
+ return
+ }
+
+ case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount:
+ // something else found where marker was required
+ return
+
+ case data[i] == '\n':
+ // marker is optional for the last column
+ col++
+
+ default:
+ // trailing junk found after last column
+ return
+ }
+ }
+ if col != colCount {
+ return
+ }
+
+ p.tableRow(out, header, columns, true)
+ size = i + 1
+ return
+}
+
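+// tableRow splits a single row into cells on unescaped '|' characters, runs
+// inline processing on each cell, and pads with empty cells so the row
+// always has len(columns) entries.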
+func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) {
+ i, col := 0, 0
+ var rowWork bytes.Buffer
+
+ if data[i] == '|' && !isBackslashEscaped(data, i) {
+ i++
+ }
+
+ for col = 0; col < len(columns) && i < len(data); col++ {
+ for data[i] == ' ' {
+ i++
+ }
+
+ cellStart := i
+
+ for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' {
+ i++
+ }
+
+ cellEnd := i
+
+ // skip the end-of-cell marker, possibly taking us past end of buffer
+ i++
+
+ for cellEnd > cellStart && data[cellEnd-1] == ' ' {
+ cellEnd--
+ }
+
+ var cellWork bytes.Buffer
+ p.inline(&cellWork, data[cellStart:cellEnd])
+
+ if header {
+ p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col])
+ } else {
+ p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col])
+ }
+ }
+
+ // pad it out with empty columns to get the right number
+ for ; col < len(columns); col++ {
+ if header {
+ p.r.TableHeaderCell(&rowWork, nil, columns[col])
+ } else {
+ p.r.TableCell(&rowWork, nil, columns[col])
+ }
+ }
+
+ // silently ignore rows with too many cells
+
+ p.r.TableRow(out, rowWork.Bytes())
+}
+
+// returns blockquote prefix length
+func (p *parser) quotePrefix(data []byte) int {
+ i := 0
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+ if data[i] == '>' {
+ if data[i+1] == ' ' {
+ return i + 2
+ }
+ return i + 1
+ }
+ return 0
+}
+
+// parse a blockquote fragment
+func (p *parser) quote(out *bytes.Buffer, data []byte) int {
+ var raw bytes.Buffer
+ beg, end := 0, 0
+ for beg < len(data) {
+ end = beg
+ for data[end] != '\n' {
+ end++
+ }
+ end++
+
+ if pre := p.quotePrefix(data[beg:]); pre > 0 {
+ // skip the prefix
+ beg += pre
+ } else if p.isEmpty(data[beg:]) > 0 &&
+ (end >= len(data) ||
+ (p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0)) {
+ // blockquote ends with at least one blank line
+ // followed by something without a blockquote prefix
+ break
+ }
+
+ // this line is part of the blockquote
+ raw.Write(data[beg:end])
+ beg = end
+ }
+
+ var cooked bytes.Buffer
+ p.block(&cooked, raw.Bytes())
+ p.r.BlockQuote(out, cooked.Bytes())
+ return end
+}
+
+// returns prefix length for block code
+func (p *parser) codePrefix(data []byte) int {
+ if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+ return 4
+ }
+ return 0
+}
+
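+// code handles an indented code block: lines prefixed with four spaces (and
+// interior blank lines) are collected verbatim, trailing newlines are
+// trimmed, and the result is rendered with an empty language tag.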
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+ var work bytes.Buffer
+
+ i := 0
+ for i < len(data) {
+ beg := i
+ for data[i] != '\n' {
+ i++
+ }
+ i++
+
+ blankline := p.isEmpty(data[beg:i]) > 0
+ if pre := p.codePrefix(data[beg:i]); pre > 0 {
+ beg += pre
+ } else if !blankline {
+ // non-empty, non-prefixed line breaks the pre
+ i = beg
+ break
+ }
+
+ // verbatim copy to the working buffer
+ if blankline {
+ work.WriteByte('\n')
+ } else {
+ work.Write(data[beg:i])
+ }
+ }
+
+ // trim all the \n off the end of work
+ workbytes := work.Bytes()
+ eol := len(workbytes)
+ for eol > 0 && workbytes[eol-1] == '\n' {
+ eol--
+ }
+ if eol != len(workbytes) {
+ work.Truncate(eol)
+ }
+
+ work.WriteByte('\n')
+
+ p.r.BlockCode(out, work.Bytes(), "")
+
+ return i
+}
+
+// returns unordered list item prefix
+func (p *parser) uliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // need a *, +, or - followed by a space
+ if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+ data[i+1] != ' ' {
+ return 0
+ }
+ return i + 2
+}
+
+// returns ordered list item prefix
+func (p *parser) oliPrefix(data []byte) int {
+ i := 0
+
+ // start with up to 3 spaces
+ for i < 3 && data[i] == ' ' {
+ i++
+ }
+
+ // count the digits
+ start := i
+ for data[i] >= '0' && data[i] <= '9' {
+ i++
+ }
+
+ // we need >= 1 digits followed by a dot and a space
+ if start == i || data[i] != '.' || data[i+1] != ' ' {
+ return 0
+ }
+ return i + 2
+}
+
+// returns definition list item prefix
+func (p *parser) dliPrefix(data []byte) int {
+ i := 0
+
+ // need a ':' followed by a space
+ if data[i] != ':' || data[i+1] != ' ' {
+ return 0
+ }
+ for data[i] == ' ' {
+ i++
+ }
+ return i + 2
+}
+
+// parse ordered or unordered list block
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+ i := 0
+ flags |= LIST_ITEM_BEGINNING_OF_LIST
+ work := func() bool {
+ for i < len(data) {
+ skip := p.listItem(out, data[i:], &flags)
+ i += skip
+
+ if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+ break
+ }
+ flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+ }
+ return true
+ }
+
+ p.r.List(out, work, flags)
+ return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int {
+ // keep track of the indentation of the first line
+ itemIndent := 0
+ for itemIndent < 3 && data[itemIndent] == ' ' {
+ itemIndent++
+ }
+
+ i := p.uliPrefix(data)
+ if i == 0 {
+ i = p.oliPrefix(data)
+ }
+ if i == 0 {
+ i = p.dliPrefix(data)
+ // reset definition term flag
+ if i > 0 {
+ *flags &= ^LIST_TYPE_TERM
+ }
+ }
+ if i == 0 {
+ // if in definition list, set term flag and continue
+ if *flags&LIST_TYPE_DEFINITION != 0 {
+ *flags |= LIST_TYPE_TERM
+ } else {
+ return 0
+ }
+ }
+
+ // skip leading whitespace on first line
+ for data[i] == ' ' {
+ i++
+ }
+
+ // find the end of the line
+ line := i
+ for i > 0 && data[i-1] != '\n' {
+ i++
+ }
+
+ // get working buffer
+ var raw bytes.Buffer
+
+ // put the first line into the working buffer
+ raw.Write(data[line:i])
+ line = i
+
+ // process the following lines
+ containsBlankLine := false
+ sublist := 0
+
+gatherlines:
+ for line < len(data) {
+ i++
+
+ // find the end of this line
+ for data[i-1] != '\n' {
+ i++
+ }
+
+ // if it is an empty line, guess that it is part of this item
+ // and move on to the next line
+ if p.isEmpty(data[line:i]) > 0 {
+ containsBlankLine = true
+ line = i
+ continue
+ }
+
+ // calculate the indentation
+ indent := 0
+ for indent < 4 && line+indent < i && data[line+indent] == ' ' {
+ indent++
+ }
+
+ chunk := data[line+indent : i]
+
+ // evaluate how this line fits in
+ switch {
+ // is this a nested list item?
+ case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) ||
+ p.oliPrefix(chunk) > 0 ||
+ p.dliPrefix(chunk) > 0:
+
+ if containsBlankLine {
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ // to be a nested list, it must be indented more
+ // if not, it is the next item in the same list
+ if indent <= itemIndent {
+ break gatherlines
+ }
+
+ // is this the first item in the nested list?
+ if sublist == 0 {
+ sublist = raw.Len()
+ }
+
+ // is this a nested prefix header?
+ case p.isPrefixHeader(chunk):
+ // if the header is not indented, it is not nested in the list
+ // and thus ends the list
+ if containsBlankLine && indent < 4 {
+ *flags |= LIST_ITEM_END_OF_LIST
+ break gatherlines
+ }
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+
+ // anything following an empty line is only part
+ // of this item if it is indented 4 spaces
+ // (regardless of the indentation of the beginning of the item)
+ case containsBlankLine && indent < 4:
+ if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 {
+ // is the next item still a part of this list?
+ next := i
+ for data[next] != '\n' {
+ next++
+ }
+ for next < len(data)-1 && data[next] == '\n' {
+ next++
+ }
+ if i < len(data)-1 && data[i] != ':' && data[next] != ':' {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+ } else {
+ *flags |= LIST_ITEM_END_OF_LIST
+ }
+ break gatherlines
+
+ // a blank line means this should be parsed as a block
+ case containsBlankLine:
+ raw.WriteByte('\n')
+ *flags |= LIST_ITEM_CONTAINS_BLOCK
+ }
+
+ // if this line was preceded by one or more blanks,
+ // re-introduce the blank into the buffer
+ if containsBlankLine {
+ containsBlankLine = false
+ raw.WriteByte('\n')
+
+ }
+
+ // add the line into the working buffer without prefix
+ raw.Write(data[line+indent : i])
+
+ line = i
+ }
+
+ rawBytes := raw.Bytes()
+
+ // render the contents of the list item
+ var cooked bytes.Buffer
+ if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 {
+ // intermediate render of block item, except for definition term
+ if sublist > 0 {
+ p.block(&cooked, rawBytes[:sublist])
+ p.block(&cooked, rawBytes[sublist:])
+ } else {
+ p.block(&cooked, rawBytes)
+ }
+ } else {
+ // intermediate render of inline item
+ if sublist > 0 {
+ p.inline(&cooked, rawBytes[:sublist])
+ p.block(&cooked, rawBytes[sublist:])
+ } else {
+ p.inline(&cooked, rawBytes)
+ }
+ }
+
+ // render the actual list item
+ cookedBytes := cooked.Bytes()
+ parsedEnd := len(cookedBytes)
+
+ // strip trailing newlines
+ for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' {
+ parsedEnd--
+ }
+ p.r.ListItem(out, cookedBytes[:parsedEnd], *flags)
+
+ return line
+}
+
+// render a single paragraph that has already been parsed out
+func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) {
+ if len(data) == 0 {
+ return
+ }
+
+ // trim leading spaces
+ beg := 0
+ for data[beg] == ' ' {
+ beg++
+ }
+
+ // trim trailing newline
+ end := len(data) - 1
+
+ // trim trailing spaces
+ for end > beg && data[end-1] == ' ' {
+ end--
+ }
+
+ work := func() bool {
+ p.inline(out, data[beg:end])
+ return true
+ }
+ p.r.Paragraph(out, work)
+}
+
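+// paragraph consumes text until something ends the paragraph: a blank line,
+// an underlined or prefixed header, a horizontal rule, or (when the
+// corresponding extensions are enabled) an HTML block, a definition item, or
+// a list, quote, or code prefix.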
+func (p *parser) paragraph(out *bytes.Buffer, data []byte) int {
+ // prev: index of 1st char of previous line
+ // line: index of 1st char of current line
+ // i: index of cursor/end of current line
+ var prev, line, i int
+
+ // keep going until we find something to mark the end of the paragraph
+ for i < len(data) {
+ // mark the beginning of the current line
+ prev = line
+ current := data[i:]
+ line = i
+
+ // did we find a blank line marking the end of the paragraph?
+ if n := p.isEmpty(current); n > 0 {
+ // is this blank line followed by a definition list item?
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if i < len(data)-1 && data[i+1] == ':' {
+ return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+ }
+ }
+
+ p.renderParagraph(out, data[:i])
+ return i + n
+ }
+
+ // an underline under some text marks a header, so our paragraph ended on prev line
+ if i > 0 {
+ if level := p.isUnderlinedHeader(current); level > 0 {
+ // render the paragraph
+ p.renderParagraph(out, data[:prev])
+
+ // ignore leading and trailing whitespace
+ eol := i - 1
+ for prev < eol && data[prev] == ' ' {
+ prev++
+ }
+ for eol > prev && data[eol-1] == ' ' {
+ eol--
+ }
+
+ // render the header
+ // this ugly double closure avoids forcing variables onto the heap
+ work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool {
+ return func() bool {
+ pp.inline(o, d)
+ return true
+ }
+ }(out, p, data[prev:eol])
+
+ id := ""
+ if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 {
+ id = sanitized_anchor_name.Create(string(data[prev:eol]))
+ }
+
+ p.r.Header(out, work, level, id)
+
+ // find the end of the underline
+ for data[i] != '\n' {
+ i++
+ }
+ return i
+ }
+ }
+
+ // if the next line starts a block of HTML, then the paragraph ends here
+ if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+ if data[i] == '<' && p.html(out, current, false) > 0 {
+ // rewind to before the HTML block
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // if there's a prefixed header or a horizontal rule after this, paragraph is over
+ if p.isPrefixHeader(current) || p.isHRule(current) {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+
+ // if there's a definition list item, prev line is a definition term
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 {
+ if p.dliPrefix(current) != 0 {
+ return p.list(out, data[prev:], LIST_TYPE_DEFINITION)
+ }
+ }
+
+ // if there's a list after this, paragraph is over
+ if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 {
+ if p.uliPrefix(current) != 0 ||
+ p.oliPrefix(current) != 0 ||
+ p.quotePrefix(current) != 0 ||
+ p.codePrefix(current) != 0 {
+ p.renderParagraph(out, data[:i])
+ return i
+ }
+ }
+
+ // otherwise, scan to the beginning of the next line
+ for data[i] != '\n' {
+ i++
+ }
+ i++
+ }
+
+ p.renderParagraph(out, data[:i])
+ return i
+}
diff --git a/Godeps/_workspace/src/github.com/russross/blackfriday/html.go b/Godeps/_workspace/src/github.com/russross/blackfriday/html.go
new file mode 100644
index 0000000..74e67ee
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/russross/blackfriday/html.go
@@ -0,0 +1,949 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+ "bytes"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// Html renderer configuration options.
+const (
+ HTML_SKIP_HTML = 1 << iota // skip preformatted HTML blocks
+ HTML_SKIP_STYLE // skip embedded <style> elements