diff --git a/Dockerfile b/Dockerfile
index 80cc6966..ea484565 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -21,4 +21,9 @@ ENV DATABASE_CONFIG=/var/lib/drone/drone.sqlite
ADD drone_static /drone_static
+# Alpine Linux doesn't use pam, which means that there is no /etc/nsswitch.conf,
+# but Go and CGO rely on /etc/nsswitch.conf to check the order of DNS resolving.
+# To fix this we just create /etc/nsswitch.conf and add the following line:
+RUN echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
+
ENTRYPOINT ["/drone_static"]
diff --git a/Dockerfile.env b/Dockerfile.env
deleted file mode 100644
index d409b603..00000000
--- a/Dockerfile.env
+++ /dev/null
@@ -1,10 +0,0 @@
-# build environment used in .drone.yml
-#
-# docker build --rm=true -t drone/golang:1.5 -f Dockerfile.env .
-
-FROM golang:1.5
-ADD contrib/*.sh /usr/local/bin/
-RUN chmod +x /usr/local/bin/setup-sassc.sh && \
- chmod +x /usr/local/bin/setup-sqlite.sh && \
- /usr/local/bin/setup-sassc.sh && \
- /usr/local/bin/setup-sqlite.sh
diff --git a/api/build.go b/api/build.go
index 45c20bce..75ec3730 100644
--- a/api/build.go
+++ b/api/build.go
@@ -18,7 +18,6 @@ import (
"github.com/gin-gonic/gin"
"github.com/drone/drone/model"
- "github.com/drone/drone/router/middleware/context"
"github.com/drone/drone/router/middleware/session"
)
@@ -130,7 +129,7 @@ func GetBuildLogs(c *gin.Context) {
}
func DeleteBuild(c *gin.Context) {
- engine_ := context.Engine(c)
+ engine_ := engine.FromContext(c)
repo := session.Repo(c)
// parse the build number and job sequence number from
@@ -281,7 +280,7 @@ func PostBuild(c *gin.Context) {
// on status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
- engine_ := context.Engine(c)
+ engine_ := engine.FromContext(c)
go engine_.Schedule(c.Copy(), &engine.Task{
User: user,
Repo: repo,
diff --git a/api/node.go b/api/node.go
index 90b73179..592ec0c1 100644
--- a/api/node.go
+++ b/api/node.go
@@ -6,8 +6,8 @@ import (
"github.com/gin-gonic/gin"
+ "github.com/drone/drone/engine"
"github.com/drone/drone/model"
- "github.com/drone/drone/router/middleware/context"
"github.com/drone/drone/store"
)
@@ -25,7 +25,7 @@ func GetNode(c *gin.Context) {
}
func PostNode(c *gin.Context) {
- engine := context.Engine(c)
+ engine := engine.FromContext(c)
in := struct {
Addr string `json:"address"`
@@ -63,7 +63,7 @@ func PostNode(c *gin.Context) {
}
func DeleteNode(c *gin.Context) {
- engine := context.Engine(c)
+ engine := engine.FromContext(c)
id, _ := strconv.Atoi(c.Param("node"))
node, err := store.GetNode(c, int64(id))
diff --git a/bus/bus.go b/bus/bus.go
new file mode 100644
index 00000000..7bb39c53
--- /dev/null
+++ b/bus/bus.go
@@ -0,0 +1,40 @@
+package bus
+
+//go:generate mockery -name Bus -output mock -case=underscore
+
+import "golang.org/x/net/context"
+
+// Bus represents an event bus implementation that
+// allows a publisher to broadcast Event notifications
+// to a list of subscribers.
+type Bus interface {
+ // Publish broadcasts an event to all subscribers.
+ Publish(*Event)
+
+ // Subscribe adds the channel to the list of
+ // subscribers. Each subscriber in the list will
+ // receive broadcast events.
+ Subscribe(chan *Event)
+
+ // Unsubscribe removes the channel from the list
+ // of subscribers.
+ Unsubscribe(chan *Event)
+}
+
+// Publish broadcasts an event to all subscribers.
+func Publish(c context.Context, event *Event) {
+ FromContext(c).Publish(event)
+}
+
+// Subscribe adds the channel to the list of
+// subscribers. Each subscriber in the list will
+// receive broadcast events.
+func Subscribe(c context.Context, eventc chan *Event) {
+ FromContext(c).Subscribe(eventc)
+}
+
+// Unsubscribe removes the channel from the
+// list of subscribers.
+func Unsubscribe(c context.Context, eventc chan *Event) {
+ FromContext(c).Unsubscribe(eventc)
+}
diff --git a/bus/bus_impl.go b/bus/bus_impl.go
new file mode 100644
index 00000000..d0f0e6a6
--- /dev/null
+++ b/bus/bus_impl.go
@@ -0,0 +1,46 @@
+package bus
+
+import (
+ "sync"
+)
+
+type eventbus struct {
+ sync.Mutex
+ subs map[chan *Event]bool
+}
+
+// New creates a simple event bus that manages a list of
+// subscribers to which events are published.
+func New() Bus {
+ return newEventbus()
+}
+
+func newEventbus() *eventbus {
+ return &eventbus{
+ subs: make(map[chan *Event]bool),
+ }
+}
+
+func (b *eventbus) Subscribe(c chan *Event) {
+ b.Lock()
+ b.subs[c] = true
+ b.Unlock()
+}
+
+func (b *eventbus) Unsubscribe(c chan *Event) {
+ b.Lock()
+ delete(b.subs, c)
+ b.Unlock()
+}
+
+func (b *eventbus) Publish(event *Event) {
+	b.Lock()
+	defer b.Unlock()
+
+	for s := range b.subs {
+		go func(c chan *Event) {
+			defer func() { recover() }()
+			c <- event
+		}(s)
+	}
+}
diff --git a/bus/bus_impl_test.go b/bus/bus_impl_test.go
new file mode 100644
index 00000000..ffcb1e56
--- /dev/null
+++ b/bus/bus_impl_test.go
@@ -0,0 +1,73 @@
+package bus
+
+import (
+ "sync"
+ "testing"
+
+ "github.com/drone/drone/model"
+ . "github.com/franela/goblin"
+ "github.com/gin-gonic/gin"
+)
+
+func TestBus(t *testing.T) {
+	g := Goblin(t)
+	g.Describe("Event bus", func() {
+
+		g.It("Should subscribe", func() {
+			c := new(gin.Context)
+			b := newEventbus()
+			ToContext(c, b)
+
+			c1 := make(chan *Event)
+			c2 := make(chan *Event)
+			Subscribe(c, c1)
+			Subscribe(c, c2)
+
+			g.Assert(len(b.subs)).Equal(2)
+		})
+
+		g.It("Should unsubscribe", func() {
+			c := new(gin.Context)
+			b := newEventbus()
+			ToContext(c, b)
+
+			c1 := make(chan *Event)
+			c2 := make(chan *Event)
+			Subscribe(c, c1)
+			Subscribe(c, c2)
+
+			g.Assert(len(b.subs)).Equal(2)
+
+			Unsubscribe(c, c1)
+			Unsubscribe(c, c2)
+
+			g.Assert(len(b.subs)).Equal(0)
+		})
+
+		g.It("Should publish", func() {
+			c := new(gin.Context)
+			b := New()
+			ToContext(c, b)
+
+			e1 := NewEvent(Started, &model.Repo{}, &model.Build{}, &model.Job{})
+			e2 := NewEvent(Started, &model.Repo{}, &model.Build{}, &model.Job{})
+			c1 := make(chan *Event)
+
+			Subscribe(c, c1)
+
+			var wg sync.WaitGroup
+			wg.Add(1)
+
+			var r1, r2 *Event
+			go func() {
+				r1 = <-c1
+				r2 = <-c1
+				wg.Done()
+			}()
+			Publish(c, e1)
+			Publish(c, e2)
+			wg.Wait()
+		})
+	})
+
+}
diff --git a/bus/context.go b/bus/context.go
new file mode 100644
index 00000000..4eccfa7f
--- /dev/null
+++ b/bus/context.go
@@ -0,0 +1,21 @@
+package bus
+
+import "golang.org/x/net/context"
+
+const key = "bus"
+
+// Setter defines a context that enables setting values.
+type Setter interface {
+ Set(string, interface{})
+}
+
+// FromContext returns the Bus associated with this context.
+func FromContext(c context.Context) Bus {
+ return c.Value(key).(Bus)
+}
+
+// ToContext adds the Bus to this context if it supports
+// the Setter interface.
+func ToContext(c Setter, b Bus) {
+ c.Set(key, b)
+}
diff --git a/bus/types.go b/bus/types.go
new file mode 100644
index 00000000..3dded50b
--- /dev/null
+++ b/bus/types.go
@@ -0,0 +1,32 @@
+package bus
+
+import "github.com/drone/drone/model"
+
+// EventType defines the possible types of build events.
+type EventType string
+
+const (
+ Enqueued EventType = "enqueued"
+ Started EventType = "started"
+ Finished EventType = "finished"
+ Cancelled EventType = "cancelled"
+)
+
+// Event represents a build event.
+type Event struct {
+ Type EventType `json:"type"`
+ Repo model.Repo `json:"repo"`
+ Build model.Build `json:"build"`
+ Job model.Job `json:"job"`
+}
+
+// NewEvent creates a new Event for the build, using copies of
+// the build data to avoid possible mutation or race conditions.
+func NewEvent(t EventType, r *model.Repo, b *model.Build, j *model.Job) *Event {
+ return &Event{
+ Type: t,
+ Repo: *r,
+ Build: *b,
+ Job: *j,
+ }
+}
diff --git a/drone.go b/drone.go
index c92281aa..a0cae10c 100644
--- a/drone.go
+++ b/drone.go
@@ -1,55 +1,53 @@
package main
import (
- "flag"
+ "net/http"
+ "time"
- "github.com/drone/drone/engine"
- "github.com/drone/drone/remote"
"github.com/drone/drone/router"
- "github.com/drone/drone/router/middleware/cache"
- "github.com/drone/drone/router/middleware/context"
- "github.com/drone/drone/router/middleware/header"
- "github.com/drone/drone/shared/envconfig"
- "github.com/drone/drone/shared/server"
- "github.com/drone/drone/store/datastore"
+ "github.com/drone/drone/router/middleware"
"github.com/Sirupsen/logrus"
+ "github.com/gin-gonic/contrib/ginrus"
+ "github.com/ianschenck/envflag"
+ _ "github.com/joho/godotenv/autoload"
)
var (
- dotenv = flag.String("config", ".env", "")
- debug = flag.Bool("debug", false, "")
+ addr = envflag.String("SERVER_ADDR", ":8000", "")
+ cert = envflag.String("SERVER_CERT", "", "")
+ key = envflag.String("SERVER_KEY", "", "")
+
+ debug = envflag.Bool("DEBUG", false, "")
)
func main() {
- flag.Parse()
+ envflag.Parse()
// debug level if requested by user
if *debug {
logrus.SetLevel(logrus.DebugLevel)
+ } else {
+ logrus.SetLevel(logrus.WarnLevel)
}
- // Load the configuration from env file
- env := envconfig.Load(*dotenv)
-
- // Setup the database driver
- store_ := datastore.Load(env)
-
- // setup the remote driver
- remote_ := remote.Load(env)
-
- // setup the runner
- engine_ := engine.Load(env, store_)
-
// setup the server and start the listener
- server_ := server.Load(env)
- server_.Run(
- router.Load(
- header.Version,
- cache.Default(),
- context.SetStore(store_),
- context.SetRemote(remote_),
- context.SetEngine(engine_),
- ),
+ handler := router.Load(
+ ginrus.Ginrus(logrus.StandardLogger(), time.RFC3339, true),
+ middleware.Version,
+ middleware.Cache(),
+ middleware.Store(),
+ middleware.Remote(),
+ middleware.Engine(),
)
+
+ if *cert != "" {
+ logrus.Fatal(
+ http.ListenAndServeTLS(*addr, *cert, *key, handler),
+ )
+ } else {
+ logrus.Fatal(
+ http.ListenAndServe(*addr, handler),
+ )
+ }
}
diff --git a/engine/compiler/builtin/alias.go b/engine/compiler/builtin/alias.go
new file mode 100644
index 00000000..002cd8fa
--- /dev/null
+++ b/engine/compiler/builtin/alias.go
@@ -0,0 +1,29 @@
+package builtin
+
+import (
+ "fmt"
+
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+type aliasOp struct {
+ visitor
+ index map[string]string
+ prefix string
+ suffix int
+}
+
+func NewAliasOp(prefix string) Visitor {
+ return &aliasOp{
+ index: map[string]string{},
+ prefix: prefix,
+ }
+}
+
+func (v *aliasOp) VisitContainer(node *parse.ContainerNode) error {
+ v.suffix++
+
+ node.Container.Alias = node.Container.Name
+ node.Container.Name = fmt.Sprintf("%s_%d", v.prefix, v.suffix)
+ return nil
+}
diff --git a/engine/compiler/builtin/args.go b/engine/compiler/builtin/args.go
new file mode 100644
index 00000000..d0d471c0
--- /dev/null
+++ b/engine/compiler/builtin/args.go
@@ -0,0 +1,95 @@
+package builtin
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+
+	"github.com/drone/drone/engine/compiler/parse"
+
+	json "github.com/ghodss/yaml"
+	"gopkg.in/yaml.v2"
+)
+
+type argsOps struct {
+	visitor
+}
+
+// NewArgsOp returns a transformer that provides the plugin node
+// with the custom arguments from the Yaml file.
+func NewArgsOp() Visitor {
+	return &argsOps{}
+}
+
+func (v *argsOps) VisitContainer(node *parse.ContainerNode) error {
+	switch node.NodeType {
+	case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
+		break // no-op
+	default:
+		return nil
+	}
+	if node.Container.Environment == nil {
+		node.Container.Environment = map[string]string{}
+	}
+	return argsToEnv(node.Vargs, node.Container.Environment)
+}
+
+// argsToEnv uses reflection to convert a map[string]interface to a list
+// of environment variables.
+func argsToEnv(from map[string]interface{}, to map[string]string) error {
+
+	for k, v := range from {
+		// skip nil values (eg a `foo:` yaml key with no value) since
+		// reflect.TypeOf returns nil and t.Kind() below would panic.
+		if v == nil {
+			continue
+		}
+		t := reflect.TypeOf(v)
+		vv := reflect.ValueOf(v)
+
+		k = "PLUGIN_" + strings.ToUpper(k)
+
+		switch t.Kind() {
+		case reflect.Bool:
+			to[k] = strconv.FormatBool(vv.Bool())
+
+		case reflect.String:
+			to[k] = vv.String()
+
+		case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
+			to[k] = fmt.Sprintf("%v", vv.Int())
+
+		case reflect.Float32, reflect.Float64:
+			to[k] = fmt.Sprintf("%v", vv.Float())
+
+		// case reflect.Int, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int8:
+		// 	to[k] = strconv.FormatInt(vv.Int(), 16)
+
+		// case reflect.Float32, reflect.Float64:
+		// 	to[k] = strconv.FormatFloat(vv.Float(), 'E', -1, 64)
+
+		case reflect.Map:
+			yml, _ := yaml.Marshal(vv.Interface())
+			out, _ := json.YAMLToJSON(yml)
+			to[k] = string(out)
+
+		case reflect.Slice:
+			out, _ := yaml.Marshal(vv.Interface())
+
+			in := []string{}
+			err := yaml.Unmarshal(out, &in)
+			if err == nil {
+				to[k] = strings.Join(in, ",")
+			} else {
+				out, err = json.YAMLToJSON(out)
+				if err != nil {
+					println(err.Error())
+				}
+				to[k] = string(out)
+			}
+		}
+	}
+
+	return nil
+}
diff --git a/engine/compiler/builtin/args_test.go b/engine/compiler/builtin/args_test.go
new file mode 100644
index 00000000..1669d48c
--- /dev/null
+++ b/engine/compiler/builtin/args_test.go
@@ -0,0 +1,46 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+
+ "github.com/franela/goblin"
+)
+
+func Test_args(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("plugins arguments", func() {
+
+ g.It("should ignore non-plugin containers", func() {
+ root := parse.NewRootNode()
+ c := root.NewShellNode()
+ c.Container = runner.Container{}
+ c.Vargs = map[string]interface{}{
+ "depth": 50,
+ }
+
+ ops := NewArgsOp()
+ ops.VisitContainer(c)
+
+ g.Assert(c.Container.Environment["PLUGIN_DEPTH"]).Equal("")
+ })
+
+ g.It("should include args as environment variable", func() {
+ root := parse.NewRootNode()
+ c := root.NewPluginNode()
+ c.Container = runner.Container{}
+ c.Vargs = map[string]interface{}{
+ "depth": 50,
+ }
+
+ ops := NewArgsOp()
+ ops.VisitContainer(c)
+
+ g.Assert(c.Container.Environment["PLUGIN_DEPTH"]).Equal("50")
+ })
+ })
+
+}
diff --git a/engine/compiler/builtin/build.go b/engine/compiler/builtin/build.go
new file mode 100644
index 00000000..9d4d6516
--- /dev/null
+++ b/engine/compiler/builtin/build.go
@@ -0,0 +1,40 @@
+package builtin
+
+import (
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+)
+
+// BuildOp is a transform operation that converts the build section of the Yaml
+// to a step in the pipeline responsible for building the Docker image.
+func BuildOp(node parse.Node) error {
+	build, ok := node.(*parse.BuildNode)
+	if !ok {
+		return nil
+	}
+	if build.Context == "" {
+		return nil
+	}
+
+	root := node.Root()
+	builder := root.NewContainerNode()
+
+	command := []string{
+		"build",
+		"--force-rm",
+		"-f", build.Dockerfile,
+		"-t", root.Image,
+		build.Context,
+	}
+
+	builder.Container = runner.Container{
+		Image:      "docker:alpine",
+		Volumes:    []string{"/var/run/docker.sock:/var/run/docker.sock"},
+		Entrypoint: []string{"/usr/local/bin/docker"},
+		Command:    command,
+		WorkingDir: root.Path,
+	}
+
+	root.Services = append(root.Services, builder)
+	return nil
+}
diff --git a/engine/compiler/builtin/cache.go b/engine/compiler/builtin/cache.go
new file mode 100644
index 00000000..acdd2d6d
--- /dev/null
+++ b/engine/compiler/builtin/cache.go
@@ -0,0 +1,61 @@
+package builtin
+
+import (
+ "github.com/drone/drone/engine/runner"
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+type cacheOp struct {
+ visitor
+ enable bool
+ plugin string
+ mount string
+}
+
+// NewCacheOp returns a transformer that configures the default cache plugin.
+func NewCacheOp(plugin, mount string, enable bool) Visitor {
+ return &cacheOp{
+ mount: mount,
+ enable: enable,
+ plugin: plugin,
+ }
+}
+
+func (v *cacheOp) VisitContainer(node *parse.ContainerNode) error {
+ if node.Type() != parse.NodeCache {
+ return nil
+ }
+ if len(node.Vargs) == 0 || v.enable == false {
+ node.Disabled = true
+ return nil
+ }
+
+ if node.Container.Name == "" {
+ node.Container.Name = "cache"
+ }
+ if node.Container.Image == "" {
+ node.Container.Image = v.plugin
+ }
+
+ // discard any other cache properties except the image name.
+	// everything else is discarded for security reasons.
+ node.Container = runner.Container{
+ Name: node.Container.Name,
+ Alias: node.Container.Alias,
+ Image: node.Container.Image,
+ Volumes: []string{
+ v.mount + ":/cache",
+ },
+ }
+
+ // this is a hack until I can come up with a better solution.
+ // this copies the clone name, and appends at the end of the
+ // build. When it is executed a second time the build should
+ // have a completed status, so it knows to cache instead
+ // of restore.
+ cache := node.Root().NewCacheNode()
+ cache.Vargs = node.Vargs
+ cache.Container = node.Container
+ node.Root().Script = append(node.Root().Script, cache)
+ return nil
+}
\ No newline at end of file
diff --git a/engine/compiler/builtin/cache_test.go b/engine/compiler/builtin/cache_test.go
new file mode 100644
index 00000000..9d5e04a6
--- /dev/null
+++ b/engine/compiler/builtin/cache_test.go
@@ -0,0 +1,37 @@
+package builtin
+
+// import (
+// "testing"
+
+// "github.com/libcd/libcd"
+// "github.com/libcd/libyaml/parse"
+
+// "github.com/franela/goblin"
+// )
+
+// func Test_cache(t *testing.T) {
+// root := parse.NewRootNode()
+
+// g := goblin.Goblin(t)
+// g.Describe("cache", func() {
+
+// g.It("should use default when nil", func() {
+// op := NewCacheOp("plugins/cache:latest", "/tmp/cache")
+
+// op.VisitRoot(root)
+// g.Assert(root.Cache.(*parse.ContainerNode).Container.Image).Equal("plugins/cache:latest")
+// g.Assert(root.Cache.(*parse.ContainerNode).Container.Volumes[0]).Equal("/tmp/cache:/cache")
+// })
+
+// g.It("should use user-defined cache plugin", func() {
+// op := NewCacheOp("plugins/cache:latest", "/tmp/cache")
+// cache := root.NewCacheNode()
+// cache.Container = libcd.Container{}
+// cache.Container.Image = "custom/cacher:latest"
+// root.Cache = cache
+
+// op.VisitRoot(root)
+// g.Assert(cache.Container.Image).Equal("custom/cacher:latest")
+// })
+// })
+// }
diff --git a/engine/compiler/builtin/clone.go b/engine/compiler/builtin/clone.go
new file mode 100644
index 00000000..3b2c79c2
--- /dev/null
+++ b/engine/compiler/builtin/clone.go
@@ -0,0 +1,45 @@
+package builtin
+
+import (
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+)
+
+type cloneOp struct {
+ visitor
+ plugin string
+ enable bool
+}
+
+// NewCloneOp returns a transformer that configures the default clone plugin.
+func NewCloneOp(plugin string, enable bool) Visitor {
+ return &cloneOp{
+ enable: enable,
+ plugin: plugin,
+ }
+}
+
+func (v *cloneOp) VisitContainer(node *parse.ContainerNode) error {
+ if node.Type() != parse.NodeClone {
+ return nil
+ }
+ if v.enable == false {
+ node.Disabled = true
+ return nil
+ }
+
+ if node.Container.Name == "" {
+ node.Container.Name = "clone"
+ }
+ if node.Container.Image == "" {
+ node.Container.Image = v.plugin
+ }
+
+ // discard any other cache properties except the image name.
+	// everything else is discarded for security reasons.
+ node.Container = runner.Container{
+ Name: node.Container.Name,
+ Image: node.Container.Image,
+ }
+ return nil
+}
diff --git a/engine/compiler/builtin/clone_test.go b/engine/compiler/builtin/clone_test.go
new file mode 100644
index 00000000..98d86993
--- /dev/null
+++ b/engine/compiler/builtin/clone_test.go
@@ -0,0 +1,36 @@
+package builtin
+
+// import (
+// "testing"
+
+// "github.com/libcd/libcd"
+// "github.com/libcd/libyaml/parse"
+
+// "github.com/franela/goblin"
+// )
+
+// func Test_clone(t *testing.T) {
+// root := parse.NewRootNode()
+
+// g := goblin.Goblin(t)
+// g.Describe("clone", func() {
+
+// g.It("should use default when nil", func() {
+// op := NewCloneOp("plugins/git:latest")
+
+// op.VisitRoot(root)
+// g.Assert(root.Clone.(*parse.ContainerNode).Container.Image).Equal("plugins/git:latest")
+// })
+
+// g.It("should use user-defined clone plugin", func() {
+// op := NewCloneOp("plugins/git:latest")
+// clone := root.NewCloneNode()
+// clone.Container = libcd.Container{}
+// clone.Container.Image = "custom/hg:latest"
+// root.Clone = clone
+
+// op.VisitRoot(root)
+// g.Assert(clone.Container.Image).Equal("custom/hg:latest")
+// })
+// })
+// }
diff --git a/engine/compiler/builtin/envs.go b/engine/compiler/builtin/envs.go
new file mode 100644
index 00000000..42fb595e
--- /dev/null
+++ b/engine/compiler/builtin/envs.go
@@ -0,0 +1,56 @@
+package builtin
+
+import (
+	"os"
+
+	"github.com/drone/drone/engine/compiler/parse"
+)
+
+var (
+	httpProxy  = os.Getenv("HTTP_PROXY")
+	httpsProxy = os.Getenv("HTTPS_PROXY")
+	noProxy    = os.Getenv("NO_PROXY")
+)
+
+type envOp struct {
+	visitor
+	envs map[string]string
+}
+
+// NewEnvOp returns a transformer that sets default environment variables
+// for each container, service and plugin.
+func NewEnvOp(envs map[string]string) Visitor {
+	return &envOp{
+		envs: envs,
+	}
+}
+
+func (v *envOp) VisitContainer(node *parse.ContainerNode) error {
+	if node.Container.Environment == nil {
+		node.Container.Environment = map[string]string{}
+	}
+	v.defaultEnv(node)
+	v.defaultEnvProxy(node)
+	return nil
+}
+
+func (v *envOp) defaultEnv(node *parse.ContainerNode) {
+	for k, v := range v.envs {
+		node.Container.Environment[k] = v
+	}
+}
+
+// defaultEnvProxy mirrors the host proxy settings into the container.
+// The lowercase variants carry the same value as the uppercase ones;
+// upper-casing a proxy URL would corrupt it.
+func (v *envOp) defaultEnvProxy(node *parse.ContainerNode) {
+	if httpProxy != "" {
+		node.Container.Environment["HTTP_PROXY"] = httpProxy
+		node.Container.Environment["http_proxy"] = httpProxy
+	}
+	if httpsProxy != "" {
+		node.Container.Environment["HTTPS_PROXY"] = httpsProxy
+		node.Container.Environment["https_proxy"] = httpsProxy
+	}
+	if noProxy != "" {
+		node.Container.Environment["NO_PROXY"] = noProxy
+		node.Container.Environment["no_proxy"] = noProxy
+	}
+}
diff --git a/engine/compiler/builtin/envs_test.go b/engine/compiler/builtin/envs_test.go
new file mode 100644
index 00000000..aab72c50
--- /dev/null
+++ b/engine/compiler/builtin/envs_test.go
@@ -0,0 +1,45 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+
+ "github.com/franela/goblin"
+)
+
+func Test_env(t *testing.T) {
+ root := parse.NewRootNode()
+
+ g := goblin.Goblin(t)
+ g.Describe("environment variables", func() {
+
+ g.It("should be copied", func() {
+ envs := map[string]string{"CI": "drone"}
+
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ op := NewEnvOp(envs)
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Environment["CI"]).Equal("drone")
+ })
+
+ g.It("should include http proxy variables", func() {
+ httpProxy = "foo"
+ httpsProxy = "bar"
+ noProxy = "baz"
+
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ op := NewEnvOp(map[string]string{})
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Environment["HTTP_PROXY"]).Equal("foo")
+ g.Assert(c.Container.Environment["HTTPS_PROXY"]).Equal("bar")
+ g.Assert(c.Container.Environment["NO_PROXY"]).Equal("baz")
+ })
+
+ })
+}
diff --git a/engine/compiler/builtin/filter.go b/engine/compiler/builtin/filter.go
new file mode 100644
index 00000000..88f64283
--- /dev/null
+++ b/engine/compiler/builtin/filter.go
@@ -0,0 +1,129 @@
+package builtin
+
+import (
+	"path/filepath"
+
+	"github.com/drone/drone/engine/compiler/parse"
+)
+
+type filterOp struct {
+	visitor
+	status   string
+	branch   string
+	event    string
+	environ  string
+	platform string
+	matrix   map[string]string
+}
+
+// NewFilterOp returns a transformer that filters (ie removes) steps
+// from the process based on conditional logic in the yaml.
+func NewFilterOp(status, branch, event, env string, matrix map[string]string) Visitor {
+	return &filterOp{
+		status:  status,
+		branch:  branch,
+		event:   event,
+		environ: env,
+		matrix:  matrix,
+	}
+}
+
+func (v *filterOp) VisitContainer(node *parse.ContainerNode) error {
+	v.visitStatus(node)
+	v.visitBranch(node)
+	v.visitEvent(node)
+	v.visitEnvironment(node)
+	v.visitMatrix(node)
+	v.visitPlatform(node)
+	return nil
+}
+
+// visitStatus is a helper function that converts an on_change status
+// filter to either success or failure based on the prior build status.
+func (v *filterOp) visitStatus(node *parse.ContainerNode) {
+	if len(node.Conditions.Status) == 0 {
+		node.Conditions.Status = []string{"success"}
+		return
+	}
+	for _, status := range node.Conditions.Status {
+		if status != "change" && status != "changed" && status != "changes" {
+			continue
+		}
+		var want []string
+		switch v.status {
+		case "success":
+			want = append(want, "failure")
+		case "failure", "error", "killed":
+			want = append(want, "success")
+		default:
+			want = []string{"success", "failure"}
+		}
+		node.Conditions.Status = append(node.Conditions.Status, want...)
+		break
+	}
+}
+
+// visitBranch is a helper function that disables container steps when
+// the branch conditions are not satisfied.
+func (v *filterOp) visitBranch(node *parse.ContainerNode) {
+	if len(node.Conditions.Branch) == 0 {
+		return
+	}
+	for _, pattern := range node.Conditions.Branch {
+		if ok, _ := filepath.Match(pattern, v.branch); ok {
+			return
+		}
+	}
+	node.Disabled = true
+}
+
+// visitEnvironment is a helper function that disables container steps
+// when the deployment environment conditions are not satisfied.
+func (v *filterOp) visitEnvironment(node *parse.ContainerNode) {
+	if len(node.Conditions.Environment) == 0 {
+		return
+	}
+	for _, pattern := range node.Conditions.Environment {
+		if ok, _ := filepath.Match(pattern, v.environ); ok {
+			return
+		}
+	}
+	node.Disabled = true
+}
+
+// visitEvent is a helper function that disables container steps
+// when the build event conditions are not satisfied.
+func (v *filterOp) visitEvent(node *parse.ContainerNode) {
+	if len(node.Conditions.Event) == 0 {
+		return
+	}
+	for _, pattern := range node.Conditions.Event {
+		if ok, _ := filepath.Match(pattern, v.event); ok {
+			return
+		}
+	}
+	node.Disabled = true
+}
+
+func (v *filterOp) visitMatrix(node *parse.ContainerNode) {
+	for key, val := range node.Conditions.Matrix {
+		if v.matrix[key] != val {
+			node.Disabled = true
+			break
+		}
+	}
+}
+
+// visitPlatform is a helper function that disables container steps
+// when the build platform conditions are not satisfied.
+func (v *filterOp) visitPlatform(node *parse.ContainerNode) {
+	if len(node.Conditions.Platform) == 0 {
+		return
+	}
+	for _, pattern := range node.Conditions.Platform {
+		if ok, _ := filepath.Match(pattern, v.platform); ok {
+			return
+		}
+	}
+	node.Disabled = true
+}
diff --git a/engine/compiler/builtin/filter_test.go b/engine/compiler/builtin/filter_test.go
new file mode 100644
index 00000000..ae01fa3c
--- /dev/null
+++ b/engine/compiler/builtin/filter_test.go
@@ -0,0 +1,130 @@
+package builtin
+
+// import (
+// "testing"
+
+// "github.com/franela/goblin"
+// )
+
+// func TestFilter(t *testing.T) {
+// g := goblin.Goblin(t)
+// g.Describe("Filters", func() {
+
+// g.It("Should match no branch filter", func() {
+// c := &Container{}
+// FilterBranch("feature/foo")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should match branch", func() {
+// c := &Container{}
+// c.Conditions.Branch.parts = []string{"feature/*"}
+// FilterBranch("feature/foo")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should match branch wildcard", func() {
+// c := &Container{}
+// c.Conditions.Branch.parts = []string{"feature/*"}
+// FilterBranch("feature/foo")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should disable when branch filter doesn't match", func() {
+// c := &Container{}
+// c.Conditions.Branch.parts = []string{"feature/*", "develop"}
+// FilterBranch("master")(nil, c)
+// g.Assert(c.Disabled).IsTrue()
+// })
+
+// g.It("Should match no platform filter", func() {
+// c := &Container{}
+// FilterPlatform("linux_amd64")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should match platform", func() {
+// c := &Container{}
+// c.Conditions.Platform.parts = []string{"linux_amd64"}
+// FilterPlatform("linux_amd64")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should disable when platform filter doesn't match", func() {
+// c := &Container{}
+// c.Conditions.Platform.parts = []string{"linux_arm", "linux_arm64"}
+// FilterPlatform("linux_amd64")(nil, c)
+// g.Assert(c.Disabled).IsTrue()
+// })
+
+// g.It("Should match no environment filter", func() {
+// c := &Container{}
+// FilterEnvironment("production")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should match environment", func() {
+// c := &Container{}
+// c.Conditions.Environment.parts = []string{"production"}
+// FilterEnvironment("production")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should disable when environment filter doesn't match", func() {
+// c := &Container{}
+// c.Conditions.Environment.parts = []string{"develop", "staging"}
+// FilterEnvironment("production")(nil, c)
+// g.Assert(c.Disabled).IsTrue()
+// })
+
+// g.It("Should match no event filter", func() {
+// c := &Container{}
+// FilterEvent("push")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should match event", func() {
+// c := &Container{}
+// c.Conditions.Event.parts = []string{"push"}
+// FilterEvent("push")(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should disable when event filter doesn't match", func() {
+// c := &Container{}
+// c.Conditions.Event.parts = []string{"push", "tag"}
+// FilterEvent("pull_request")(nil, c)
+// g.Assert(c.Disabled).IsTrue()
+// })
+
+// g.It("Should match matrix", func() {
+// c := &Container{}
+// c.Conditions.Matrix = map[string]string{
+// "go": "1.5",
+// "redis": "3.0",
+// }
+// matrix := map[string]string{
+// "go": "1.5",
+// "redis": "3.0",
+// "node": "5.0.0",
+// }
+// FilterMatrix(matrix)(nil, c)
+// g.Assert(c.Disabled).IsFalse()
+// })
+
+// g.It("Should disable when event filter doesn't match", func() {
+// c := &Container{}
+// c.Conditions.Matrix = map[string]string{
+// "go": "1.5",
+// "redis": "3.0",
+// }
+// matrix := map[string]string{
+// "go": "1.4.2",
+// "redis": "3.0",
+// "node": "5.0.0",
+// }
+// FilterMatrix(matrix)(nil, c)
+// g.Assert(c.Disabled).IsTrue()
+// })
+// })
+// }
diff --git a/engine/compiler/builtin/normalize.go b/engine/compiler/builtin/normalize.go
new file mode 100644
index 00000000..90c40418
--- /dev/null
+++ b/engine/compiler/builtin/normalize.go
@@ -0,0 +1,63 @@
+package builtin
+
+import (
+ "path/filepath"
+ "strings"
+
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+type normalizeOp struct {
+ visitor
+ namespace string
+}
+
+// NewNormalizeOp returns a transformer that normalizes the container image
+// names and plugin names to their fully qualified values.
+func NewNormalizeOp(namespace string) Visitor {
+ return &normalizeOp{
+ namespace: namespace,
+ }
+}
+
+func (v *normalizeOp) VisitContainer(node *parse.ContainerNode) error {
+ v.normalizeName(node)
+ v.normalizeImage(node)
+ switch node.NodeType {
+ case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
+ v.normalizePlugin(node)
+ }
+ return nil
+}
+
+// normalize the container image to the fully qualified name.
+func (v *normalizeOp) normalizeImage(node *parse.ContainerNode) {
+ if strings.Contains(node.Container.Image, ":") {
+ return
+ }
+ node.Container.Image = node.Container.Image + ":latest"
+}
+
+// normalize the plugin entrypoint and command values.
+func (v *normalizeOp) normalizePlugin(node *parse.ContainerNode) {
+ if strings.Contains(node.Container.Image, "/") {
+ return
+ }
+ node.Container.Image = filepath.Join(v.namespace, node.Container.Image)
+}
+
+// normalize the container name to ensure a value is set.
+func (v *normalizeOp) normalizeName(node *parse.ContainerNode) {
+	if node.Container.Name != "" {
+		return
+	}
+
+	parts := strings.Split(node.Container.Image, "/")
+	if len(parts) != 0 {
+		node.Container.Name = parts[len(parts)-1]
+	}
+	parts = strings.Split(node.Container.Name, ":")
+	if len(parts) != 0 {
+		node.Container.Name = parts[0]
+	}
+}
diff --git a/engine/compiler/builtin/normalize_test.go b/engine/compiler/builtin/normalize_test.go
new file mode 100644
index 00000000..ecf6e4ec
--- /dev/null
+++ b/engine/compiler/builtin/normalize_test.go
@@ -0,0 +1,69 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+
+ "github.com/franela/goblin"
+)
+
+func Test_normalize(t *testing.T) {
+ root := parse.NewRootNode()
+
+ g := goblin.Goblin(t)
+ g.Describe("normalizing", func() {
+
+ g.Describe("images", func() {
+
+ g.It("should append tag if empty", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{Image: "golang"}
+ op := NewNormalizeOp("")
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Image).Equal("golang:latest")
+ })
+
+ g.It("should not override existing tag", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{Image: "golang:1.5"}
+ op := NewNormalizeOp("")
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Image).Equal("golang:1.5")
+ })
+ })
+
+ g.Describe("plugins", func() {
+
+ g.It("should prepend namespace", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{Image: "git"}
+ op := NewNormalizeOp("plugins")
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Image).Equal("plugins/git:latest")
+ })
+
+ g.It("should not override existing namespace", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{Image: "index.docker.io/drone/git"}
+ op := NewNormalizeOp("plugins")
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Image).Equal("index.docker.io/drone/git:latest")
+ })
+
+ g.It("should ignore shell or service types", func() {
+ c := root.NewShellNode()
+ c.Container = runner.Container{Image: "golang"}
+ op := NewNormalizeOp("plugins")
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Image).Equal("golang:latest")
+ })
+ })
+ })
+}
diff --git a/engine/compiler/builtin/pod.go b/engine/compiler/builtin/pod.go
new file mode 100644
index 00000000..630b353c
--- /dev/null
+++ b/engine/compiler/builtin/pod.go
@@ -0,0 +1,50 @@
+package builtin
+
+import (
+ "fmt"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+)
+
+type podOp struct {
+ visitor
+ name string
+}
+
+// NewPodOp returns a transformer that configures an ambassador container
+// providing shared networking and container volumes.
+func NewPodOp(name string) Visitor {
+ return &podOp{
+ name: name,
+ }
+}
+
+func (v *podOp) VisitContainer(node *parse.ContainerNode) error {
+ if node.Container.Network == "" {
+ parent := fmt.Sprintf("container:%s", v.name)
+ node.Container.Network = parent
+ }
+ node.Container.VolumesFrom = append(node.Container.VolumesFrom, v.name)
+ return nil
+}
+
+func (v *podOp) VisitRoot(node *parse.RootNode) error {
+ service := node.NewServiceNode()
+ service.Container = runner.Container{
+ Name: v.name,
+ Alias: "ambassador",
+ Image: "busybox",
+ Entrypoint: []string{"/bin/sleep"},
+ Command: []string{"86400"},
+ Volumes: []string{node.Path, node.Base},
+ // Entrypoint: []string{"/bin/sh", "-c"},
+ // Volumes: []string{node.Base},
+ // Command: []string{
+ // fmt.Sprintf("mkdir -p %s; sleep 86400", node.Path),
+ // },
+ }
+
+ node.Pod = service
+ return nil
+}
diff --git a/engine/compiler/builtin/pull.go b/engine/compiler/builtin/pull.go
new file mode 100644
index 00000000..5796b572
--- /dev/null
+++ b/engine/compiler/builtin/pull.go
@@ -0,0 +1,26 @@
+package builtin
+
+import (
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+type pullOp struct {
+ visitor
+ pull bool
+}
+
+// NewPullOp returns a transformer that configures plugins to automatically
+// pull the latest images at runtime.
+func NewPullOp(pull bool) Visitor {
+ return &pullOp{
+ pull: pull,
+ }
+}
+
+func (v *pullOp) VisitContainer(node *parse.ContainerNode) error {
+ switch node.NodeType {
+ case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
+ node.Container.Pull = v.pull
+ }
+ return nil
+}
diff --git a/engine/compiler/builtin/pull_test.go b/engine/compiler/builtin/pull_test.go
new file mode 100644
index 00000000..882d3210
--- /dev/null
+++ b/engine/compiler/builtin/pull_test.go
@@ -0,0 +1,45 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+
+ "github.com/franela/goblin"
+)
+
+func Test_pull(t *testing.T) {
+ root := parse.NewRootNode()
+
+ g := goblin.Goblin(t)
+ g.Describe("pull image", func() {
+
+ g.It("should be enabled for plugins", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{}
+ op := NewPullOp(true)
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Pull).IsTrue()
+ })
+
+ g.It("should be disabled for plugins", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{}
+ op := NewPullOp(false)
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Pull).IsFalse()
+ })
+
+ g.It("should be disabled for non-plugins", func() {
+ c := root.NewShellNode()
+ c.Container = runner.Container{}
+ op := NewPullOp(true)
+
+ op.VisitContainer(c)
+ g.Assert(c.Container.Pull).IsFalse()
+ })
+ })
+}
diff --git a/engine/compiler/builtin/shell.go b/engine/compiler/builtin/shell.go
new file mode 100644
index 00000000..3ad3d004
--- /dev/null
+++ b/engine/compiler/builtin/shell.go
@@ -0,0 +1,85 @@
+package builtin
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+const (
+ Freebsd_amd64 = "freebsd_amd64"
+ Linux_adm64 = "linux_amd64"
+ Windows_amd64 = "windows_amd64"
+)
+
+type shellOp struct {
+ visitor
+ platform string
+}
+
+// NewShellOp returns a transformer that converts the shell node to
+// a runnable container.
+func NewShellOp(platform string) Visitor {
+ return &shellOp{
+ platform: platform,
+ }
+}
+
+func (v *shellOp) VisitContainer(node *parse.ContainerNode) error {
+ if node.NodeType != parse.NodeShell {
+ return nil
+ }
+
+ node.Container.Entrypoint = []string{
+ "/bin/sh", "-c",
+ }
+ node.Container.Command = []string{
+ "echo $CI_CMDS | base64 -d | /bin/sh -e",
+ }
+ if node.Container.Environment == nil {
+ node.Container.Environment = map[string]string{}
+ }
+ node.Container.Environment["HOME"] = "/root"
+ node.Container.Environment["SHELL"] = "/bin/sh"
+ node.Container.Environment["CI_CMDS"] = toScript(
+ node.Root().Path,
+ node.Commands,
+ )
+
+ return nil
+}
+
+// toScript converts the shell commands to a base64 encoded
+// build script that echoes each command before executing it.
+func toScript(base string, commands []string) string {
+	var buf bytes.Buffer
+	for _, command := range commands {
+		buf.WriteString(fmt.Sprintf(
+			traceScript,
+			command,
+			command,
+		))
+	}
+	script := fmt.Sprintf(
+		setupScript,
+		buf.String(),
+	)
+	return base64.StdEncoding.EncodeToString([]byte(script))
+}
+
+// setupScript is a helper script this is added to the build to ensure
+// a minimum set of environment variables are set correctly.
+const setupScript = `
+echo $DRONE_NETRC > $HOME/.netrc
+
+%s
+`
+
+// traceScript is a helper script that is added to the build script
+// to trace a command.
+const traceScript = `
+echo %q
+%s
+`
diff --git a/engine/compiler/builtin/shell_test.go b/engine/compiler/builtin/shell_test.go
new file mode 100644
index 00000000..f771a8df
--- /dev/null
+++ b/engine/compiler/builtin/shell_test.go
@@ -0,0 +1,44 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+
+ "github.com/franela/goblin"
+)
+
+func Test_shell(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("shell containers", func() {
+
+ g.It("should ignore plugin steps", func() {
+ root := parse.NewRootNode()
+ c := root.NewPluginNode()
+ c.Container = runner.Container{}
+ ops := NewShellOp(Linux_adm64)
+ ops.VisitContainer(c)
+
+ g.Assert(len(c.Container.Entrypoint)).Equal(0)
+ g.Assert(len(c.Container.Command)).Equal(0)
+ g.Assert(c.Container.Environment["CI_CMDS"]).Equal("")
+ })
+
+ g.It("should set entrypoint, command and environment variables", func() {
+ root := parse.NewRootNode()
+ root.Base = "/go"
+ root.Path = "/go/src/github.com/octocat/hello-world"
+
+ c := root.NewShellNode()
+ c.Commands = []string{"go build"}
+ ops := NewShellOp(Linux_adm64)
+ ops.VisitContainer(c)
+
+ g.Assert(c.Container.Entrypoint).Equal([]string{"/bin/sh", "-c"})
+ g.Assert(c.Container.Command).Equal([]string{"echo $CI_CMDS | base64 -d | /bin/sh -e"})
+ g.Assert(c.Container.Environment["CI_CMDS"] != "").IsTrue()
+ })
+ })
+}
diff --git a/engine/compiler/builtin/validate.go b/engine/compiler/builtin/validate.go
new file mode 100644
index 00000000..b19678e8
--- /dev/null
+++ b/engine/compiler/builtin/validate.go
@@ -0,0 +1,105 @@
+package builtin
+
+import (
+ "fmt"
+ "path/filepath"
+
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+type validateOp struct {
+ visitor
+ plugins []string
+ trusted bool
+}
+
+// NewValidateOp returns a linter that checks container configuration.
+func NewValidateOp(trusted bool, plugins []string) Visitor {
+ return &validateOp{
+ trusted: trusted,
+ plugins: plugins,
+ }
+}
+
+func (v *validateOp) VisitContainer(node *parse.ContainerNode) error {
+ switch node.NodeType {
+ case parse.NodePlugin, parse.NodeCache, parse.NodeClone:
+ if err := v.validatePlugins(node); err != nil {
+ return err
+ }
+ }
+ if node.NodeType == parse.NodePlugin {
+ if err := v.validatePluginConfig(node); err != nil {
+ return err
+ }
+ }
+ return v.validateConfig(node)
+}
+
+// validate the plugin image and return an error if the plugin
+// image does not match the whitelist.
+func (v *validateOp) validatePlugins(node *parse.ContainerNode) error {
+ match := false
+ for _, pattern := range v.plugins {
+ ok, err := filepath.Match(pattern, node.Container.Image)
+ if ok && err == nil {
+ match = true
+ break
+ }
+ }
+ if !match {
+ return fmt.Errorf(
+ "Plugin %s is not in the whitelist",
+ node.Container.Image,
+ )
+ }
+ return nil
+}
+
+// validate the plugin command and entrypoint and return an error
+// the user attempts to set or override these values.
+func (v *validateOp) validatePluginConfig(node *parse.ContainerNode) error {
+ if len(node.Container.Entrypoint) != 0 {
+ return fmt.Errorf("Cannot set plugin Entrypoint")
+ }
+ if len(node.Container.Command) != 0 {
+ return fmt.Errorf("Cannot set plugin Command")
+ }
+ return nil
+}
+
+// validate the container configuration and return an error if
+// restricted configurations are used.
+func (v *validateOp) validateConfig(node *parse.ContainerNode) error {
+ if v.trusted {
+ return nil
+ }
+ if node.Container.Privileged {
+ return fmt.Errorf("Insufficient privileges to use privileged mode")
+ }
+ if len(node.Container.DNS) != 0 {
+ return fmt.Errorf("Insufficient privileges to use custom dns")
+ }
+ if len(node.Container.DNSSearch) != 0 {
+ return fmt.Errorf("Insufficient privileges to use dns_search")
+ }
+ if len(node.Container.Devices) != 0 {
+ return fmt.Errorf("Insufficient privileges to use devices")
+ }
+ if len(node.Container.ExtraHosts) != 0 {
+ return fmt.Errorf("Insufficient privileges to use extra_hosts")
+ }
+ if len(node.Container.Network) != 0 {
+ return fmt.Errorf("Insufficient privileges to override the network")
+ }
+ if node.Container.OomKillDisable {
+ return fmt.Errorf("Insufficient privileges to disable oom_kill")
+ }
+ if len(node.Container.Volumes) != 0 && node.Type() != parse.NodeCache {
+ return fmt.Errorf("Insufficient privileges to use volumes")
+ }
+ if len(node.Container.VolumesFrom) != 0 {
+ return fmt.Errorf("Insufficient privileges to use volumes_from")
+ }
+ return nil
+}
diff --git a/engine/compiler/builtin/validate_test.go b/engine/compiler/builtin/validate_test.go
new file mode 100644
index 00000000..1744c628
--- /dev/null
+++ b/engine/compiler/builtin/validate_test.go
@@ -0,0 +1,199 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/drone/drone/engine/compiler/parse"
+ "github.com/drone/drone/engine/runner"
+
+ "github.com/franela/goblin"
+)
+
+func Test_validate(t *testing.T) {
+ root := parse.NewRootNode()
+
+ g := goblin.Goblin(t)
+ g.Describe("validating", func() {
+
+ g.Describe("privileged attributes", func() {
+
+ g.It("should not error when trusted build", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ ops := NewValidateOp(true, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err == nil).IsTrue("error should be nil")
+ })
+
+ g.It("should error when privleged mode", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.Privileged = true
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use privileged mode")
+ })
+
+ g.It("should error when dns configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.DNS = []string{"8.8.8.8"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use custom dns")
+ })
+
+ g.It("should error when dns_search configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.DNSSearch = []string{"8.8.8.8"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use dns_search")
+ })
+
+ g.It("should error when devices configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.Devices = []string{"/dev/foo"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use devices")
+ })
+
+ g.It("should error when extra_hosts configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.ExtraHosts = []string{"1.2.3.4 foo.com"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use extra_hosts")
+ })
+
+ g.It("should error when network configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.Network = "host"
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to override the network")
+ })
+
+ g.It("should error when oom_kill_disabled configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.OomKillDisable = true
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to disable oom_kill")
+ })
+
+ g.It("should error when volumes configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.Volumes = []string{"/:/tmp"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use volumes")
+ })
+
+ g.It("should error when volumes_from configured", func() {
+ c := root.NewContainerNode()
+ c.Container = runner.Container{}
+ c.Container.VolumesFrom = []string{"drone"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Insufficient privileges to use volumes_from")
+ })
+ })
+
+ g.Describe("plugin configuration", func() {
+ g.It("should error when entrypoint is configured", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{Image: "plugins/git"}
+ c.Container.Entrypoint = []string{"/bin/sh"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Cannot set plugin Entrypoint")
+ })
+
+ g.It("should error when command is configured", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{Image: "plugins/git"}
+ c.Container.Command = []string{"cat", "/proc/1/status"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err != nil).IsTrue("error should not be nil")
+ g.Assert(err.Error()).Equal("Cannot set plugin Command")
+ })
+
+ g.It("should not error when empty entrypoint, command", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{Image: "plugins/git"}
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err == nil).IsTrue("error should be nil")
+ })
+ })
+
+ g.Describe("plugin whitelist", func() {
+
+			g.It("should error when no match found", func() {
+				c := root.NewPluginNode()
+				c.Container = runner.Container{}
+				c.Container.Image = "custom/git"
+
+				ops := NewValidateOp(false, []string{"plugins/*"})
+				err := ops.VisitContainer(c)
+
+				g.Assert(err != nil).IsTrue("error should not be nil")
+				g.Assert(err.Error()).Equal("Plugin custom/git is not in the whitelist")
+			})
+
+ g.It("should not error when match found", func() {
+ c := root.NewPluginNode()
+ c.Container = runner.Container{}
+ c.Container.Image = "plugins/git"
+
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err == nil).IsTrue("error should be nil")
+ })
+
+ g.It("should ignore build images", func() {
+ c := root.NewShellNode()
+ c.Container = runner.Container{}
+ c.Container.Image = "google/golang"
+
+ ops := NewValidateOp(false, []string{"plugins/*"})
+ err := ops.VisitContainer(c)
+
+ g.Assert(err == nil).IsTrue("error should be nil")
+ })
+ })
+ })
+}
diff --git a/engine/compiler/builtin/visitor.go b/engine/compiler/builtin/visitor.go
new file mode 100644
index 00000000..bd84a8f5
--- /dev/null
+++ b/engine/compiler/builtin/visitor.go
@@ -0,0 +1,23 @@
+package builtin
+
+import "github.com/drone/drone/engine/compiler/parse"
+
+// Visitor interface for walking the Yaml file.
+type Visitor interface {
+ VisitRoot(*parse.RootNode) error
+ VisitVolume(*parse.VolumeNode) error
+ VisitNetwork(*parse.NetworkNode) error
+ VisitBuild(*parse.BuildNode) error
+ VisitContainer(*parse.ContainerNode) error
+}
+
+// visitor provides an easy default implementation of a Visitor interface with
+// stubbed methods. This can be embedded in transforms to meet the basic
+// requirements.
+type visitor struct{}
+
+func (visitor) VisitRoot(*parse.RootNode) error { return nil }
+func (visitor) VisitVolume(*parse.VolumeNode) error { return nil }
+func (visitor) VisitNetwork(*parse.NetworkNode) error { return nil }
+func (visitor) VisitBuild(*parse.BuildNode) error { return nil }
+func (visitor) VisitContainer(*parse.ContainerNode) error { return nil }
diff --git a/engine/compiler/builtin/workspace.go b/engine/compiler/builtin/workspace.go
new file mode 100644
index 00000000..1f00647c
--- /dev/null
+++ b/engine/compiler/builtin/workspace.go
@@ -0,0 +1,50 @@
+package builtin
+
+import (
+ "path/filepath"
+
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+type workspaceOp struct {
+ visitor
+ base string
+ path string
+}
+
+// NewWorkspaceOp returns a transformer that provides a default workspace paths,
+// including the base path (mounted as a volume) and absolute path where the
+// code is cloned.
+func NewWorkspaceOp(base, path string) Visitor {
+ return &workspaceOp{
+ base: base,
+ path: path,
+ }
+}
+
+func (v *workspaceOp) VisitRoot(node *parse.RootNode) error {
+ if node.Base == "" {
+ node.Base = v.base
+ }
+ if node.Path == "" {
+ node.Path = v.path
+ }
+ if !filepath.IsAbs(node.Path) {
+ node.Path = filepath.Join(
+ node.Base,
+ node.Path,
+ )
+ }
+ return nil
+}
+
+func (v *workspaceOp) VisitContainer(node *parse.ContainerNode) error {
+ if node.NodeType == parse.NodeService {
+ // we must not override the default working
+ // directory of service containers. All other
+ // container should launch in the workspace
+ return nil
+ }
+ node.Container.WorkingDir = node.Root().Path
+ return nil
+}
diff --git a/engine/compiler/builtin/workspace_test.go b/engine/compiler/builtin/workspace_test.go
new file mode 100644
index 00000000..523d2f01
--- /dev/null
+++ b/engine/compiler/builtin/workspace_test.go
@@ -0,0 +1,89 @@
+package builtin
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "github.com/drone/drone/engine/compiler/parse"
+)
+
+func Test_workspace(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("workspace", func() {
+
+ var defaultBase = "/go"
+ var defaultPath = "src/github.com/octocat/hello-world"
+
+ g.It("should not override user paths", func() {
+ var base = "/drone"
+ var path = "/drone/src/github.com/octocat/hello-world"
+
+ op := NewWorkspaceOp(defaultBase, defaultPath)
+ root := parse.NewRootNode()
+ root.Base = base
+ root.Path = path
+
+ op.VisitRoot(root)
+ g.Assert(root.Base).Equal(base)
+ g.Assert(root.Path).Equal(path)
+ })
+
+ g.It("should convert user paths to absolute", func() {
+ var base = "/drone"
+ var path = "src/github.com/octocat/hello-world"
+ var abs = "/drone/src/github.com/octocat/hello-world"
+
+ op := NewWorkspaceOp(defaultBase, defaultPath)
+ root := parse.NewRootNode()
+ root.Base = base
+ root.Path = path
+
+ op.VisitRoot(root)
+ g.Assert(root.Base).Equal(base)
+ g.Assert(root.Path).Equal(abs)
+ })
+
+ g.It("should set the default path", func() {
+ var base = "/go"
+ var path = "/go/src/github.com/octocat/hello-world"
+
+ op := NewWorkspaceOp(defaultBase, defaultPath)
+ root := parse.NewRootNode()
+
+ op.VisitRoot(root)
+ g.Assert(root.Base).Equal(base)
+ g.Assert(root.Path).Equal(path)
+ })
+
+ g.It("should use workspace as working_dir", func() {
+ var base = "/drone"
+ var path = "/drone/src/github.com/octocat/hello-world"
+
+ root := parse.NewRootNode()
+ root.Base = base
+ root.Path = path
+
+ c := root.NewContainerNode()
+
+ op := NewWorkspaceOp(defaultBase, defaultPath)
+ op.VisitContainer(c)
+ g.Assert(c.Container.WorkingDir).Equal(root.Path)
+ })
+
+ g.It("should not use workspace as working_dir for services", func() {
+ var base = "/drone"
+ var path = "/drone/src/github.com/octocat/hello-world"
+
+ root := parse.NewRootNode()
+ root.Base = base
+ root.Path = path
+
+ c := root.NewServiceNode()
+
+ op := NewWorkspaceOp(defaultBase, defaultPath)
+ op.VisitContainer(c)
+ g.Assert(c.Container.WorkingDir).Equal("")
+ })
+ })
+}
diff --git a/engine/compiler/compile.go b/engine/compiler/compile.go
new file mode 100644
index 00000000..2cfe7de4
--- /dev/null
+++ b/engine/compiler/compile.go
@@ -0,0 +1,146 @@
+package libyaml
+
+import (
+ "github.com/drone/drone/engine/runner"
+ "github.com/drone/drone/engine/runner/parse"
+
+ yaml "github.com/drone/drone/engine/compiler/parse"
+)
+
+// Compiler compiles the Yaml file to the intermediate representation.
+type Compiler struct {
+ trans []Transform
+}
+
+func New() *Compiler {
+ return &Compiler{}
+}
+
+// Transforms sets the compiler transforms use to transform the intermediate
+// representation during compilation.
+func (c *Compiler) Transforms(trans []Transform) *Compiler {
+ c.trans = append(c.trans, trans...)
+ return c
+}
+
+// CompileString compiles the Yaml configuration string and returns
+// the intermediate representation for the interpreter.
+func (c *Compiler) CompileString(in string) (*runner.Spec, error) {
+ return c.Compile([]byte(in))
+}
+
+// Compile compiles the Yaml configuration file and returns
+// the intermediate representation for the interpreter.
+func (c *Compiler) Compile(in []byte) (*runner.Spec, error) {
+ root, err := yaml.Parse(in)
+ if err != nil {
+ return nil, err
+ }
+ if err := root.Walk(c.walk); err != nil {
+ return nil, err
+ }
+
+ config := &runner.Spec{}
+ tree := parse.NewTree()
+
+ // pod section
+ if root.Pod != nil {
+ node, ok := root.Pod.(*yaml.ContainerNode)
+ if ok {
+ config.Containers = append(config.Containers, &node.Container)
+ tree.Append(parse.NewRunNode().SetName(node.Container.Name).SetDetach(true))
+ }
+ }
+
+ // cache section
+ if root.Cache != nil {
+ node, ok := root.Cache.(*yaml.ContainerNode)
+ if ok && !node.Disabled {
+ config.Containers = append(config.Containers, &node.Container)
+ tree.Append(parse.NewRunNode().SetName(node.Container.Name))
+ }
+ }
+
+ // clone section
+ if root.Clone != nil {
+ node, ok := root.Clone.(*yaml.ContainerNode)
+ if ok && !node.Disabled {
+ config.Containers = append(config.Containers, &node.Container)
+ tree.Append(parse.NewRunNode().SetName(node.Container.Name))
+ }
+ }
+
+ // services section
+ for _, container := range root.Services {
+ node, ok := container.(*yaml.ContainerNode)
+ if !ok || node.Disabled {
+ continue
+ }
+
+ config.Containers = append(config.Containers, &node.Container)
+ tree.Append(parse.NewRunNode().SetName(node.Container.Name).SetDetach(true))
+ }
+
+ // pipeline section
+ for i, container := range root.Script {
+ node, ok := container.(*yaml.ContainerNode)
+ if !ok || node.Disabled {
+ continue
+ }
+
+ config.Containers = append(config.Containers, &node.Container)
+
+ // step 1: lookahead to see if any status=failure exist
+ list := parse.NewListNode()
+ for ii, next := range root.Script {
+ if i >= ii {
+ continue
+ }
+ node, ok := next.(*yaml.ContainerNode)
+ if !ok || node.Disabled || !node.OnFailure() {
+ continue
+ }
+
+ list.Append(
+ parse.NewRecoverNode().SetBody(
+ parse.NewRunNode().SetName(
+ node.Container.Name,
+ ),
+ ),
+ )
+ }
+ // step 2: if yes, collect these and append to "error" node
+ if len(list.Body) == 0 {
+ tree.Append(parse.NewRunNode().SetName(node.Container.Name))
+ } else {
+ errorNode := parse.NewErrorNode()
+ errorNode.SetBody(parse.NewRunNode().SetName(node.Container.Name))
+ errorNode.SetDefer(list)
+ tree.Append(errorNode)
+ }
+ }
+
+ config.Nodes = tree
+ return config, nil
+}
+
+func (c *Compiler) walk(node yaml.Node) (err error) {
+ for _, trans := range c.trans {
+ switch v := node.(type) {
+ case *yaml.BuildNode:
+ err = trans.VisitBuild(v)
+ case *yaml.ContainerNode:
+ err = trans.VisitContainer(v)
+ case *yaml.NetworkNode:
+ err = trans.VisitNetwork(v)
+ case *yaml.VolumeNode:
+ err = trans.VisitVolume(v)
+ case *yaml.RootNode:
+ err = trans.VisitRoot(v)
+ }
+ if err != nil {
+ break
+ }
+ }
+ return err
+}
\ No newline at end of file
diff --git a/engine/compiler/compile_test.go b/engine/compiler/compile_test.go
new file mode 100644
index 00000000..0160d811
--- /dev/null
+++ b/engine/compiler/compile_test.go
@@ -0,0 +1 @@
+package libyaml
diff --git a/engine/compiler/parse/node.go b/engine/compiler/parse/node.go
new file mode 100644
index 00000000..6a97159a
--- /dev/null
+++ b/engine/compiler/parse/node.go
@@ -0,0 +1,34 @@
+package parse
+
+const (
+ NodeBuild = "build"
+ NodeCache = "cache"
+ NodeClone = "clone"
+ NodeContainer = "container"
+ NodeNetwork = "network"
+ NodePlugin = "plugin"
+ NodeRoot = "root"
+ NodeService = "service"
+ NodeShell = "shell"
+ NodeVolume = "volume"
+)
+
+// NodeType identifies the type of parse tree node.
+type NodeType string
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+ return t
+}
+
+// String returns the string value of the Node type.
+func (t NodeType) String() string {
+ return string(t)
+}
+
+// A Node is an element in the parse tree.
+type Node interface {
+ Type() NodeType
+ Root() *RootNode
+}
diff --git a/engine/compiler/parse/node_build.go b/engine/compiler/parse/node_build.go
new file mode 100644
index 00000000..158529b8
--- /dev/null
+++ b/engine/compiler/parse/node_build.go
@@ -0,0 +1,42 @@
+package parse
+
+// BuildNode represents Docker image build instructions.
+type BuildNode struct {
+ NodeType
+
+ Context string
+ Dockerfile string
+ Args map[string]string
+
+ root *RootNode
+}
+
+// Root returns the root node.
+func (n *BuildNode) Root() *RootNode { return n.root }
+
+//
+// intermediate types for yaml decoding.
+//
+
+type build struct {
+ Context string
+ Dockerfile string
+ Args map[string]string
+}
+
+func (b *build) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ err := unmarshal(&b.Context)
+ if err == nil {
+ return nil
+ }
+ out := struct {
+ Context string
+ Dockerfile string
+ Args map[string]string
+ }{}
+ err = unmarshal(&out)
+ b.Context = out.Context
+ b.Args = out.Args
+ b.Dockerfile = out.Dockerfile
+ return err
+}
diff --git a/engine/compiler/parse/node_build_test.go b/engine/compiler/parse/node_build_test.go
new file mode 100644
index 00000000..223edbed
--- /dev/null
+++ b/engine/compiler/parse/node_build_test.go
@@ -0,0 +1,38 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestBuildNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Build", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal", func() {
+ in := []byte(".")
+ out := build{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(out.Context).Equal(".")
+ })
+
+ g.It("should unmarshal shorthand", func() {
+ in := []byte("{ context: ., dockerfile: Dockerfile }")
+ out := build{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(out.Context).Equal(".")
+ g.Assert(out.Dockerfile).Equal("Dockerfile")
+ })
+ })
+ })
+}
diff --git a/engine/compiler/parse/node_container.go b/engine/compiler/parse/node_container.go
new file mode 100644
index 00000000..8ffad21a
--- /dev/null
+++ b/engine/compiler/parse/node_container.go
@@ -0,0 +1,180 @@
+package parse
+
+import (
+ "fmt"
+
+ "github.com/drone/drone/engine/runner"
+
+ "gopkg.in/yaml.v2"
+)
+
+type Conditions struct {
+ Platform []string
+ Environment []string
+ Event []string
+ Branch []string
+ Status []string
+ Matrix map[string]string
+}
+
+// ContainerNode represents a Docker container.
+type ContainerNode struct {
+ NodeType
+
+ // Container represents the container configuration.
+ Container runner.Container
+ Conditions Conditions
+ Disabled bool
+ Commands []string
+ Vargs map[string]interface{}
+
+ root *RootNode
+}
+
+// Root returns the root node.
+func (n *ContainerNode) Root() *RootNode { return n.root }
+
+// OnSuccess returns true if the container should be executed
+// when the exit code of the previous step is 0.
+func (n *ContainerNode) OnSuccess() bool {
+ for _, status := range n.Conditions.Status {
+ if status == "success" {
+ return true
+ }
+ }
+ return false
+}
+
+// OnFailure returns true if the container should be executed
+// even when the exit code of the previous step != 0.
+func (n *ContainerNode) OnFailure() bool {
+ for _, status := range n.Conditions.Status {
+ if status == "failure" {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// intermediate types for yaml decoding.
+//
+
+type container struct {
+ Name string `yaml:"name"`
+ Image string `yaml:"image"`
+ Build string `yaml:"build"`
+ Pull bool `yaml:"pull"`
+ Privileged bool `yaml:"privileged"`
+ Environment mapEqualSlice `yaml:"environment"`
+ Entrypoint stringOrSlice `yaml:"entrypoint"`
+ Command stringOrSlice `yaml:"command"`
+ Commands stringOrSlice `yaml:"commands"`
+ ExtraHosts stringOrSlice `yaml:"extra_hosts"`
+ Volumes stringOrSlice `yaml:"volumes"`
+ VolumesFrom stringOrSlice `yaml:"volumes_from"`
+ Devices stringOrSlice `yaml:"devices"`
+ Network string `yaml:"network_mode"`
+ DNS stringOrSlice `yaml:"dns"`
+ DNSSearch stringOrSlice `yaml:"dns_search"`
+ MemSwapLimit int64 `yaml:"memswap_limit"`
+ MemLimit int64 `yaml:"mem_limit"`
+ CPUQuota int64 `yaml:"cpu_quota"`
+ CPUShares int64 `yaml:"cpu_shares"`
+ CPUSet string `yaml:"cpuset"`
+ OomKillDisable bool `yaml:"oom_kill_disable"`
+
+ AuthConfig struct {
+ Username string `yaml:"username"`
+ Password string `yaml:"password"`
+ Email string `yaml:"email"`
+ Token string `yaml:"registry_token"`
+ } `yaml:"auth_config"`
+
+ Conditions struct {
+ Platform stringOrSlice `yaml:"platform"`
+ Environment stringOrSlice `yaml:"environment"`
+ Event stringOrSlice `yaml:"event"`
+ Branch stringOrSlice `yaml:"branch"`
+ Status stringOrSlice `yaml:"status"`
+ Matrix map[string]string `yaml:"matrix"`
+ } `yaml:"when"`
+
+ Vargs map[string]interface{} `yaml:",inline"`
+}
+
+func (c *container) ToContainer() runner.Container {
+ return runner.Container{
+ Name: c.Name,
+ Image: c.Image,
+ Pull: c.Pull,
+ Privileged: c.Privileged,
+ Environment: c.Environment.parts,
+ Entrypoint: c.Entrypoint.parts,
+ Command: c.Command.parts,
+ ExtraHosts: c.ExtraHosts.parts,
+ Volumes: c.Volumes.parts,
+ VolumesFrom: c.VolumesFrom.parts,
+ Devices: c.Devices.parts,
+ Network: c.Network,
+ DNS: c.DNS.parts,
+ DNSSearch: c.DNSSearch.parts,
+ MemSwapLimit: c.MemSwapLimit,
+ MemLimit: c.MemLimit,
+ CPUQuota: c.CPUQuota,
+ CPUShares: c.CPUShares,
+ CPUSet: c.CPUSet,
+ OomKillDisable: c.OomKillDisable,
+ AuthConfig: runner.Auth{
+ Username: c.AuthConfig.Username,
+ Password: c.AuthConfig.Password,
+ Email: c.AuthConfig.Email,
+ Token: c.AuthConfig.Token,
+ },
+ }
+}
+
+func (c *container) ToConditions() Conditions {
+ return Conditions{
+ Platform: c.Conditions.Platform.parts,
+ Environment: c.Conditions.Environment.parts,
+ Event: c.Conditions.Event.parts,
+ Branch: c.Conditions.Branch.parts,
+ Status: c.Conditions.Status.parts,
+ Matrix: c.Conditions.Matrix,
+ }
+}
+
+type containerList struct {
+ containers []*container
+}
+
+func (c *containerList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	slice := yaml.MapSlice{}
+	err := unmarshal(&slice)
+	if err != nil {
+		return err
+	}
+
+	for _, s := range slice {
+		cc := container{}
+
+		out, err := yaml.Marshal(s.Value)
+		if err != nil {
+			return err
+		}
+
+		err = yaml.Unmarshal(out, &cc)
+		if err != nil {
+			return err
+		}
+		if cc.Name == "" {
+			cc.Name = fmt.Sprintf("%v", s.Key)
+		}
+		if cc.Image == "" {
+			cc.Image = fmt.Sprintf("%v", s.Key)
+		}
+		c.containers = append(c.containers, &cc)
+	}
+	return nil
+}
diff --git a/engine/compiler/parse/node_container_test.go b/engine/compiler/parse/node_container_test.go
new file mode 100644
index 00000000..352e9809
--- /dev/null
+++ b/engine/compiler/parse/node_container_test.go
@@ -0,0 +1,97 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestContainerNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Containers", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal", func() {
+ in := []byte(sampleContainer)
+ out := containerList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.containers)).Equal(1)
+
+ c := out.containers[0]
+ g.Assert(c.Name).Equal("foo")
+ g.Assert(c.Image).Equal("golang")
+ g.Assert(c.Build).Equal(".")
+ g.Assert(c.Pull).Equal(true)
+ g.Assert(c.Privileged).Equal(true)
+ g.Assert(c.Entrypoint.parts).Equal([]string{"/bin/sh"})
+ g.Assert(c.Command.parts).Equal([]string{"yes"})
+ g.Assert(c.Commands.parts).Equal([]string{"whoami"})
+ g.Assert(c.ExtraHosts.parts).Equal([]string{"foo.com"})
+ g.Assert(c.Volumes.parts).Equal([]string{"/foo:/bar"})
+ g.Assert(c.VolumesFrom.parts).Equal([]string{"foo"})
+ g.Assert(c.Devices.parts).Equal([]string{"/dev/tty0"})
+ g.Assert(c.Network).Equal("bridge")
+ g.Assert(c.DNS.parts).Equal([]string{"8.8.8.8"})
+ g.Assert(c.MemSwapLimit).Equal(int64(1))
+ g.Assert(c.MemLimit).Equal(int64(2))
+ g.Assert(c.CPUQuota).Equal(int64(3))
+ g.Assert(c.CPUSet).Equal("1,2")
+ g.Assert(c.OomKillDisable).Equal(true)
+ g.Assert(c.AuthConfig.Username).Equal("octocat")
+ g.Assert(c.AuthConfig.Password).Equal("password")
+ g.Assert(c.AuthConfig.Email).Equal("octocat@github.com")
+ g.Assert(c.Vargs["access_key"]).Equal("970d28f4dd477bc184fbd10b376de753")
+ g.Assert(c.Vargs["secret_key"]).Equal("9c5785d3ece6a9cdefa42eb99b58986f9095ff1c")
+ })
+
+ g.It("should unmarshal named", func() {
+ in := []byte("foo: { name: bar }")
+ out := containerList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.containers)).Equal(1)
+ g.Assert(out.containers[0].Name).Equal("bar")
+ })
+
+ })
+ })
+}
+
+var sampleContainer = `
+foo:
+ image: golang
+ build: .
+ pull: true
+ privileged: true
+ environment:
+ FOO: BAR
+ entrypoint: /bin/sh
+ command: "yes"
+ commands: whoami
+ extra_hosts: foo.com
+ volumes: /foo:/bar
+ volumes_from: foo
+ devices: /dev/tty0
+ network_mode: bridge
+ dns: 8.8.8.8
+ memswap_limit: 1
+ mem_limit: 2
+ cpu_quota: 3
+ cpuset: 1,2
+ oom_kill_disable: true
+
+ auth_config:
+ username: octocat
+ password: password
+ email: octocat@github.com
+
+ access_key: 970d28f4dd477bc184fbd10b376de753
+ secret_key: 9c5785d3ece6a9cdefa42eb99b58986f9095ff1c
+`
diff --git a/engine/compiler/parse/node_network.go b/engine/compiler/parse/node_network.go
new file mode 100644
index 00000000..b78a4bb7
--- /dev/null
+++ b/engine/compiler/parse/node_network.go
@@ -0,0 +1,68 @@
+package parse
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v2"
+)
+
+// NetworkNode represents a Docker network.
+type NetworkNode struct {
+ NodeType
+ root *RootNode
+
+ Name string
+ Driver string
+ DriverOpts map[string]string
+}
+
+// Root returns the root node.
+func (n *NetworkNode) Root() *RootNode { return n.root }
+
+//
+// intermediate types for yaml decoding.
+//
+
+// network is an intermediate type used for decoding networks in a format
+// compatible with docker-compose.yml
+type network struct {
+ Name string
+ Driver string
+ DriverOpts map[string]string `yaml:"driver_opts"`
+}
+
+// networkList is an intermediate type used for decoding a slice of networks
+// in a format compatible with docker-compose.yml
+type networkList struct {
+ networks []*network
+}
+
+func (n *networkList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	slice := yaml.MapSlice{}
+	err := unmarshal(&slice)
+	if err != nil {
+		return err
+	}
+
+	for _, s := range slice {
+		nn := network{}
+
+		out, err := yaml.Marshal(s.Value)
+		if err != nil {
+			return err
+		}
+
+		err = yaml.Unmarshal(out, &nn)
+		if err != nil {
+			return err
+		}
+		if nn.Name == "" {
+			nn.Name = fmt.Sprintf("%v", s.Key)
+		}
+		if nn.Driver == "" {
+			nn.Driver = "bridge"
+		}
+		n.networks = append(n.networks, &nn)
+	}
+	return nil
+}
diff --git a/engine/compiler/parse/node_network_test.go b/engine/compiler/parse/node_network_test.go
new file mode 100644
index 00000000..c4b1ca4f
--- /dev/null
+++ b/engine/compiler/parse/node_network_test.go
@@ -0,0 +1,51 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestNetworkNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Networks", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal", func() {
+ in := []byte("foo: { driver: overlay }")
+ out := networkList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.networks)).Equal(1)
+ g.Assert(out.networks[0].Name).Equal("foo")
+ g.Assert(out.networks[0].Driver).Equal("overlay")
+ })
+
+ g.It("should unmarshal named", func() {
+ in := []byte("foo: { name: bar }")
+ out := networkList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.networks)).Equal(1)
+ g.Assert(out.networks[0].Name).Equal("bar")
+ })
+
+			g.It("should unmarshal and use default driver", func() {
+				in := []byte("foo: { name: bar }")
+				out := networkList{}
+				err := yaml.Unmarshal(in, &out)
+				if err != nil {
+					g.Fail(err)
+				}
+				g.Assert(len(out.networks)).Equal(1)
+				g.Assert(out.networks[0].Driver).Equal("bridge")
+			})
+ })
+ })
+}
diff --git a/engine/compiler/parse/node_root.go b/engine/compiler/parse/node_root.go
new file mode 100644
index 00000000..fcf5ac01
--- /dev/null
+++ b/engine/compiler/parse/node_root.go
@@ -0,0 +1,148 @@
+package parse
+
+// RootNode is the root node in the parsed Yaml file.
+type RootNode struct {
+ NodeType
+
+ Platform string
+ Base string
+ Path string
+ Image string
+
+ Pod Node
+ Build Node
+ Cache Node
+ Clone Node
+ Script []Node
+ Volumes []Node
+ Networks []Node
+ Services []Node
+}
+
+// NewRootNode returns a new root node.
+func NewRootNode() *RootNode {
+ return &RootNode{
+ NodeType: NodeRoot,
+ }
+}
+
+// Root returns the root node.
+func (n *RootNode) Root() *RootNode { return n }
+
+// Returns a new Volume Node.
+func (n *RootNode) NewVolumeNode(name string) *VolumeNode {
+ return &VolumeNode{
+ NodeType: NodeVolume,
+ Name: name,
+ root: n,
+ }
+}
+
+// Returns a new Network Node.
+func (n *RootNode) NewNetworkNode(name string) *NetworkNode {
+ return &NetworkNode{
+ NodeType: NodeNetwork,
+ Name: name,
+ root: n,
+ }
+}
+
+// Returns a new Build Node.
+func (n *RootNode) NewBuildNode(context string) *BuildNode {
+ return &BuildNode{
+ NodeType: NodeBuild,
+ Context: context,
+ root: n,
+ }
+}
+
+// Returns a new Container Plugin Node.
+func (n *RootNode) NewPluginNode() *ContainerNode {
+ return &ContainerNode{
+ NodeType: NodePlugin,
+ root: n,
+ }
+}
+
+// Returns a new Container Shell Node.
+func (n *RootNode) NewShellNode() *ContainerNode {
+ return &ContainerNode{
+ NodeType: NodeShell,
+ root: n,
+ }
+}
+
+// Returns a new Container Service Node.
+func (n *RootNode) NewServiceNode() *ContainerNode {
+ return &ContainerNode{
+ NodeType: NodeService,
+ root: n,
+ }
+}
+
+// Returns a new Container Clone Node.
+func (n *RootNode) NewCloneNode() *ContainerNode {
+ return &ContainerNode{
+ NodeType: NodeClone,
+ root: n,
+ }
+}
+
+// Returns a new Container Cache Node.
+func (n *RootNode) NewCacheNode() *ContainerNode {
+ return &ContainerNode{
+ NodeType: NodeCache,
+ root: n,
+ }
+}
+
+// Returns a new Container Node.
+func (n *RootNode) NewContainerNode() *ContainerNode {
+ return &ContainerNode{
+ NodeType: NodeContainer,
+ root: n,
+ }
+}
+
+// Walk is a function that walk through all child nodes of the RootNode
+// and invokes the Walk callback function for each Node.
+func (n *RootNode) Walk(fn WalkFunc) (err error) {
+ var nodes []Node
+ nodes = append(nodes, n)
+ nodes = append(nodes, n.Build)
+ nodes = append(nodes, n.Cache)
+ nodes = append(nodes, n.Clone)
+ nodes = append(nodes, n.Script...)
+ nodes = append(nodes, n.Volumes...)
+ nodes = append(nodes, n.Networks...)
+ nodes = append(nodes, n.Services...)
+ for _, node := range nodes {
+ err = fn(node)
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+type WalkFunc func(Node) error
+
+//
+// intermediate types for yaml decoding.
+//
+
+type root struct {
+ Workspace struct {
+ Path string
+ Base string
+ }
+ Image string
+ Platform string
+ Volumes volumeList
+ Networks networkList
+ Services containerList
+ Script containerList
+ Cache container
+ Clone container
+ Build build
+}
diff --git a/engine/compiler/parse/node_root_test.go b/engine/compiler/parse/node_root_test.go
new file mode 100644
index 00000000..f4760109
--- /dev/null
+++ b/engine/compiler/parse/node_root_test.go
@@ -0,0 +1,85 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestRootNode(t *testing.T) {
+ g := goblin.Goblin(t)
+ r := &RootNode{}
+
+ g.Describe("Root Node", func() {
+
+ g.It("should return self as root", func() {
+ g.Assert(r).Equal(r.Root())
+ })
+
+ g.It("should create a Volume Node", func() {
+ n := r.NewVolumeNode("foo")
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.Name).Equal("foo")
+ g.Assert(n.String()).Equal(NodeVolume)
+ g.Assert(n.Type()).Equal(NodeType(NodeVolume))
+ })
+
+ g.It("should create a Network Node", func() {
+ n := r.NewNetworkNode("foo")
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.Name).Equal("foo")
+ g.Assert(n.String()).Equal(NodeNetwork)
+ g.Assert(n.Type()).Equal(NodeType(NodeNetwork))
+ })
+
+ g.It("should create a Plugin Node", func() {
+ n := r.NewPluginNode()
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.String()).Equal(NodePlugin)
+ g.Assert(n.Type()).Equal(NodeType(NodePlugin))
+ })
+
+ g.It("should create a Shell Node", func() {
+ n := r.NewShellNode()
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.String()).Equal(NodeShell)
+ g.Assert(n.Type()).Equal(NodeType(NodeShell))
+ })
+
+ g.It("should create a Service Node", func() {
+ n := r.NewServiceNode()
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.String()).Equal(NodeService)
+ g.Assert(n.Type()).Equal(NodeType(NodeService))
+ })
+
+ g.It("should create a Build Node", func() {
+ n := r.NewBuildNode(".")
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.Context).Equal(".")
+ g.Assert(n.String()).Equal(NodeBuild)
+ g.Assert(n.Type()).Equal(NodeType(NodeBuild))
+ })
+
+ g.It("should create a Cache Node", func() {
+ n := r.NewCacheNode()
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.String()).Equal(NodeCache)
+ g.Assert(n.Type()).Equal(NodeType(NodeCache))
+ })
+
+ g.It("should create a Clone Node", func() {
+ n := r.NewCloneNode()
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.String()).Equal(NodeClone)
+ g.Assert(n.Type()).Equal(NodeType(NodeClone))
+ })
+
+ g.It("should create a Container Node", func() {
+ n := r.NewContainerNode()
+ g.Assert(n.Root()).Equal(r)
+ g.Assert(n.String()).Equal(NodeContainer)
+ g.Assert(n.Type()).Equal(NodeType(NodeContainer))
+ })
+ })
+}
diff --git a/engine/compiler/parse/node_volume.go b/engine/compiler/parse/node_volume.go
new file mode 100644
index 00000000..1aadfa1f
--- /dev/null
+++ b/engine/compiler/parse/node_volume.go
@@ -0,0 +1,69 @@
+package parse
+
+import (
+ "fmt"
+
+ "gopkg.in/yaml.v2"
+)
+
+// VolumeNode represents a Docker volume.
+type VolumeNode struct {
+ NodeType
+ root *RootNode
+
+ Name string
+ Driver string
+ DriverOpts map[string]string
+ External bool
+}
+
+// Root returns the root node.
+func (n *VolumeNode) Root() *RootNode { return n.root }
+
+//
+// intermediate types for yaml decoding.
+//
+
+// volume is an intermediate type used for decoding volumes in a format
+// compatible with docker-compose.yml
+type volume struct {
+ Name string
+ Driver string
+ DriverOpts map[string]string `yaml:"driver_opts"`
+}
+
+// volumeList is an intermediate type used for decoding a slice of volumes
+// in a format compatible with docker-compose.yml
+type volumeList struct {
+ volumes []*volume
+}
+
+func (v *volumeList) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	slice := yaml.MapSlice{}
+	err := unmarshal(&slice)
+	if err != nil {
+		return err
+	}
+
+	for _, s := range slice {
+		vv := volume{}
+
+		out, err := yaml.Marshal(s.Value)
+		if err != nil {
+			return err
+		}
+
+		err = yaml.Unmarshal(out, &vv)
+		if err != nil {
+			return err
+		}
+		if vv.Name == "" {
+			vv.Name = fmt.Sprintf("%v", s.Key)
+		}
+		if vv.Driver == "" {
+			vv.Driver = "local"
+		}
+		v.volumes = append(v.volumes, &vv)
+	}
+	return nil
+}
diff --git a/engine/compiler/parse/node_volume_test.go b/engine/compiler/parse/node_volume_test.go
new file mode 100644
index 00000000..79588091
--- /dev/null
+++ b/engine/compiler/parse/node_volume_test.go
@@ -0,0 +1,51 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestVolumeNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Volumes", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal", func() {
+ in := []byte("foo: { driver: blockbridge }")
+ out := volumeList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.volumes)).Equal(1)
+ g.Assert(out.volumes[0].Name).Equal("foo")
+ g.Assert(out.volumes[0].Driver).Equal("blockbridge")
+ })
+
+ g.It("should unmarshal named", func() {
+ in := []byte("foo: { name: bar }")
+ out := volumeList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.volumes)).Equal(1)
+ g.Assert(out.volumes[0].Name).Equal("bar")
+ })
+
+ g.It("should unmarshal and use default driver", func() {
+ in := []byte("foo: { name: bar }")
+ out := volumeList{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.volumes)).Equal(1)
+ g.Assert(out.volumes[0].Driver).Equal("local")
+ })
+ })
+ })
+}
diff --git a/engine/compiler/parse/parse.go b/engine/compiler/parse/parse.go
new file mode 100644
index 00000000..a3be5ed3
--- /dev/null
+++ b/engine/compiler/parse/parse.go
@@ -0,0 +1,100 @@
+package parse
+
+import (
+ "gopkg.in/yaml.v2"
+)
+
+// Parse parses a Yaml file and returns a Tree structure.
+func Parse(in []byte) (*RootNode, error) {
+ out := root{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ return nil, err
+ }
+
+ root := NewRootNode()
+ root.Platform = out.Platform
+ root.Path = out.Workspace.Path
+ root.Base = out.Workspace.Base
+ root.Image = out.Image
+
+ // append volume nodes to tree
+ for _, v := range out.Volumes.volumes {
+ vv := root.NewVolumeNode(v.Name)
+ vv.Driver = v.Driver
+ vv.DriverOpts = v.DriverOpts
+ root.Volumes = append(root.Volumes, vv)
+ }
+
+ // append network nodes to tree
+ for _, n := range out.Networks.networks {
+ nn := root.NewNetworkNode(n.Name)
+ nn.Driver = n.Driver
+ nn.DriverOpts = n.DriverOpts
+ root.Networks = append(root.Networks, nn)
+ }
+
+ // add the build section
+ if out.Build.Context != "" {
+ root.Build = &BuildNode{
+ NodeType: NodeBuild,
+ Context: out.Build.Context,
+ Dockerfile: out.Build.Dockerfile,
+ Args: out.Build.Args,
+ root: root,
+ }
+ }
+
+ // add the cache section
+ {
+ cc := root.NewCacheNode()
+ cc.Container = out.Cache.ToContainer()
+ cc.Conditions = out.Cache.ToConditions()
+ cc.Container.Name = "cache"
+ cc.Vargs = out.Cache.Vargs
+ root.Cache = cc
+ }
+
+ // add the clone section
+ {
+ cc := root.NewCloneNode()
+ cc.Conditions = out.Clone.ToConditions()
+ cc.Container = out.Clone.ToContainer()
+ cc.Container.Name = "clone"
+ cc.Vargs = out.Clone.Vargs
+ root.Clone = cc
+ }
+
+ // append services
+ for _, c := range out.Services.containers {
+ if c.Build != "" {
+ continue
+ }
+ cc := root.NewServiceNode()
+ cc.Conditions = c.ToConditions()
+ cc.Container = c.ToContainer()
+ root.Services = append(root.Services, cc)
+ }
+
+ // append scripts
+ for _, c := range out.Script.containers {
+ var cc *ContainerNode
+ if len(c.Commands.parts) == 0 {
+ cc = root.NewPluginNode()
+ } else {
+ cc = root.NewShellNode()
+ }
+ cc.Commands = c.Commands.parts
+ cc.Vargs = c.Vargs
+ cc.Container = c.ToContainer()
+ cc.Conditions = c.ToConditions()
+ root.Script = append(root.Script, cc)
+ }
+
+ return root, nil
+}
+
+// ParseString parses a Yaml string and returns a Tree structure.
+func ParseString(in string) (*RootNode, error) {
+ return Parse([]byte(in))
+}
diff --git a/engine/compiler/parse/parse_test.go b/engine/compiler/parse/parse_test.go
new file mode 100644
index 00000000..472ed01e
--- /dev/null
+++ b/engine/compiler/parse/parse_test.go
@@ -0,0 +1,96 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestParse(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Parser", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal a string", func() {
+ out, err := ParseString(sampleYaml)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(out.Image).Equal("hello-world")
+ g.Assert(out.Base).Equal("/go")
+ g.Assert(out.Path).Equal("src/github.com/octocat/hello-world")
+ g.Assert(out.Build.(*BuildNode).Context).Equal(".")
+ g.Assert(out.Build.(*BuildNode).Dockerfile).Equal("Dockerfile")
+ g.Assert(out.Cache.(*ContainerNode).Vargs["mount"]).Equal("node_modules")
+ g.Assert(out.Clone.(*ContainerNode).Container.Image).Equal("git")
+ g.Assert(out.Clone.(*ContainerNode).Vargs["depth"]).Equal(1)
+ g.Assert(out.Volumes[0].(*VolumeNode).Name).Equal("custom")
+ g.Assert(out.Volumes[0].(*VolumeNode).Driver).Equal("blockbridge")
+ g.Assert(out.Networks[0].(*NetworkNode).Name).Equal("custom")
+ g.Assert(out.Networks[0].(*NetworkNode).Driver).Equal("overlay")
+ g.Assert(out.Services[0].(*ContainerNode).Container.Name).Equal("database")
+ g.Assert(out.Services[0].(*ContainerNode).Container.Image).Equal("mysql")
+ g.Assert(out.Script[0].(*ContainerNode).Container.Name).Equal("test")
+ g.Assert(out.Script[0].(*ContainerNode).Container.Image).Equal("golang")
+ g.Assert(out.Script[0].(*ContainerNode).Commands).Equal([]string{"go install", "go test"})
+ g.Assert(out.Script[0].(*ContainerNode).String()).Equal(NodeShell)
+ g.Assert(out.Script[1].(*ContainerNode).Container.Name).Equal("build")
+ g.Assert(out.Script[1].(*ContainerNode).Container.Image).Equal("golang")
+ g.Assert(out.Script[1].(*ContainerNode).Commands).Equal([]string{"go build"})
+ g.Assert(out.Script[1].(*ContainerNode).String()).Equal(NodeShell)
+ g.Assert(out.Script[2].(*ContainerNode).Container.Name).Equal("notify")
+ g.Assert(out.Script[2].(*ContainerNode).Container.Image).Equal("slack")
+ g.Assert(out.Script[2].(*ContainerNode).String()).Equal(NodePlugin)
+ })
+ })
+ })
+}
+
+var sampleYaml = `
+image: hello-world
+build:
+ context: .
+ dockerfile: Dockerfile
+
+workspace:
+ path: src/github.com/octocat/hello-world
+ base: /go
+
+clone:
+ image: git
+ depth: 1
+
+cache:
+ mount: node_modules
+
+script:
+ test:
+ image: golang
+ commands:
+ - go install
+ - go test
+ build:
+ image: golang
+ commands:
+ - go build
+ when:
+ event: push
+ notify:
+ image: slack
+ channel: dev
+ when:
+ event: failure
+
+services:
+ database:
+ image: mysql
+
+networks:
+ custom:
+ driver: overlay
+
+volumes:
+ custom:
+ driver: blockbridge
+`
diff --git a/engine/compiler/parse/types.go b/engine/compiler/parse/types.go
new file mode 100644
index 00000000..cf459688
--- /dev/null
+++ b/engine/compiler/parse/types.go
@@ -0,0 +1,55 @@
+package parse
+
+import "strings"
+
+// mapEqualSlice represents a map[string]string or a slice of
+// strings in key=value format.
+type mapEqualSlice struct {
+ parts map[string]string
+}
+
+func (s *mapEqualSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ s.parts = map[string]string{}
+ err := unmarshal(&s.parts)
+ if err == nil {
+ return nil
+ }
+
+ var slice []string
+ err = unmarshal(&slice)
+ if err != nil {
+ return err
+ }
+ for _, v := range slice {
+ parts := strings.SplitN(v, "=", 2)
+ if len(parts) == 2 {
+ key := parts[0]
+ val := parts[1]
+ s.parts[key] = val
+ }
+ }
+ return nil
+}
+
+// stringOrSlice represents a string or an array of strings.
+type stringOrSlice struct {
+ parts []string
+}
+
+func (s *stringOrSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ var sliceType []string
+ err := unmarshal(&sliceType)
+ if err == nil {
+ s.parts = sliceType
+ return nil
+ }
+
+ var stringType string
+ err = unmarshal(&stringType)
+ if err == nil {
+ sliceType = make([]string, 0, 1)
+ s.parts = append(sliceType, string(stringType))
+ return nil
+ }
+ return err
+}
diff --git a/engine/compiler/parse/types_test.go b/engine/compiler/parse/types_test.go
new file mode 100644
index 00000000..463a72c7
--- /dev/null
+++ b/engine/compiler/parse/types_test.go
@@ -0,0 +1,75 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestTypes(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Yaml types", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal a string", func() {
+ in := []byte("foo")
+ out := stringOrSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts[0]).Equal("foo")
+ })
+
+ g.It("should unmarshal a string slice", func() {
+ in := []byte("[ foo ]")
+ out := stringOrSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts[0]).Equal("foo")
+ })
+
+ g.It("should throw error when invalid string slice", func() {
+ in := []byte("{ }") // string value should fail parse
+ out := stringOrSlice{}
+ err := yaml.Unmarshal(in, &out)
+ g.Assert(err != nil).IsTrue("expects error")
+ })
+
+ g.It("should unmarshal a map", func() {
+ in := []byte("foo: bar")
+ out := mapEqualSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts["foo"]).Equal("bar")
+ })
+
+ g.It("should unmarshal a map equal slice", func() {
+ in := []byte("[ foo=bar ]")
+ out := mapEqualSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts["foo"]).Equal("bar")
+ })
+
+ g.It("should throw error when invalid map equal slice", func() {
+ in := []byte("foo") // string value should fail parse
+ out := mapEqualSlice{}
+ err := yaml.Unmarshal(in, &out)
+ g.Assert(err != nil).IsTrue("expects error")
+ })
+ })
+ })
+}
diff --git a/engine/compiler/transform.go b/engine/compiler/transform.go
new file mode 100644
index 00000000..48f4d05d
--- /dev/null
+++ b/engine/compiler/transform.go
@@ -0,0 +1,13 @@
+package libyaml
+
+import "github.com/drone/drone/engine/compiler/parse"
+
+// Transform is used to transform nodes from the parsed Yaml file during the
+// compilation process. A Transform may be used to add, disable or alter nodes.
+type Transform interface {
+ VisitRoot(*parse.RootNode) error
+ VisitVolume(*parse.VolumeNode) error
+ VisitNetwork(*parse.NetworkNode) error
+ VisitBuild(*parse.BuildNode) error
+ VisitContainer(*parse.ContainerNode) error
+}
diff --git a/engine/context.go b/engine/context.go
new file mode 100644
index 00000000..2321fa07
--- /dev/null
+++ b/engine/context.go
@@ -0,0 +1,23 @@
+package engine
+
+import (
+ "golang.org/x/net/context"
+)
+
+const key = "engine"
+
+// Setter defines a context that enables setting values.
+type Setter interface {
+ Set(string, interface{})
+}
+
+// FromContext returns the Engine associated with this context.
+func FromContext(c context.Context) Engine {
+ return c.Value(key).(Engine)
+}
+
+// ToContext adds the Engine to this context if it supports
+// the Setter interface.
+func ToContext(c Setter, engine Engine) {
+ c.Set(key, engine)
+}
diff --git a/engine/engine.go b/engine/engine.go
index bf3569c2..32a2c6a1 100644
--- a/engine/engine.go
+++ b/engine/engine.go
@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"io/ioutil"
+ "os"
"runtime"
"time"
@@ -15,7 +16,6 @@ import (
"github.com/docker/docker/pkg/stdcopy"
"github.com/drone/drone/model"
"github.com/drone/drone/shared/docker"
- "github.com/drone/drone/shared/envconfig"
"github.com/drone/drone/store"
"github.com/samalba/dockerclient"
"golang.org/x/net/context"
@@ -60,7 +60,7 @@ type engine struct {
// Load creates a new build engine, loaded with registered nodes from the
// database. The registered nodes are added to the pool of nodes to immediately
// start accepting workloads.
-func Load(env envconfig.Env, s store.Store) Engine {
+func Load(s store.Store) Engine {
engine := &engine{}
engine.bus = newEventbus()
engine.pool = newPool()
@@ -70,7 +70,7 @@ func Load(env envconfig.Env, s store.Store) Engine {
// throughout the build environment.
var proxyVars = []string{"HTTP_PROXY", "http_proxy", "HTTPS_PROXY", "https_proxy", "NO_PROXY", "no_proxy"}
for _, proxyVar := range proxyVars {
- proxyVal := env.Get(proxyVar)
+ proxyVal := os.Getenv(proxyVar)
if len(proxyVal) != 0 {
engine.envs = append(engine.envs, proxyVar+"="+proxyVal)
}
diff --git a/engine/expander/expand.go b/engine/expander/expand.go
new file mode 100644
index 00000000..fbf7af03
--- /dev/null
+++ b/engine/expander/expand.go
@@ -0,0 +1,33 @@
+package expander
+
+import "sort"
+
+// Expand expands variables into the Yaml configuration using a
+// ${key} template parameter with limited support for bash string functions.
+func Expand(config []byte, envs map[string]string) []byte {
+ return []byte(
+ ExpandString(string(config), envs),
+ )
+}
+
+// ExpandString injects the variables into the Yaml configuration string using
+// a ${key} template parameter with limited support for bash string functions.
+func ExpandString(config string, envs map[string]string) string {
+	if len(envs) == 0 {
+		return config
+	}
+	keys := []string{}
+	for k := range envs {
+		keys = append(keys, k)
+	}
+	sort.Sort(sort.Reverse(sort.StringSlice(keys)))
+	expanded := config
+	for _, k := range keys {
+		v := envs[k]
+
+		for _, substitute := range substitutors {
+			expanded = substitute(expanded, k, v)
+		}
+	}
+	return expanded
+}
diff --git a/engine/expander/expand_test.go b/engine/expander/expand_test.go
new file mode 100644
index 00000000..60a6ba29
--- /dev/null
+++ b/engine/expander/expand_test.go
@@ -0,0 +1,48 @@
+package expander
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestExpand(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Expand params", func() {
+
+ g.It("Should replace vars with ${key}", func() {
+ s := "echo ${FOO} $BAR"
+ m := map[string]string{}
+ m["FOO"] = "BAZ"
+ g.Assert("echo BAZ $BAR").Equal(ExpandString(s, m))
+ })
+
+ g.It("Should not replace vars in nil map", func() {
+ s := "echo ${FOO} $BAR"
+ g.Assert(s).Equal(ExpandString(s, nil))
+ })
+
+ g.It("Should escape quoted variables", func() {
+ s := `echo "${FOO}"`
+ m := map[string]string{}
+ m["FOO"] = "hello\nworld"
+ g.Assert(`echo "hello\nworld"`).Equal(ExpandString(s, m))
+ })
+
+ g.It("Should replace variable prefix", func() {
+ s := `tag: ${TAG=${SHA:8}}`
+ m := map[string]string{}
+ m["TAG"] = ""
+ m["SHA"] = "f36cbf54ee1a1eeab264c8e388f386218ab1701b"
+ g.Assert("tag: f36cbf54").Equal(ExpandString(s, m))
+ })
+
+ g.It("Should handle nested substitution operations", func() {
+ s := `echo "${TAG##v}"`
+ m := map[string]string{}
+ m["TAG"] = "v1.0.0"
+ g.Assert(`echo "1.0.0"`).Equal(ExpandString(s, m))
+ })
+ })
+}
diff --git a/engine/expander/func.go b/engine/expander/func.go
new file mode 100644
index 00000000..7399b059
--- /dev/null
+++ b/engine/expander/func.go
@@ -0,0 +1,172 @@
+package expander
+
+import (
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// these are helper functions that bring bash-substitution to the drone yaml file.
+// see http://tldp.org/LDP/abs/html/parameter-substitution.html
+
+type substituteFunc func(str, key, val string) string
+
+var substitutors = []substituteFunc{
+ substituteQ,
+ substitute,
+ substitutePrefix,
+ substituteSuffix,
+ substituteDefault,
+ substituteReplace,
+ substituteLeft,
+ substituteSubstr,
+}
+
+// substitute is a helper function that substitutes a simple parameter using
+// ${parameter} notation.
+func substitute(str, key, val string) string {
+ key = fmt.Sprintf("${%s}", key)
+ return strings.Replace(str, key, val, -1)
+}
+
+// substituteQ is a helper function that substitutes a simple parameter using
+// "${parameter}" notation with the escaped value, using %q.
+func substituteQ(str, key, val string) string {
+ key = fmt.Sprintf(`"${%s}"`, key)
+ val = fmt.Sprintf("%q", val)
+ return strings.Replace(str, key, val, -1)
+}
+
+// substitutePrefix is a helper function that substitutes parameters using
+// ${parameter##prefix} notation with the parameter value minus the trimmed prefix.
+func substitutePrefix(str, key, val string) string {
+ key = fmt.Sprintf("\\${%s##(.+)}", key)
+ reg, err := regexp.Compile(key)
+ if err != nil {
+ // a key containing regexp meta characters cannot be compiled;
+ // return the input unmodified rather than failing.
+ return str
+ }
+ for _, match := range reg.FindAllStringSubmatch(str, -1) {
+ if len(match) != 2 {
+ continue
+ }
+ val_ := strings.TrimPrefix(val, match[1])
+ str = strings.Replace(str, match[0], val_, -1)
+ }
+ return str
+}
+
+// substituteSuffix is a helper function that substitutes parameters using
+// ${parameter%%suffix} notation with the parameter value minus the trimmed suffix.
+func substituteSuffix(str, key, val string) string {
+ // %%%% renders as a literal %% in the compiled pattern.
+ key = fmt.Sprintf("\\${%s%%%%(.+)}", key)
+ reg, err := regexp.Compile(key)
+ if err != nil {
+ return str
+ }
+ for _, match := range reg.FindAllStringSubmatch(str, -1) {
+ if len(match) != 2 {
+ continue
+ }
+ val_ := strings.TrimSuffix(val, match[1])
+ str = strings.Replace(str, match[0], val_, -1)
+ }
+ return str
+}
+
+// substituteDefault is a helper function that substitutes parameters using
+// ${parameter=default} notation with the parameter value. When empty the
+// default value is used.
+func substituteDefault(str, key, val string) string {
+ key = fmt.Sprintf("\\${%s=(.+)}", key)
+ reg, err := regexp.Compile(key)
+ if err != nil {
+ return str
+ }
+ for _, match := range reg.FindAllStringSubmatch(str, -1) {
+ if len(match) != 2 {
+ continue
+ }
+ // use the captured default only when the value is empty.
+ if len(val) == 0 {
+ str = strings.Replace(str, match[0], match[1], -1)
+ } else {
+ str = strings.Replace(str, match[0], val, -1)
+ }
+ }
+ return str
+}
+
+// substituteReplace is a helper function that substitutes parameters using
+// ${parameter/old/new} notation with the parameter value. A find and replace
+// is performed before injecting the strings, replacing the old pattern with
+// the new value.
+func substituteReplace(str, key, val string) string {
+ // NOTE(review): the (.+) captures are greedy, so a value containing
+ // multiple "/" characters may split differently than bash — confirm.
+ key = fmt.Sprintf("\\${%s/(.+)/(.+)}", key)
+ reg, err := regexp.Compile(key)
+ if err != nil {
+ return str
+ }
+ for _, match := range reg.FindAllStringSubmatch(str, -1) {
+ if len(match) != 3 {
+ continue
+ }
+ with := strings.Replace(val, match[1], match[2], -1)
+ str = strings.Replace(str, match[0], with, -1)
+ }
+ return str
+}
+
+// substituteLeft is a helper function that substitutes parameters using
+// ${parameter:pos} notation with the parameter value, sliced up to the
+// specified position.
+func substituteLeft(str, key, val string) string {
+ key = fmt.Sprintf("\\${%s:([0-9]*)}", key)
+ reg, err := regexp.Compile(key)
+ if err != nil {
+ return str
+ }
+ for _, match := range reg.FindAllStringSubmatch(str, -1) {
+ if len(match) != 2 {
+ continue
+ }
+ index, err := strconv.Atoi(match[1])
+ if err != nil {
+ continue // skip
+ }
+ // val[:index] is valid for index == len(val) and yields the whole
+ // value, matching bash ${VAR:n}; only skip truly out-of-range slices.
+ if index > len(val) {
+ continue // skip
+ }
+
+ str = strings.Replace(str, match[0], val[:index], -1)
+ }
+ return str
+}
+
+// substituteSubstr is a helper function that substitutes parameters using
+// ${parameter:pos:len} notation with the parameter value as a substring,
+// starting at the specified position for the specified length.
+func substituteSubstr(str, key, val string) string {
+ key = fmt.Sprintf("\\${%s:([0-9]*):([0-9]*)}", key)
+ reg, err := regexp.Compile(key)
+ if err != nil {
+ return str
+ }
+ for _, match := range reg.FindAllStringSubmatch(str, -1) {
+ if len(match) != 3 {
+ continue
+ }
+ pos, err := strconv.Atoi(match[1])
+ if err != nil {
+ continue // skip
+ }
+ length, err := strconv.Atoi(match[2])
+ if err != nil {
+ continue // skip
+ }
+ // val[pos:pos+length] is valid when pos+length == len(val); only
+ // skip when the substring would run past the end of the value.
+ if pos+length > len(val) {
+ continue // skip
+ }
+ str = strings.Replace(str, match[0], val[pos:pos+length], -1)
+ }
+ return str
+}
diff --git a/engine/expander/func_test.go b/engine/expander/func_test.go
new file mode 100644
index 00000000..2a9528cf
--- /dev/null
+++ b/engine/expander/func_test.go
@@ -0,0 +1,68 @@
+package expander
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestSubstitution(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Parameter Substitution", func() {
+
+ g.It("Should substitute simple parameters", func() {
+ before := "echo ${GREETING} WORLD"
+ after := "echo HELLO WORLD"
+ g.Assert(substitute(before, "GREETING", "HELLO")).Equal(after)
+ })
+
+ g.It("Should substitute quoted parameters", func() {
+ before := "echo \"${GREETING}\" WORLD"
+ after := "echo \"HELLO\" WORLD"
+ g.Assert(substituteQ(before, "GREETING", "HELLO")).Equal(after)
+ })
+
+ g.It("Should substitute parameters and trim prefix", func() {
+ before := "echo ${GREETING##asdf} WORLD"
+ after := "echo HELLO WORLD"
+ g.Assert(substitutePrefix(before, "GREETING", "asdfHELLO")).Equal(after)
+ })
+
+ g.It("Should substitute parameters and trim suffix", func() {
+ before := "echo ${GREETING%%asdf} WORLD"
+ after := "echo HELLO WORLD"
+ g.Assert(substituteSuffix(before, "GREETING", "HELLOasdf")).Equal(after)
+ })
+
+ g.It("Should substitute parameters without using the default", func() {
+ before := "echo ${GREETING=HOLA} WORLD"
+ after := "echo HELLO WORLD"
+ g.Assert(substituteDefault(before, "GREETING", "HELLO")).Equal(after)
+ })
+
+ g.It("Should substitute parameters using the a default", func() {
+ before := "echo ${GREETING=HOLA} WORLD"
+ after := "echo HOLA WORLD"
+ g.Assert(substituteDefault(before, "GREETING", "")).Equal(after)
+ })
+
+ g.It("Should substitute parameters with replacement", func() {
+ before := "echo ${GREETING/HE/A} MONDE"
+ after := "echo ALLO MONDE"
+ g.Assert(substituteReplace(before, "GREETING", "HELLO")).Equal(after)
+ })
+
+ g.It("Should substitute parameters with left substr", func() {
+ before := "echo ${FOO:4} IS COOL"
+ after := "echo THIS IS COOL"
+ g.Assert(substituteLeft(before, "FOO", "THIS IS A REALLY LONG STRING")).Equal(after)
+ })
+
+ g.It("Should substitute parameters with substr", func() {
+ before := "echo ${FOO:8:5} IS COOL"
+ after := "echo DRONE IS COOL"
+ g.Assert(substituteSubstr(before, "FOO", "THIS IS DRONE CI")).Equal(after)
+ })
+ })
+}
diff --git a/engine/parser/branch.go b/engine/parser/branch.go
new file mode 100644
index 00000000..0ba73f1b
--- /dev/null
+++ b/engine/parser/branch.go
@@ -0,0 +1,77 @@
+package parser
+
+import (
+ "path/filepath"
+
+ "gopkg.in/yaml.v2"
+)
+
+type Branch struct {
+ Include []string `yaml:"include"`
+ Exclude []string `yaml:"exclude"`
+}
+
+// ParseBranch parses the branch section of the Yaml document.
+func ParseBranch(in []byte) *Branch {
+ return parseBranch(in)
+}
+
+// ParseBranchString parses the branch section of the Yaml document.
+func ParseBranchString(in string) *Branch {
+ return ParseBranch([]byte(in))
+}
+
+// Matches returns true if the branch matches the include patterns and
+// does not match any of the exclude patterns. Patterns are compared both
+// literally and as filepath glob patterns (e.g. "feature/*"); exclusions
+// always take precedence over inclusions.
+func (b *Branch) Matches(branch string) bool {
+ // when no includes or excludes automatically match
+ if len(b.Include) == 0 && len(b.Exclude) == 0 {
+ return true
+ }
+
+ // exclusions are processed first. So we can include everything and
+ // then selectively exclude certain sub-patterns.
+ for _, pattern := range b.Exclude {
+ if pattern == branch {
+ return false
+ }
+ // filepath.Match errors (malformed patterns) are ignored; a bad
+ // pattern simply never matches.
+ if ok, _ := filepath.Match(pattern, branch); ok {
+ return false
+ }
+ }
+
+ for _, pattern := range b.Include {
+ if pattern == branch {
+ return true
+ }
+ if ok, _ := filepath.Match(pattern, branch); ok {
+ return true
+ }
+ }
+
+ return false
+}
+
+// parseBranch unmarshals the document against both supported schemas: a
+// "branches" block with include/exclude lists (out1), and the shorthand
+// where "branches" is itself a string or list of include patterns (out2).
+func parseBranch(in []byte) *Branch {
+ out1 := struct {
+ Branch struct {
+ Include stringOrSlice `yaml:"include"`
+ Exclude stringOrSlice `yaml:"exclude"`
+ } `yaml:"branches"`
+ }{}
+
+ out2 := struct {
+ Include stringOrSlice `yaml:"branches"`
+ }{}
+
+ // unmarshal errors are deliberately ignored: a document matching
+ // neither schema yields an empty Branch, which Matches treats as
+ // matching every branch.
+ yaml.Unmarshal(in, &out1)
+ yaml.Unmarshal(in, &out2)
+
+ return &Branch{
+ Exclude: out1.Branch.Exclude.Slice(),
+ Include: append(
+ out1.Branch.Include.Slice(),
+ out2.Include.Slice()...,
+ ),
+ }
+}
diff --git a/engine/parser/branch_test.go b/engine/parser/branch_test.go
new file mode 100644
index 00000000..a2736c97
--- /dev/null
+++ b/engine/parser/branch_test.go
@@ -0,0 +1,74 @@
+package parser
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestBranch(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Branch filter", func() {
+
+ g.It("Should parse and match emtpy", func() {
+ branch := ParseBranchString("")
+ g.Assert(branch.Matches("master")).IsTrue()
+ })
+
+ g.It("Should parse and match", func() {
+ branch := ParseBranchString("branches: { include: [ master, develop ] }")
+ g.Assert(branch.Matches("master")).IsTrue()
+ })
+
+ g.It("Should parse and match shortand", func() {
+ branch := ParseBranchString("branches: [ master, develop ]")
+ g.Assert(branch.Matches("master")).IsTrue()
+ })
+
+ g.It("Should parse and match shortand string", func() {
+ branch := ParseBranchString("branches: master")
+ g.Assert(branch.Matches("master")).IsTrue()
+ })
+
+ g.It("Should parse and match exclude", func() {
+ branch := ParseBranchString("branches: { exclude: [ master, develop ] }")
+ g.Assert(branch.Matches("master")).IsFalse()
+ })
+
+ g.It("Should parse and match exclude shorthand", func() {
+ branch := ParseBranchString("branches: { exclude: master }")
+ g.Assert(branch.Matches("master")).IsFalse()
+ })
+
+ g.It("Should match include", func() {
+ b := Branch{}
+ b.Include = []string{"master"}
+ g.Assert(b.Matches("master")).IsTrue()
+ })
+
+ g.It("Should match include pattern", func() {
+ b := Branch{}
+ b.Include = []string{"feature/*"}
+ g.Assert(b.Matches("feature/foo")).IsTrue()
+ })
+
+ g.It("Should fail to match include pattern", func() {
+ b := Branch{}
+ b.Include = []string{"feature/*"}
+ g.Assert(b.Matches("master")).IsFalse()
+ })
+
+ g.It("Should match exclude", func() {
+ b := Branch{}
+ b.Exclude = []string{"master"}
+ g.Assert(b.Matches("master")).IsFalse()
+ })
+
+ g.It("Should match exclude pattern", func() {
+ b := Branch{}
+ b.Exclude = []string{"feature/*"}
+ g.Assert(b.Matches("feature/foo")).IsFalse()
+ })
+ })
+}
diff --git a/engine/parser/matrix.go b/engine/parser/matrix.go
new file mode 100644
index 00000000..77bc03e4
--- /dev/null
+++ b/engine/parser/matrix.go
@@ -0,0 +1,100 @@
+package parser
+
+import (
+ "strings"
+
+ "gopkg.in/yaml.v2"
+)
+
+const (
+ limitTags = 10
+ limitAxis = 25
+)
+
+// Matrix represents the build matrix.
+type Matrix map[string][]string
+
+// Axis represents a single permutation of entries from the build matrix.
+type Axis map[string]string
+
+// String returns a string representation of an Axis as a space-separated
+// list of KEY=VALUE environment variable pairs.
+// NOTE: Go map iteration order is randomized, so the ordering of the pairs
+// in the returned string is not deterministic.
+func (a Axis) String() string {
+ var envs []string
+ for k, v := range a {
+ envs = append(envs, k+"="+v)
+ }
+ return strings.Join(envs, " ")
+}
+
+// ParseMatrix parses the Yaml matrix definition.
+func ParseMatrix(data []byte) ([]Axis, error) {
+ matrix, err := parseMatrix(data)
+ if err != nil {
+ return nil, err
+ }
+
+ // if not a matrix build return an array with just the single axis.
+ if len(matrix) == 0 {
+ return nil, nil
+ }
+
+ return calcMatrix(matrix), nil
+}
+
+// ParseMatrixString parses the Yaml string matrix definition.
+func ParseMatrixString(data string) ([]Axis, error) {
+ return ParseMatrix([]byte(data))
+}
+
+// calcMatrix expands the matrix into the cartesian product of its values,
+// returning one Axis per permutation. The number of tags consumed and the
+// number of axes produced are capped by limitTags and limitAxis.
+func calcMatrix(matrix Matrix) []Axis {
+ // calculate number of permutations and extract the list of tags
+ // (ie go_version, redis_version, etc)
+ var perm int
+ var tags []string
+ for k, v := range matrix {
+ perm *= len(v)
+ // seed the product with the first axis length (perm starts at 0).
+ if perm == 0 {
+ perm = len(v)
+ }
+ tags = append(tags, k)
+ }
+
+ // structure to hold the transformed result set
+ axisList := []Axis{}
+
+ // for each axis calculate the uniqe set of values that should be used.
+ for p := 0; p < perm; p++ {
+ axis := map[string]string{}
+ decr := perm
+ for i, tag := range tags {
+ elems := matrix[tag]
+ // mixed-radix decoding: decr shrinks by each axis length so
+ // that elem cycles through every combination across p.
+ decr = decr / len(elems)
+ elem := p / decr % len(elems)
+ axis[tag] = elems[elem]
+
+ // enforce a maximum number of tags in the build matrix.
+ // NOTE(review): the break fires only after index limitTags+1
+ // is processed, so up to limitTags+2 tags are populated —
+ // confirm whether >= was intended.
+ if i > limitTags {
+ break
+ }
+ }
+
+ // append to the list of axis.
+ axisList = append(axisList, axis)
+
+ // enforce a maximum number of axis that should be calculated.
+ // NOTE(review): same off-by-one shape as the tag limit above —
+ // up to limitAxis+2 axes are appended before the break.
+ if p > limitAxis {
+ break
+ }
+ }
+
+ return axisList
+}
+
+func parseMatrix(raw []byte) (Matrix, error) {
+ data := struct {
+ Matrix map[string][]string
+ }{}
+ err := yaml.Unmarshal(raw, &data)
+ return data.Matrix, err
+}
diff --git a/yaml/matrix/matrix_test.go b/engine/parser/matrix_test.go
similarity index 84%
rename from yaml/matrix/matrix_test.go
rename to engine/parser/matrix_test.go
index 5906c0c1..2586391d 100644
--- a/yaml/matrix/matrix_test.go
+++ b/engine/parser/matrix_test.go
@@ -1,4 +1,4 @@
-package matrix
+package parser
import (
"testing"
@@ -6,12 +6,12 @@ import (
"github.com/franela/goblin"
)
-func Test_Matrix(t *testing.T) {
+func TestMatrix(t *testing.T) {
g := goblin.Goblin(t)
g.Describe("Calculate matrix", func() {
- axis, _ := Parse(fakeMatrix)
+ axis, _ := ParseMatrixString(fakeMatrix)
g.It("Should calculate permutations", func() {
g.Assert(len(axis)).Equal(24)
@@ -26,7 +26,7 @@ func Test_Matrix(t *testing.T) {
})
g.It("Should return nil if no matrix", func() {
- axis, err := Parse("")
+ axis, err := ParseMatrixString("")
g.Assert(err == nil).IsTrue()
g.Assert(axis == nil).IsTrue()
})
diff --git a/engine/parser/types.go b/engine/parser/types.go
new file mode 100644
index 00000000..c1fabf88
--- /dev/null
+++ b/engine/parser/types.go
@@ -0,0 +1,28 @@
+package parser
+
+// stringOrSlice represents a string or an array of strings.
+type stringOrSlice struct {
+ parts []string
+}
+
+// UnmarshalYAML implements custom Yaml unmarshaling, accepting either a
+// single string or a list of strings and normalizing both to a slice.
+func (s *stringOrSlice) UnmarshalYAML(unmarshal func(interface{}) error) error {
+ // first attempt to unmarshal as a string slice.
+ var sliceType []string
+ err := unmarshal(&sliceType)
+ if err == nil {
+ s.parts = sliceType
+ return nil
+ }
+
+ // fall back to a single string, stored as a one-element slice.
+ var stringType string
+ err = unmarshal(&stringType)
+ if err == nil {
+ s.parts = []string{stringType}
+ return nil
+ }
+ return err
+}
+
+func (s stringOrSlice) Slice() []string {
+ return s.parts
+}
diff --git a/engine/parser/types_test.go b/engine/parser/types_test.go
new file mode 100644
index 00000000..48e6eb29
--- /dev/null
+++ b/engine/parser/types_test.go
@@ -0,0 +1,46 @@
+package parser
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+ "gopkg.in/yaml.v2"
+)
+
+func TestTypes(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Yaml types", func() {
+ g.Describe("given a yaml file", func() {
+
+ g.It("should unmarshal a string", func() {
+ in := []byte("foo")
+ out := stringOrSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts[0]).Equal("foo")
+ })
+
+ g.It("should unmarshal a string slice", func() {
+ in := []byte("[ foo ]")
+ out := stringOrSlice{}
+ err := yaml.Unmarshal(in, &out)
+ if err != nil {
+ g.Fail(err)
+ }
+ g.Assert(len(out.parts)).Equal(1)
+ g.Assert(out.parts[0]).Equal("foo")
+ })
+
+ g.It("should throw error when invalid string slice", func() {
+ in := []byte("{ }") // string value should fail parse
+ out := stringOrSlice{}
+ err := yaml.Unmarshal(in, &out)
+ g.Assert(err != nil).IsTrue("expects error")
+ })
+ })
+ })
+}
diff --git a/engine/runner/container.go b/engine/runner/container.go
new file mode 100644
index 00000000..e901e3b1
--- /dev/null
+++ b/engine/runner/container.go
@@ -0,0 +1,72 @@
+package runner
+
+import "fmt"
+
+// Container defines the container configuration.
+type Container struct {
+ Name string `json:"name"`
+ Alias string `json:"alias"`
+ Image string `json:"image"`
+ Pull bool `json:"pull,omitempty"`
+ AuthConfig Auth `json:"auth_config,omitempty"`
+ Privileged bool `json:"privileged,omitempty"`
+ WorkingDir string `json:"working_dir,omitempty"`
+ Environment map[string]string `json:"environment,omitempty"`
+ Entrypoint []string `json:"entrypoint,omitempty"`
+ Command []string `json:"command,omitempty"`
+ ExtraHosts []string `json:"extra_hosts,omitempty"`
+ Volumes []string `json:"volumes,omitempty"`
+ VolumesFrom []string `json:"volumes_from,omitempty"`
+ Devices []string `json:"devices,omitempty"`
+ Network string `json:"network_mode,omitempty"`
+ DNS []string `json:"dns,omitempty"`
+ DNSSearch []string `json:"dns_search,omitempty"`
+ MemSwapLimit int64 `json:"memswap_limit,omitempty"`
+ MemLimit int64 `json:"mem_limit,omitempty"`
+ CPUQuota int64 `json:"cpu_quota,omitempty"`
+ CPUShares int64 `json:"cpu_shares,omitempty"`
+ CPUSet string `json:"cpuset,omitempty"`
+ OomKillDisable bool `json:"oom_kill_disable,omitempty"`
+}
+
+// Validate validates the container configuration details and returns an error
+// if the validation fails. Only the name and image are required; every other
+// field is optional.
+func (c *Container) Validate() error {
+ switch {
+
+ case c.Name == "":
+ return fmt.Errorf("Missing container name")
+ case c.Image == "":
+ return fmt.Errorf("Missing container image")
+ default:
+ return nil
+ }
+
+}
+
+// Auth provides authentication parameters to authenticate to a remote
+// container registry for image download.
+type Auth struct {
+ Username string `json:"username,omitempty"`
+ Password string `json:"password,omitempty"`
+ Email string `json:"email,omitempty"`
+ Token string `json:"registry_token,omitempty"`
+}
+
+// Volume defines a container volume.
+type Volume struct {
+ Name string `json:"name,omitempty"`
+ Alias string `json:"alias,omitempty"`
+ Driver string `json:"driver,omitempty"`
+ DriverOpts map[string]string `json:"driver_opts,omitempty"`
+ External bool `json:"external,omitempty"`
+}
+
+// Network defines a container network.
+type Network struct {
+ Name string `json:"name,omitempty"`
+ Alias string `json:"alias,omitempty"`
+ Driver string `json:"driver,omitempty"`
+ DriverOpts map[string]string `json:"driver_opts,omitempty"`
+ External bool `json:"external,omitempty"`
+}
diff --git a/engine/runner/container_test.go b/engine/runner/container_test.go
new file mode 100644
index 00000000..6fab60ee
--- /dev/null
+++ b/engine/runner/container_test.go
@@ -0,0 +1,40 @@
+package runner
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestContainer(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Container validation", func() {
+
+ g.It("fails with an invalid name", func() {
+ c := Container{
+ Image: "golang:1.5",
+ }
+ err := c.Validate()
+ g.Assert(err != nil).IsTrue()
+ g.Assert(err.Error()).Equal("Missing container name")
+ })
+
+ g.It("fails with an invalid image", func() {
+ c := Container{
+ Name: "container_0",
+ }
+ err := c.Validate()
+ g.Assert(err != nil).IsTrue()
+ g.Assert(err.Error()).Equal("Missing container image")
+ })
+
+ g.It("passes with valid attributes", func() {
+ c := Container{
+ Name: "container_0",
+ Image: "golang:1.5",
+ }
+ g.Assert(c.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/engine.go b/engine/runner/engine.go
new file mode 100644
index 00000000..5f24cc32
--- /dev/null
+++ b/engine/runner/engine.go
@@ -0,0 +1,22 @@
+package runner
+
+//go:generate mockery -name Engine -output mock -case=underscore
+
+import "io"
+
+// Engine defines the container runtime engine.
+type Engine interface {
+ // VolumeCreate(*Volume) (string, error)
+ // VolumeRemove(string) error
+ ContainerStart(*Container) (string, error)
+ ContainerStop(string) error
+ ContainerRemove(string) error
+ ContainerWait(string) (*State, error)
+ ContainerLogs(string) (io.ReadCloser, error)
+}
+
+// State defines the state of the container.
+type State struct {
+ ExitCode int // container exit code
+ OOMKilled bool // container exited due to oom error
+}
diff --git a/engine/runner/error.go b/engine/runner/error.go
new file mode 100644
index 00000000..e10040cb
--- /dev/null
+++ b/engine/runner/error.go
@@ -0,0 +1,37 @@
+package runner
+
+import (
+ "errors"
+ "fmt"
+)
+
+var (
+ // ErrSkip is used as a return value when container execution should be
+ // skipped at runtime. It is not returned as an error by any function.
+ ErrSkip = errors.New("Skip")
+
+ // ErrTerm is used as a return value when the runner should terminate
+ // execution and exit. It is not returned as an error by any function.
+ ErrTerm = errors.New("Terminate")
+)
+
+// An ExitError reports an unsuccessful exit.
+type ExitError struct {
+ Name string
+ Code int
+}
+
+// Error returns the error message in string format.
+func (e *ExitError) Error() string {
+ return fmt.Sprintf("%s : exit code %d", e.Name, e.Code)
+}
+
+// An OomError reports the process received an OOMKill from the kernel.
+type OomError struct {
+ Name string
+}
+
+// Error returns the error message in string format.
+func (e *OomError) Error() string {
+ return fmt.Sprintf("%s : received oom kill", e.Name)
+}
diff --git a/engine/runner/error_test.go b/engine/runner/error_test.go
new file mode 100644
index 00000000..4bee938d
--- /dev/null
+++ b/engine/runner/error_test.go
@@ -0,0 +1,26 @@
+package runner
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestErrors(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Error messages", func() {
+
+ g.It("should include OOM details", func() {
+ err := OomError{Name: "golang"}
+ got, want := err.Error(), "golang : received oom kill"
+ g.Assert(got).Equal(want)
+ })
+
+ g.It("should include Exit code", func() {
+ err := ExitError{Name: "golang", Code: 255}
+ got, want := err.Error(), "golang : exit code 255"
+ g.Assert(got).Equal(want)
+ })
+ })
+}
diff --git a/engine/runner/helper.go b/engine/runner/helper.go
new file mode 100644
index 00000000..1b49caf2
--- /dev/null
+++ b/engine/runner/helper.go
@@ -0,0 +1,24 @@
+package runner
+
+import (
+ "encoding/json"
+ "io/ioutil"
+)
+
+// Parse parses a raw file containing a JSON encoded format of an intermediate
+// representation of the pipeline.
+func Parse(data []byte) (*Spec, error) {
+ v := &Spec{}
+ err := json.Unmarshal(data, v)
+ return v, err
+}
+
+// ParseFile parses a file containing a JSON encoded format of an intermediate
+// representation of the pipeline.
+func ParseFile(filename string) (*Spec, error) {
+ out, err := ioutil.ReadFile(filename)
+ if err != nil {
+ return nil, err
+ }
+ return Parse(out)
+}
diff --git a/engine/runner/helper_test.go b/engine/runner/helper_test.go
new file mode 100644
index 00000000..2a60efc2
--- /dev/null
+++ b/engine/runner/helper_test.go
@@ -0,0 +1,97 @@
+package runner
+
+import (
+ "io/ioutil"
+ "os"
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestHelper(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Parsing", func() {
+
+ g.It("should unmarhsal file []byte", func() {
+ res, err := Parse(sample)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ g.Assert(err == nil).IsTrue("expect file parsed")
+ g.Assert(len(res.Containers)).Equal(2)
+ g.Assert(len(res.Volumes)).Equal(1)
+ })
+
+ g.It("should unmarshal from file", func() {
+ temp, _ := ioutil.TempFile("", "spec_")
+ defer os.Remove(temp.Name())
+
+ ioutil.WriteFile(temp.Name(), sample, 0700)
+
+ _, err := ParseFile(temp.Name())
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ g.Assert(err == nil).IsTrue("expect file parsed")
+ })
+
+ g.It("should error when file not found", func() {
+ _, err := ParseFile("/tmp/foo/bar/dummy/file.json")
+ g.Assert(err == nil).IsFalse("expect file not found error")
+ })
+ })
+}
+
+// invalid json representation, simulate parsing error
+var invalid = []byte(`[]`)
+
+// valid json representation, verify parsing
+var sample = []byte(`{
+ "containers": [
+ {
+ "name": "container_0",
+ "image": "node:latest"
+ },
+ {
+ "name": "container_1",
+ "image": "golang:latest"
+ }
+ ],
+ "volumes": [
+ {
+ "name": "volume_0"
+ }
+ ],
+ "program": {
+ "type": "list",
+ "body": [
+ {
+ "type": "defer",
+ "body": {
+ "type": "recover",
+ "body": {
+ "type": "run",
+ "name": "container_0"
+ }
+ },
+ "defer": {
+ "type": "parallel",
+ "body": [
+ {
+ "type": "run",
+ "name": "container_1"
+ },
+ {
+ "type": "run",
+ "name": "container_1"
+ }
+ ],
+ "limit": 2
+ }
+ }
+ ]
+ }
+}`)
diff --git a/engine/runner/parse/node.go b/engine/runner/parse/node.go
new file mode 100644
index 00000000..0c8b7050
--- /dev/null
+++ b/engine/runner/parse/node.go
@@ -0,0 +1,30 @@
+package parse
+
+const (
+ NodeList = "list"
+ NodeDefer = "defer"
+ NodeError = "error"
+ NodeRecover = "recover"
+ NodeParallel = "parallel"
+ NodeRun = "run"
+)
+
+// NodeType identifies the type of a parse tree node.
+type NodeType string
+
+// Type returns itself and provides an easy default implementation
+// for embedding in a Node. Embedded in all non-trivial Nodes.
+func (t NodeType) Type() NodeType {
+ return t
+}
+
+// String returns the string value of the Node type.
+func (t NodeType) String() string {
+ return string(t)
+}
+
+// A Node is an element in the parse tree.
+type Node interface {
+ Type() NodeType
+ Validate() error
+}
diff --git a/engine/runner/parse/node_defer.go b/engine/runner/parse/node_defer.go
new file mode 100644
index 00000000..bc6935f2
--- /dev/null
+++ b/engine/runner/parse/node_defer.go
@@ -0,0 +1,40 @@
+package parse
+
+import "fmt"
+
+// DeferNode executes the child node, and then executes the deferred node.
+// The deferred node is guaranteed to execute, even when the child node fails.
+type DeferNode struct {
+ NodeType `json:"type"`
+
+ Body Node `json:"body"` // evaluate node
+ Defer Node `json:"defer"` // defer evaluation of node.
+}
+
+// NewDeferNode returns a new DeferNode.
+func NewDeferNode() *DeferNode {
+ return &DeferNode{NodeType: NodeDefer}
+}
+
+func (n *DeferNode) SetBody(node Node) *DeferNode {
+ n.Body = node
+ return n
+}
+
+func (n *DeferNode) SetDefer(node Node) *DeferNode {
+ n.Defer = node
+ return n
+}
+
+func (n *DeferNode) Validate() error {
+ switch {
+ case n.NodeType != NodeDefer:
+ return fmt.Errorf("Defer Node uses an invalid type")
+ case n.Body == nil:
+ return fmt.Errorf("Defer Node body is empty")
+ case n.Defer == nil:
+ return fmt.Errorf("Defer Node defer is empty")
+ default:
+ return nil
+ }
+}
diff --git a/engine/runner/parse/node_defer_test.go b/engine/runner/parse/node_defer_test.go
new file mode 100644
index 00000000..9de1bf88
--- /dev/null
+++ b/engine/runner/parse/node_defer_test.go
@@ -0,0 +1,56 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestDeferNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("DeferNode", func() {
+ g.It("should set body and defer node", func() {
+ node0 := NewRunNode()
+ node1 := NewRunNode()
+
+ defer0 := NewDeferNode()
+ defer1 := defer0.SetBody(node0)
+ defer2 := defer0.SetDefer(node1)
+ g.Assert(defer0.Type().String()).Equal(NodeDefer)
+ g.Assert(defer0.Body).Equal(node0)
+ g.Assert(defer0.Defer).Equal(node1)
+ g.Assert(defer0).Equal(defer1)
+ g.Assert(defer0).Equal(defer2)
+ })
+
+ g.It("should fail validation when invalid type", func() {
+ defer0 := DeferNode{}
+ err := defer0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Defer Node uses an invalid type")
+ })
+
+ g.It("should fail validation when empty body", func() {
+ defer0 := NewDeferNode()
+ err := defer0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Defer Node body is empty")
+ })
+
+ g.It("should fail validation when empty defer", func() {
+ defer0 := NewDeferNode()
+ defer0.SetBody(NewRunNode())
+ err := defer0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Defer Node defer is empty")
+ })
+
+ g.It("should pass validation", func() {
+ defer0 := NewDeferNode()
+ defer0.SetBody(NewRunNode())
+ defer0.SetDefer(NewRunNode())
+ g.Assert(defer0.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/parse/node_error.go b/engine/runner/parse/node_error.go
new file mode 100644
index 00000000..cb3f55e7
--- /dev/null
+++ b/engine/runner/parse/node_error.go
@@ -0,0 +1,40 @@
+package parse
+
+import "fmt"
+
+// ErrorNode executes the body node, and then executes the error node if
+// the body node errors. This is similar to defer but only executes on error.
+type ErrorNode struct {
+ NodeType `json:"type"`
+
+ Body Node `json:"body"` // evaluate node
+ Defer Node `json:"defer"` // defer evaluation of node on error.
+}
+
+// NewErrorNode returns a new ErrorNode.
+func NewErrorNode() *ErrorNode {
+ return &ErrorNode{NodeType: NodeError}
+}
+
+func (n *ErrorNode) SetBody(node Node) *ErrorNode {
+ n.Body = node
+ return n
+}
+
+func (n *ErrorNode) SetDefer(node Node) *ErrorNode {
+ n.Defer = node
+ return n
+}
+
+func (n *ErrorNode) Validate() error {
+ switch {
+ case n.NodeType != NodeError:
+ return fmt.Errorf("Error Node uses an invalid type")
+ case n.Body == nil:
+ return fmt.Errorf("Error Node body is empty")
+ case n.Defer == nil:
+ return fmt.Errorf("Error Node defer is empty")
+ default:
+ return nil
+ }
+}
diff --git a/engine/runner/parse/node_error_test.go b/engine/runner/parse/node_error_test.go
new file mode 100644
index 00000000..f68cce85
--- /dev/null
+++ b/engine/runner/parse/node_error_test.go
@@ -0,0 +1,56 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestErrorNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("ErrorNode", func() {
+ g.It("should set body and error node", func() {
+ node0 := NewRunNode()
+ node1 := NewRunNode()
+
+ error0 := NewErrorNode()
+ error1 := error0.SetBody(node0)
+ error2 := error0.SetDefer(node1)
+ g.Assert(error0.Type().String()).Equal(NodeError)
+ g.Assert(error0.Body).Equal(node0)
+ g.Assert(error0.Defer).Equal(node1)
+ g.Assert(error0).Equal(error1)
+ g.Assert(error0).Equal(error2)
+ })
+
+ g.It("should fail validation when invalid type", func() {
+ error0 := ErrorNode{}
+ err := error0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Error Node uses an invalid type")
+ })
+
+ g.It("should fail validation when empty body", func() {
+ error0 := NewErrorNode()
+ err := error0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Error Node body is empty")
+ })
+
+ g.It("should fail validation when empty error", func() {
+ error0 := NewErrorNode()
+ error0.SetBody(NewRunNode())
+ err := error0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Error Node defer is empty")
+ })
+
+ g.It("should pass validation", func() {
+ error0 := NewErrorNode()
+ error0.SetBody(NewRunNode())
+ error0.SetDefer(NewRunNode())
+ g.Assert(error0.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/parse/node_list.go b/engine/runner/parse/node_list.go
new file mode 100644
index 00000000..514cd7ba
--- /dev/null
+++ b/engine/runner/parse/node_list.go
@@ -0,0 +1,33 @@
+package parse
+
+import "fmt"
+
+// ListNode serially executes a list of child nodes.
+type ListNode struct {
+ NodeType `json:"type"`
+
+ // Body is the list of child nodes
+ Body []Node `json:"body"`
+}
+
+// NewListNode returns a new ListNode.
+func NewListNode() *ListNode {
+ return &ListNode{NodeType: NodeList}
+}
+
+// Append appens a child node to the list.
+func (n *ListNode) Append(node Node) *ListNode {
+ n.Body = append(n.Body, node)
+ return n
+}
+
+func (n *ListNode) Validate() error {
+ switch {
+ case n.NodeType != NodeList:
+ return fmt.Errorf("List Node uses an invalid type")
+ case len(n.Body) == 0:
+ return fmt.Errorf("List Node body is empty")
+ default:
+ return nil
+ }
+}
diff --git a/engine/runner/parse/node_list_test.go b/engine/runner/parse/node_list_test.go
new file mode 100644
index 00000000..5c0ad328
--- /dev/null
+++ b/engine/runner/parse/node_list_test.go
@@ -0,0 +1,44 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestListNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("ListNode", func() {
+ g.It("should append nodes", func() {
+ node := NewRunNode()
+
+ list0 := NewListNode()
+ list1 := list0.Append(node)
+ g.Assert(list0.Type().String()).Equal(NodeList)
+ g.Assert(list0.Body[0]).Equal(node)
+ g.Assert(list0).Equal(list1)
+ })
+
+ g.It("should fail validation when invalid type", func() {
+ list := ListNode{}
+ err := list.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("List Node uses an invalid type")
+ })
+
+ g.It("should fail validation when empty body", func() {
+ list := NewListNode()
+ err := list.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("List Node body is empty")
+ })
+
+ g.It("should pass validation", func() {
+ node := NewRunNode()
+ list := NewListNode()
+ list.Append(node)
+ g.Assert(list.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/parse/node_parallel.go b/engine/runner/parse/node_parallel.go
new file mode 100644
index 00000000..a587235e
--- /dev/null
+++ b/engine/runner/parse/node_parallel.go
@@ -0,0 +1,36 @@
+package parse
+
+import "fmt"
+
+// ParallelNode executes a list of child nodes in parallel.
+type ParallelNode struct {
+ NodeType `json:"type"`
+
+ Body []Node `json:"body"` // nodes for parallel evaluation.
+ Limit int `json:"limit"` // limit for parallel evaluation.
+}
+
+func NewParallelNode() *ParallelNode {
+ return &ParallelNode{NodeType: NodeParallel}
+}
+
+func (n *ParallelNode) Append(node Node) *ParallelNode {
+ n.Body = append(n.Body, node)
+ return n
+}
+
+func (n *ParallelNode) SetLimit(limit int) *ParallelNode {
+ n.Limit = limit
+ return n
+}
+
+func (n *ParallelNode) Validate() error {
+ switch {
+ case n.NodeType != NodeParallel:
+ return fmt.Errorf("Parallel Node uses an invalid type")
+ case len(n.Body) == 0:
+ return fmt.Errorf("Parallel Node body is empty")
+ default:
+ return nil
+ }
+}
diff --git a/engine/runner/parse/node_parallel_test.go b/engine/runner/parse/node_parallel_test.go
new file mode 100644
index 00000000..9c0f0fb7
--- /dev/null
+++ b/engine/runner/parse/node_parallel_test.go
@@ -0,0 +1,42 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestParallelNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("ParallelNode", func() {
+ g.It("should append nodes", func() {
+ node := NewRunNode()
+
+ parallel0 := NewParallelNode()
+ parallel1 := parallel0.Append(node)
+ g.Assert(parallel0.Type().String()).Equal(NodeParallel)
+ g.Assert(parallel0.Body[0]).Equal(node)
+ g.Assert(parallel0).Equal(parallel1)
+ })
+
+ g.It("should fail validation when invalid type", func() {
+ node := ParallelNode{}
+ err := node.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Parallel Node uses an invalid type")
+ })
+
+ g.It("should fail validation when empty body", func() {
+ node := NewParallelNode()
+ err := node.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Parallel Node body is empty")
+ })
+
+ g.It("should pass validation", func() {
+ node := NewParallelNode().Append(NewRunNode())
+ g.Assert(node.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/parse/node_recover.go b/engine/runner/parse/node_recover.go
new file mode 100644
index 00000000..9cac51a1
--- /dev/null
+++ b/engine/runner/parse/node_recover.go
@@ -0,0 +1,29 @@
+package parse
+
+import "fmt"
+
+type RecoverNode struct {
+ NodeType `json:"type"`
+
+ Body Node `json:"body"` // evaluate node and catch all errors.
+}
+
+func NewRecoverNode() *RecoverNode {
+ return &RecoverNode{NodeType: NodeRecover}
+}
+
+func (n *RecoverNode) SetBody(node Node) *RecoverNode {
+ n.Body = node
+ return n
+}
+
+func (n *RecoverNode) Validate() error {
+ switch {
+ case n.NodeType != NodeRecover:
+ return fmt.Errorf("Recover Node uses an invalid type")
+ case n.Body == nil:
+ return fmt.Errorf("Recover Node body is empty")
+ default:
+ return nil
+ }
+}
diff --git a/engine/runner/parse/node_recover_test.go b/engine/runner/parse/node_recover_test.go
new file mode 100644
index 00000000..20248655
--- /dev/null
+++ b/engine/runner/parse/node_recover_test.go
@@ -0,0 +1,43 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestRecoverNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("RecoverNode", func() {
+ g.It("should set body", func() {
+ node0 := NewRunNode()
+
+ recover0 := NewRecoverNode()
+ recover1 := recover0.SetBody(node0)
+ g.Assert(recover0.Type().String()).Equal(NodeRecover)
+ g.Assert(recover0.Body).Equal(node0)
+ g.Assert(recover0).Equal(recover1)
+ })
+
+ g.It("should fail validation when invalid type", func() {
+ recover0 := RecoverNode{}
+ err := recover0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Recover Node uses an invalid type")
+ })
+
+ g.It("should fail validation when empty body", func() {
+ recover0 := NewRecoverNode()
+ err := recover0.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Recover Node body is empty")
+ })
+
+ g.It("should pass validation", func() {
+ recover0 := NewRecoverNode()
+ recover0.SetBody(NewRunNode())
+ g.Assert(recover0.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/parse/node_run.go b/engine/runner/parse/node_run.go
new file mode 100644
index 00000000..dedc9073
--- /dev/null
+++ b/engine/runner/parse/node_run.go
@@ -0,0 +1,41 @@
+package parse
+
+import "fmt"
+
+type RunNode struct {
+ NodeType `json:"type"`
+
+ Name string `json:"name"`
+ Detach bool `json:"detach,omitempty"`
+ Silent bool `json:"silent,omitempty"`
+}
+
+func (n *RunNode) SetName(name string) *RunNode {
+ n.Name = name
+ return n
+}
+
+func (n *RunNode) SetDetach(detach bool) *RunNode {
+ n.Detach = detach
+ return n
+}
+
+func (n *RunNode) SetSilent(silent bool) *RunNode {
+ n.Silent = silent
+ return n
+}
+
+func NewRunNode() *RunNode {
+ return &RunNode{NodeType: NodeRun}
+}
+
+func (n *RunNode) Validate() error {
+ switch {
+ case n.NodeType != NodeRun:
+ return fmt.Errorf("Run Node uses an invalid type")
+ case n.Name == "":
+ return fmt.Errorf("Run Node has an invalid name")
+ default:
+ return nil
+ }
+}
diff --git a/engine/runner/parse/node_run_test.go b/engine/runner/parse/node_run_test.go
new file mode 100644
index 00000000..9051249d
--- /dev/null
+++ b/engine/runner/parse/node_run_test.go
@@ -0,0 +1,41 @@
+package parse
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestRunNode(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("RunNode", func() {
+ g.It("should set container name for lookup", func() {
+ node0 := NewRunNode()
+ node1 := node0.SetName("foo")
+
+ g.Assert(node0.Type().String()).Equal(NodeRun)
+ g.Assert(node0.Name).Equal("foo")
+ g.Assert(node0).Equal(node1)
+ })
+
+ g.It("should fail validation when invalid type", func() {
+ node := RunNode{}
+ err := node.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Run Node uses an invalid type")
+ })
+
+ g.It("should fail validation when invalid name", func() {
+ node := NewRunNode()
+ err := node.Validate()
+ g.Assert(err == nil).IsFalse()
+ g.Assert(err.Error()).Equal("Run Node has an invalid name")
+ })
+
+ g.It("should pass validation", func() {
+ node := NewRunNode().SetName("foo")
+ g.Assert(node.Validate() == nil).IsTrue()
+ })
+ })
+}
diff --git a/engine/runner/parse/parse.go b/engine/runner/parse/parse.go
new file mode 100644
index 00000000..b027cff3
--- /dev/null
+++ b/engine/runner/parse/parse.go
@@ -0,0 +1,221 @@
+package parse
+
+import "encoding/json"
+
+// Tree is the intermediate representation of a pipeline.
+type Tree struct {
+ *ListNode // top-level Tree node
+}
+
+// NewTree allocates a new Tree.
+func NewTree() *Tree {
+ return &Tree{
+ NewListNode(),
+ }
+}
+
+// Parse parses a JSON encoded Tree.
+func Parse(data []byte) (*Tree, error) {
+ tree := &Tree{}
+ err := tree.UnmarshalJSON(data)
+ return tree, err
+}
+
+// MarshalJSON implements the Marshaler interface and returns
+// a JSON encoded representation of the Tree.
+func (t *Tree) MarshalJSON() ([]byte, error) {
+ return json.Marshal(t.ListNode)
+}
+
+// UnmarshalJSON implements the Unmarshaler interface and returns
+// a Tree from a JSON representation.
+func (t *Tree) UnmarshalJSON(data []byte) error {
+ block, err := decodeList(data)
+ if err != nil {
+ return err
+ }
+ t.ListNode = block.(*ListNode)
+ return nil
+}
+
+//
+// below are custom decoding functions. We cannot use the default json
+// decoder because the tree structure uses interfaces and the json decoder
+// has difficulty ascertaining the interface type when decoding.
+//
+
+func decodeNode(data []byte) (Node, error) {
+ node := &nodeType{}
+
+ err := json.Unmarshal(data, node)
+ if err != nil {
+ return nil, err
+ }
+ switch node.Type {
+ case NodeList:
+ return decodeList(data)
+ case NodeDefer:
+ return decodeDefer(data)
+ case NodeError:
+ return decodeError(data)
+ case NodeRecover:
+ return decodeRecover(data)
+ case NodeParallel:
+ return decodeParallel(data)
+ case NodeRun:
+ return decodeRun(data)
+ }
+ return nil, nil
+}
+
+func decodeNodes(data []json.RawMessage) ([]Node, error) {
+ var nodes []Node
+ for _, d := range data {
+ node, err := decodeNode(d)
+ if err != nil {
+ return nil, err
+ }
+ nodes = append(nodes, node)
+ }
+ return nodes, nil
+}
+
+func decodeList(data []byte) (Node, error) {
+ v := &nodeList{}
+ err := json.Unmarshal(data, v)
+ if err != nil {
+ return nil, err
+ }
+ b, err := decodeNodes(v.Body)
+ if err != nil {
+ return nil, err
+ }
+ n := NewListNode()
+ n.Body = b
+ return n, nil
+}
+
+func decodeDefer(data []byte) (Node, error) {
+ v := &nodeDefer{}
+ err := json.Unmarshal(data, v)
+ if err != nil {
+ return nil, err
+ }
+ b, err := decodeNode(v.Body)
+ if err != nil {
+ return nil, err
+ }
+ d, err := decodeNode(v.Defer)
+ if err != nil {
+ return nil, err
+ }
+ n := NewDeferNode()
+ n.Body = b
+ n.Defer = d
+ return n, nil
+}
+
+func decodeError(data []byte) (Node, error) {
+ v := &nodeError{}
+ err := json.Unmarshal(data, v)
+ if err != nil {
+ return nil, err
+ }
+ b, err := decodeNode(v.Body)
+ if err != nil {
+ return nil, err
+ }
+ d, err := decodeNode(v.Defer)
+ if err != nil {
+ return nil, err
+ }
+ n := NewErrorNode()
+ n.Body = b
+ n.Defer = d
+ return n, nil
+}
+
+func decodeRecover(data []byte) (Node, error) {
+ v := &nodeRecover{}
+ err := json.Unmarshal(data, v)
+ if err != nil {
+ return nil, err
+ }
+ b, err := decodeNode(v.Body)
+ if err != nil {
+ return nil, err
+ }
+ n := NewRecoverNode()
+ n.Body = b
+ return n, nil
+}
+
+func decodeParallel(data []byte) (Node, error) {
+ v := &nodeParallel{}
+ err := json.Unmarshal(data, v)
+ if err != nil {
+ return nil, err
+ }
+ b, err := decodeNodes(v.Body)
+ if err != nil {
+ return nil, err
+ }
+ n := NewParallelNode()
+ n.Body = b
+ n.Limit = v.Limit
+ return n, nil
+}
+
+func decodeRun(data []byte) (Node, error) {
+ v := &nodeRun{}
+ err := json.Unmarshal(data, v)
+ if err != nil {
+ return nil, err
+ }
+ return &RunNode{NodeRun, v.Name, v.Detach, v.Silent}, nil
+}
+
+//
+// below are intermediate representations of the node structures
+// since we cannot simply encode / decode using the built-in json
+// encoding and decoder.
+//
+
+type nodeType struct {
+ Type NodeType `json:"type"`
+}
+
+type nodeDefer struct {
+ Type NodeType `json:"type"`
+ Body json.RawMessage `json:"body"`
+ Defer json.RawMessage `json:"defer"`
+}
+
+type nodeError struct {
+ Type NodeType `json:"type"`
+ Body json.RawMessage `json:"body"`
+ Defer json.RawMessage `json:"defer"`
+}
+
+type nodeList struct {
+ Type NodeType `json:"type"`
+ Body []json.RawMessage `json:"body"`
+}
+
+type nodeRecover struct {
+ Type NodeType `json:"type"`
+ Body json.RawMessage `json:"body"`
+}
+
+type nodeParallel struct {
+ Type NodeType `json:"type"`
+ Body []json.RawMessage `json:"body"`
+ Limit int `json:"limit"`
+}
+
+type nodeRun struct {
+ Type NodeType `json:"type"`
+ Name string `json:"name"`
+ Detach bool `json:"detach,omitempty"`
+ Silent bool `json:"silent,omitempty"`
+}
diff --git a/engine/runner/parse/parse_test.go b/engine/runner/parse/parse_test.go
new file mode 100644
index 00000000..b384882d
--- /dev/null
+++ b/engine/runner/parse/parse_test.go
@@ -0,0 +1,80 @@
+package parse
+
+import (
+ "bytes"
+ "encoding/json"
+ "reflect"
+ "testing"
+)
+
+func TestUnmarshal(t *testing.T) {
+
+ node1 := NewRunNode().SetName("foo")
+ node2 := NewRecoverNode().SetBody(node1)
+
+ node3 := NewRunNode().SetName("bar")
+ node4 := NewRunNode().SetName("bar")
+
+ node5 := NewParallelNode().
+ Append(node3).
+ Append(node4).
+ SetLimit(2)
+
+ node6 := NewDeferNode().
+ SetBody(node2).
+ SetDefer(node5)
+
+ tree := NewTree()
+ tree.Append(node6)
+
+ encoded, err := json.MarshalIndent(tree, "", "\t")
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !bytes.Equal(encoded, sample) {
+ t.Errorf("Want to marshal Tree to %s, got %s",
+ string(sample),
+ string(encoded),
+ )
+ }
+
+ parsed, err := Parse(encoded)
+ if err != nil {
+ t.Error(err)
+ }
+
+ if !reflect.DeepEqual(tree, parsed) {
+ t.Errorf("Want to marshal and then unmarshal Tree")
+ }
+}
+
+var sample = []byte(`{
+ "type": "list",
+ "body": [
+ {
+ "type": "defer",
+ "body": {
+ "type": "recover",
+ "body": {
+ "type": "run",
+ "name": "foo"
+ }
+ },
+ "defer": {
+ "type": "parallel",
+ "body": [
+ {
+ "type": "run",
+ "name": "bar"
+ },
+ {
+ "type": "run",
+ "name": "bar"
+ }
+ ],
+ "limit": 2
+ }
+ }
+ ]
+}`)
diff --git a/engine/runner/pipe.go b/engine/runner/pipe.go
new file mode 100644
index 00000000..d4965429
--- /dev/null
+++ b/engine/runner/pipe.go
@@ -0,0 +1,49 @@
+package runner
+
+import "fmt"
+
+// Pipe is a buffered pipe that is connected to the console output.
+type Pipe struct {
+ lines chan *Line
+ eof chan bool
+}
+
+// Next returns the next Line of console output.
+func (p *Pipe) Next() *Line {
+ select {
+ case line := <-p.lines:
+ return line
+ case <-p.eof:
+ return nil
+ }
+}
+
+// Close closes the pipe of console output.
+func (p *Pipe) Close() {
+ go func() {
+ p.eof <- true
+ }()
+}
+
+func newPipe(buffer int) *Pipe {
+ return &Pipe{
+ lines: make(chan *Line, buffer),
+ eof: make(chan bool),
+ }
+}
+
+// Line is a line of console output.
+type Line struct {
+ Proc string `json:"proc,omitempty"`
+ Time int64 `json:"time,omitempty"`
+ Type int `json:"type,omitempty"`
+ Pos int `json:"pos,omitempty"`
+ Out string `json:"out,omitempty"`
+}
+
+func (l *Line) String() string {
+ return fmt.Sprintf("[%s:L%v:%vs] %s", l.Proc, l.Pos, l.Time, l.Out)
+}
+
+// TODO(bradrydzewski) consider an alternate buffer implementation based on the
+// x.crypto ssh buffer https://github.com/golang/crypto/blob/master/ssh/buffer.go
diff --git a/engine/runner/pipe_test.go b/engine/runner/pipe_test.go
new file mode 100644
index 00000000..d7be3294
--- /dev/null
+++ b/engine/runner/pipe_test.go
@@ -0,0 +1,54 @@
+package runner
+
+import (
+ "sync"
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestPipe(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Pipe", func() {
+ g.It("should get next line from buffer", func() {
+ line := &Line{
+ Proc: "redis",
+ Pos: 1,
+ Out: "starting redis server",
+ }
+ pipe := newPipe(10)
+ pipe.lines <- line
+ next := pipe.Next()
+ g.Assert(next).Equal(line)
+ })
+
+ g.It("should get null line on buffer closed", func() {
+ pipe := newPipe(10)
+
+ var wg sync.WaitGroup
+ wg.Add(1)
+
+ go func() {
+ next := pipe.Next()
+ g.Assert(next == nil).IsTrue("line should be nil")
+ wg.Done()
+ }()
+
+ pipe.Close()
+ wg.Wait()
+ })
+
+ g.Describe("Line output", func() {
+ g.It("should prefix string() with metadata", func() {
+ line := Line{
+ Proc: "redis",
+ Time: 60,
+ Pos: 1,
+ Out: "starting redis server",
+ }
+ g.Assert(line.String()).Equal("[redis:L1:60s] starting redis server")
+ })
+ })
+ })
+}
diff --git a/engine/runner/runner.go b/engine/runner/runner.go
new file mode 100644
index 00000000..8085c214
--- /dev/null
+++ b/engine/runner/runner.go
@@ -0,0 +1,247 @@
+package runner
+
+import (
+ "bufio"
+ "fmt"
+ "time"
+
+ "github.com/drone/drone/engine/runner/parse"
+
+ "golang.org/x/net/context"
+)
+
+// NoContext is the default context you should supply if not using your own
+// context.Context
+var NoContext = context.TODO()
+
+// Tracer defines a tracing function that is invoked prior to creating and
+// running the container.
+type Tracer func(c *Container) error
+
+// Config defines the configuration for creating the Runner.
+type Config struct {
+ Tracer Tracer
+ Engine Engine
+
+ // Buffer defines the size of the buffer for the channel to which the
+ // console output is streamed.
+ Buffer uint
+}
+
+// Runner creates a build Runner using the specific configuration for the given
+// Context and Specification.
+func (c *Config) Runner(ctx context.Context, spec *Spec) *Runner {
+
+ // TODO(bradrydzewski) we should make a copy of the configuration parameters
+ // instead of a direct reference. This helps avoid any race conditions or
+ // unexpected behavior if the Config changes.
+ return &Runner{
+ ctx: ctx,
+ conf: c,
+ spec: spec,
+ errc: make(chan error),
+ pipe: newPipe(int(c.Buffer) + 1),
+ }
+}
+
+type Runner struct {
+ ctx context.Context
+ conf *Config
+ spec *Spec
+ pipe *Pipe
+ errc chan (error)
+
+ containers []string
+ volumes []string
+ networks []string
+}
+
+// Run starts the build runner but does not wait for it to complete. The Wait
+// method will return the exit code and release associated resources once the
+// running containers exit.
+func (r *Runner) Run() error {
+
+ go func() {
+ r.setup()
+ err := r.exec(r.spec.Nodes.ListNode)
+ r.pipe.Close()
+ r.cancel()
+ r.teardown()
+ r.errc <- err
+ }()
+
+ go func() {
+ <-r.ctx.Done()
+ r.cancel()
+ }()
+
+ return nil
+}
+
+// Wait waits for the runner to exit.
+func (r *Runner) Wait() error {
+ return <-r.errc
+}
+
+// Pipe returns a Pipe that is connected to the console output stream.
+func (r *Runner) Pipe() *Pipe {
+ return r.pipe
+}
+
+func (r *Runner) exec(node parse.Node) error {
+ switch v := node.(type) {
+ case *parse.ListNode:
+ return r.execList(v)
+ case *parse.DeferNode:
+ return r.execDefer(v)
+ case *parse.ErrorNode:
+ return r.execError(v)
+ case *parse.RecoverNode:
+ return r.execRecover(v)
+ case *parse.ParallelNode:
+ return r.execParallel(v)
+ case *parse.RunNode:
+ return r.execRun(v)
+ }
+ return fmt.Errorf("runner: unexpected node %s", node)
+}
+
+func (r *Runner) execList(node *parse.ListNode) error {
+ for _, n := range node.Body {
+ err := r.exec(n)
+ if err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func (r *Runner) execDefer(node *parse.DeferNode) error {
+ err1 := r.exec(node.Body)
+ err2 := r.exec(node.Defer)
+ if err1 != nil {
+ return err1
+ }
+ return err2
+}
+
+func (r *Runner) execError(node *parse.ErrorNode) error {
+ err := r.exec(node.Body)
+ if err != nil {
+ r.exec(node.Defer)
+ }
+ return err
+}
+
+func (r *Runner) execRecover(node *parse.RecoverNode) error {
+ r.exec(node.Body)
+ return nil
+}
+
+func (r *Runner) execParallel(node *parse.ParallelNode) error {
+ errc := make(chan error)
+
+ for _, n := range node.Body {
+ go func(node parse.Node) {
+ errc <- r.exec(node)
+ }(n)
+ }
+
+ var err error
+ for i := 0; i < len(node.Body); i++ {
+ select {
+ case cerr := <-errc:
+ if cerr != nil {
+ err = cerr
+ }
+ }
+ }
+
+ return err
+}
+
+func (r *Runner) execRun(node *parse.RunNode) error {
+ container, err := r.spec.lookupContainer(node.Name)
+ if err != nil {
+ return err
+ }
+ if r.conf.Tracer != nil {
+ err := r.conf.Tracer(container)
+ switch {
+ case err == ErrSkip:
+ return nil
+ case err != nil:
+ return err
+ }
+ }
+ // TODO(bradrydzewski) there is potential here for a race condition where
+ // the context is cancelled just after this line, resulting in the container
+ // still being started.
+ if r.ctx.Err() != nil {
+ return r.ctx.Err()
+ }
+
+ name, err := r.conf.Engine.ContainerStart(container)
+ if err != nil {
+ return err
+ }
+ r.containers = append(r.containers, name)
+
+ go func() {
+ if node.Silent {
+ return
+ }
+ rc, err := r.conf.Engine.ContainerLogs(name)
+ if err != nil {
+ return
+ }
+ defer rc.Close()
+
+ num := 0
+ now := time.Now().UTC()
+ scanner := bufio.NewScanner(rc)
+ for scanner.Scan() {
+ r.pipe.lines <- &Line{
+ Proc: container.Alias,
+ Time: int64(time.Since(now).Seconds()),
+ Pos: num,
+ Out: scanner.Text(),
+ }
+ num++
+ }
+ }()
+
+ // exit when running container in detached mode in background
+ if node.Detach {
+ return nil
+ }
+
+ state, err := r.conf.Engine.ContainerWait(name)
+ if err != nil {
+ return err
+ }
+ if state.OOMKilled {
+ return &OomError{name}
+ } else if state.ExitCode != 0 {
+ return &ExitError{name, state.ExitCode}
+ }
+ return nil
+}
+
+func (r *Runner) setup() {
+ // this is where we will setup network and volumes
+}
+
+func (r *Runner) teardown() {
+ // TODO(bradrydzewski) this is not yet thread safe.
+ for _, container := range r.containers {
+ r.conf.Engine.ContainerRemove(container)
+ }
+}
+
+func (r *Runner) cancel() {
+ // TODO(bradrydzewski) this is not yet thread safe.
+ for _, container := range r.containers {
+ r.conf.Engine.ContainerStop(container)
+ }
+}
diff --git a/engine/runner/runner_test.go b/engine/runner/runner_test.go
new file mode 100644
index 00000000..09a3ecd6
--- /dev/null
+++ b/engine/runner/runner_test.go
@@ -0,0 +1,7 @@
+package runner
+
+import "testing"
+
+func TestRunner(t *testing.T) {
+ t.Skip()
+}
diff --git a/engine/runner/spec.go b/engine/runner/spec.go
new file mode 100644
index 00000000..1f78a001
--- /dev/null
+++ b/engine/runner/spec.go
@@ -0,0 +1,33 @@
+package runner
+
+import (
+ "fmt"
+
+ "github.com/drone/drone/engine/runner/parse"
+)
+
+// Spec defines the pipeline configuration and execution.
+type Spec struct {
+ // Volumes defines a list of all container volumes.
+ Volumes []*Volume `json:"volumes,omitempty"`
+
+ // Networks defines a list of all container networks.
+ Networks []*Network `json:"networks,omitempty"`
+
+ // Containers defines a list of all containers in the pipeline.
+ Containers []*Container `json:"containers,omitempty"`
+
+ // Nodes defines the container execution tree.
+ Nodes *parse.Tree `json:"program,omitempty"`
+}
+
+// lookupContainer is a helper function that returns the named container from
+// the slice of containers.
+func (s *Spec) lookupContainer(name string) (*Container, error) {
+ for _, container := range s.Containers {
+ if container.Name == name {
+ return container, nil
+ }
+ }
+ return nil, fmt.Errorf("runner: unknown container %s", name)
+}
diff --git a/engine/runner/spec_test.go b/engine/runner/spec_test.go
new file mode 100644
index 00000000..ba627000
--- /dev/null
+++ b/engine/runner/spec_test.go
@@ -0,0 +1,35 @@
+package runner
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestSpec(t *testing.T) {
+ g := goblin.Goblin(t)
+
+ g.Describe("Spec file", func() {
+
+ g.Describe("when looking up a container", func() {
+
+ spec := Spec{}
+ spec.Containers = append(spec.Containers, &Container{
+ Name: "golang",
+ })
+
+ g.It("should find and return the container", func() {
+ c, err := spec.lookupContainer("golang")
+ g.Assert(err == nil).IsTrue("error should be nil")
+ g.Assert(c).Equal(spec.Containers[0])
+ })
+
+ g.It("should return an error when not found", func() {
+ c, err := spec.lookupContainer("node")
+ g.Assert(err == nil).IsFalse("should return error")
+ g.Assert(c == nil).IsTrue("should return nil container")
+ })
+
+ })
+ })
+}
diff --git a/engine/shasum/shasum.go b/engine/shasum/shasum.go
new file mode 100644
index 00000000..3c6d504d
--- /dev/null
+++ b/engine/shasum/shasum.go
@@ -0,0 +1,72 @@
+package shasum
+
+import (
+ "crypto/sha1"
+ "crypto/sha256"
+ "crypto/sha512"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Check calculates and verifies a file checksum.
+// This supports the sha1, sha256 and sha512 values.
+func Check(in, checksum string) bool {
+ hash, size, _ := split(checksum)
+
+ // if a byte size is provided for the
+ // Yaml file it must match.
+ if size > 0 && int64(len(in)) != size {
+ return false
+ }
+
+ switch len(hash) {
+ case 64:
+ return sha256sum(in) == hash
+ case 128:
+ return sha512sum(in) == hash
+ case 40:
+ return sha1sum(in) == hash
+ case 0:
+ return true // if no checksum assume valid
+ }
+
+ return false
+}
+
+func sha1sum(in string) string {
+ h := sha1.New()
+ io.WriteString(h, in)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func sha256sum(in string) string {
+ h := sha256.New()
+ io.WriteString(h, in)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func sha512sum(in string) string {
+ h := sha512.New()
+ io.WriteString(h, in)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+func split(in string) (string, int64, string) {
+ var hash string
+ var name string
+ var size int64
+
+ // the checksum might be split into multiple
+ // sections including the file size and name.
+ switch strings.Count(in, " ") {
+ case 1:
+ fmt.Sscanf(in, "%s %s", &hash, &name)
+ case 2:
+ fmt.Sscanf(in, "%s %d %s", &hash, &size, &name)
+ default:
+ hash = in
+ }
+
+ return hash, size, name
+}
diff --git a/engine/shasum/shasum_test.go b/engine/shasum/shasum_test.go
new file mode 100644
index 00000000..ff7c4ccc
--- /dev/null
+++ b/engine/shasum/shasum_test.go
@@ -0,0 +1,97 @@
+package shasum
+
+import (
+ "testing"
+
+ "github.com/franela/goblin"
+)
+
+func TestParse(t *testing.T) {
+
+ g := goblin.Goblin(t)
+ g.Describe("Shasum", func() {
+
+ g.It("Should parse the shasum string", func() {
+ hash, _, _ := split("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
+ g.Assert(hash).Equal("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
+ })
+
+ g.It("Should parse a two-part shasum string", func() {
+ hash, _, name := split("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 .drone.yml")
+ g.Assert(hash).Equal("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
+ g.Assert(name).Equal(".drone.yml")
+ })
+
+ g.It("Should parse a three-part shasum string", func() {
+ hash, size, name := split("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 42 .drone.yml")
+ g.Assert(hash).Equal("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
+ g.Assert(name).Equal(".drone.yml")
+ g.Assert(size).Equal(int64(42))
+ })
+
+ g.It("Should calc a sha1 sum", func() {
+ hash := sha1sum("foo\n")
+ g.Assert(hash).Equal("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
+ })
+
+ g.It("Should calc a sha256 sum", func() {
+ hash := sha256sum("foo\n")
+ g.Assert(hash).Equal("b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c")
+ })
+
+ g.It("Should calc a sha512 sum", func() {
+ hash := sha512sum("foo\n")
+ g.Assert(hash).Equal("0cf9180a764aba863a67b6d72f0918bc131c6772642cb2dce5a34f0a702f9470ddc2bf125c12198b1995c233c34b4afd346c54a2334c350a948a51b6e8b4e6b6")
+ })
+
+ g.It("Should calc a sha1 sum", func() {
+ hash := sha1sum("foo\n")
+ g.Assert(hash).Equal("f1d2d2f924e986ac86fdf7b36c94bcdf32beec15")
+ })
+
+ g.It("Should validate sha1 sum with file size", func() {
+ ok := Check("foo\n", "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 4 -")
+ g.Assert(ok).IsTrue()
+ })
+
+ g.It("Should validate sha256 sum with file size", func() {
+ ok := Check("foo\n", "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c 4 -")
+ g.Assert(ok).IsTrue()
+ })
+
+ g.It("Should validate sha512 sum with file size", func() {
+ ok := Check("foo\n", "0cf9180a764aba863a67b6d72f0918bc131c6772642cb2dce5a34f0a702f9470ddc2bf125c12198b1995c233c34b4afd346c54a2334c350a948a51b6e8b4e6b6 4 -")
+ g.Assert(ok).IsTrue()
+ })
+
+ g.It("Should fail validation if incorrect sha1", func() {
+ ok := Check("bar\n", "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 4 -")
+ g.Assert(ok).IsFalse()
+ })
+
+ g.It("Should fail validation if incorrect sha256", func() {
+ ok := Check("bar\n", "b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c 4 -")
+ g.Assert(ok).IsFalse()
+ })
+
+ g.It("Should fail validation if incorrect sha512", func() {
+ ok := Check("bar\n", "0cf9180a764aba863a67b6d72f0918bc131c6772642cb2dce5a34f0a702f9470ddc2bf125c12198b1995c233c34b4afd346c54a2334c350a948a51b6e8b4e6b6 4 -")
+ g.Assert(ok).IsFalse()
+ })
+
+ g.It("Should return false if file size mismatch", func() {
+ ok := Check("foo\n", "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 12 -")
+ g.Assert(ok).IsFalse()
+ })
+
+ g.It("Should return false if invalid checksum string", func() {
+ ok := Check("foo\n", "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15234")
+ g.Assert(ok).IsFalse()
+ })
+
+ g.It("Should return true if empty checksum", func() {
+ ok := Check("foo\n", "")
+ g.Assert(ok).IsTrue()
+ })
+ })
+}
diff --git a/queue/context.go b/queue/context.go
new file mode 100644
index 00000000..98a78e9e
--- /dev/null
+++ b/queue/context.go
@@ -0,0 +1,23 @@
+package queue
+
+import (
+ "golang.org/x/net/context"
+)
+
+const key = "queue"
+
+// Setter defines a context that enables setting values.
+type Setter interface {
+ Set(string, interface{})
+}
+
+// FromContext returns the Queue associated with this context.
+func FromContext(c context.Context) Queue {
+ return c.Value(key).(Queue)
+}
+
+// ToContext adds the Queue to this context if it supports
+// the Setter interface.
+func ToContext(c Setter, q Queue) {
+ c.Set(key, q)
+}
diff --git a/queue/queue.go b/queue/queue.go
new file mode 100644
index 00000000..3399d198
--- /dev/null
+++ b/queue/queue.go
@@ -0,0 +1,67 @@
+package queue
+
+//go:generate mockery -name Queue -output mock -case=underscore
+
+import (
+ "errors"
+
+ "golang.org/x/net/context"
+)
+
+// ErrNotFound indicates the requested work item does not
+// exist in the queue.
+var ErrNotFound = errors.New("queue item not found")
+
+type Queue interface {
+ // Publish inserts work at the tail of this queue, waiting for
+ // space to become available if the queue is full.
+ Publish(*Work) error
+
+ // Remove removes the specified work item from this queue,
+ // if it is present.
+ Remove(*Work) error
+
+ // Pull retrieves and removes the head of this queue,
+ // waiting if necessary until work becomes available.
+ Pull() *Work
+
+ // PullClose retrieves and removes the head of this queue,
+ // waiting if necessary until work becomes available. The
+ // CloseNotifier should be provided to close the channel
+ // if the subscribing client terminates its connection.
+ PullClose(CloseNotifier) *Work
+}
+
+// Publish inserts work at the tail of this queue, waiting for
+// space to become available if the queue is full.
+func Publish(c context.Context, w *Work) error {
+ return FromContext(c).Publish(w)
+}
+
+// Remove removes the specified work item from this queue,
+// if it is present.
+func Remove(c context.Context, w *Work) error {
+ return FromContext(c).Remove(w)
+}
+
+// Pull retrieves and removes the head of this queue,
+// waiting if necessary until work becomes available.
+func Pull(c context.Context) *Work {
+ return FromContext(c).Pull()
+}
+
+// PullClose retrieves and removes the head of this queue,
+// waiting if necessary until work becomes available. The
+// CloseNotifier should be provided to close the channel
+// if the subscribing client terminates its connection.
+func PullClose(c context.Context, cn CloseNotifier) *Work {
+ return FromContext(c).PullClose(cn)
+}
+
+// CloseNotifier defines a datastructure that is capable of notifying
+// a subscriber when its connection is closed.
+type CloseNotifier interface {
+ // CloseNotify returns a channel that receives a single value
+ // when the client connection has gone away.
+ CloseNotify() <-chan bool
+}
diff --git a/queue/queue_impl.go b/queue/queue_impl.go
new file mode 100644
index 00000000..8882bc24
--- /dev/null
+++ b/queue/queue_impl.go
@@ -0,0 +1,85 @@
+package queue
+
+import "sync"
+
+type queue struct {
+ sync.Mutex
+
+ items map[*Work]struct{}
+ itemc chan *Work
+}
+
+func New() Queue {
+ return newQueue()
+}
+
+func newQueue() *queue {
+ return &queue{
+ items: make(map[*Work]struct{}),
+ itemc: make(chan *Work, 999),
+ }
+}
+
+func (q *queue) Publish(work *Work) error {
+ q.Lock()
+ q.items[work] = struct{}{}
+ q.Unlock()
+ q.itemc <- work
+ return nil
+}
+
+func (q *queue) Remove(work *Work) error {
+	q.Lock()
+	defer q.Unlock()
+
+	_, ok := q.items[work]
+	if !ok {
+		return ErrNotFound
+	}
+	delete(q.items, work)
+
+	// drain all buffered items from the
+	// channel so they can be filtered.
+	var items []*Work
+drain:
+	for {
+		select {
+		case item := <-q.itemc:
+			items = append(items, item)
+		default:
+			break drain
+		}
+	}
+
+	// re-add all items to the queue except
+	// the item we're trying to remove
+	for _, item := range items {
+		if item == work {
+			continue
+		}
+		q.itemc <- item
+	}
+	return nil
+}
+
+func (q *queue) Pull() *Work {
+ work := <-q.itemc
+ q.Lock()
+ delete(q.items, work)
+ q.Unlock()
+ return work
+}
+
+func (q *queue) PullClose(cn CloseNotifier) *Work {
+ for {
+ select {
+ case <-cn.CloseNotify():
+ return nil
+ case work := <-q.itemc:
+ q.Lock()
+ delete(q.items, work)
+ q.Unlock()
+ return work
+ }
+ }
+}
diff --git a/queue/queue_impl_test.go b/queue/queue_impl_test.go
new file mode 100644
index 00000000..77857623
--- /dev/null
+++ b/queue/queue_impl_test.go
@@ -0,0 +1,93 @@
+package queue
+
+import (
+ "sync"
+ "testing"
+
+ . "github.com/franela/goblin"
+ "github.com/gin-gonic/gin"
+)
+
+func TestBuild(t *testing.T) {
+ g := Goblin(t)
+ g.Describe("Queue", func() {
+
+ g.It("Should publish item", func() {
+ c := new(gin.Context)
+ q := newQueue()
+ ToContext(c, q)
+
+ w1 := &Work{}
+ w2 := &Work{}
+ Publish(c, w1)
+ Publish(c, w2)
+ g.Assert(len(q.items)).Equal(2)
+ g.Assert(len(q.itemc)).Equal(2)
+ })
+
+ g.It("Should remove item", func() {
+ c := new(gin.Context)
+ q := newQueue()
+ ToContext(c, q)
+
+ w1 := &Work{}
+ w2 := &Work{}
+ w3 := &Work{}
+ Publish(c, w1)
+ Publish(c, w2)
+ Publish(c, w3)
+ Remove(c, w2)
+ g.Assert(len(q.items)).Equal(2)
+ g.Assert(len(q.itemc)).Equal(2)
+
+ g.Assert(Pull(c)).Equal(w1)
+ g.Assert(Pull(c)).Equal(w3)
+ g.Assert(Remove(c, w2)).Equal(ErrNotFound)
+ })
+
+ g.It("Should pull item", func() {
+ c := new(gin.Context)
+ q := New()
+ ToContext(c, q)
+
+ cn := new(closeNotifier)
+ cn.closec = make(chan bool, 1)
+ w1 := &Work{}
+ w2 := &Work{}
+
+ Publish(c, w1)
+ g.Assert(Pull(c)).Equal(w1)
+
+ Publish(c, w2)
+ g.Assert(PullClose(c, cn)).Equal(w2)
+ })
+
+		g.It("Should cancel pulling item", func() {
+			c := new(gin.Context)
+			q := New()
+			ToContext(c, q)
+
+			cn := new(closeNotifier)
+			cn.closec = make(chan bool, 1)
+			var wg sync.WaitGroup
+			// Add before starting the goroutine so Wait cannot race it.
+			wg.Add(1)
+			go func() {
+				g.Assert(PullClose(c, cn) == nil).IsTrue()
+				wg.Done()
+			}()
+			go func() {
+				cn.closec <- true
+			}()
+			wg.Wait()
+		})
+ })
+}
+
+type closeNotifier struct {
+ closec chan bool
+}
+
+func (c *closeNotifier) CloseNotify() <-chan bool {
+ return c.closec
+}
diff --git a/queue/types.go b/queue/types.go
new file mode 100644
index 00000000..1740c7fe
--- /dev/null
+++ b/queue/types.go
@@ -0,0 +1,18 @@
+package queue
+
+import "github.com/drone/drone/model"
+
+// Work represents an item for work to be
+// processed by a worker.
+type Work struct {
+ Yaml string `json:"config"`
+ YamlEnc string `json:"secret"`
+ Repo *model.Repo `json:"repo"`
+ Build *model.Build `json:"build"`
+ BuildLast *model.Build `json:"build_last"`
+ Job *model.Job `json:"job"`
+ Netrc *model.Netrc `json:"netrc"`
+ Keys *model.Key `json:"keys"`
+ System *model.System `json:"system"`
+ User *model.User `json:"user"`
+}
diff --git a/remote/bitbucket/bitbucket.go b/remote/bitbucket/bitbucket.go
index 12d8a076..c37319a2 100644
--- a/remote/bitbucket/bitbucket.go
+++ b/remote/bitbucket/bitbucket.go
@@ -9,7 +9,6 @@ import (
"strconv"
"github.com/drone/drone/model"
- "github.com/drone/drone/shared/envconfig"
"github.com/drone/drone/shared/httputil"
log "github.com/Sirupsen/logrus"
@@ -24,8 +23,7 @@ type Bitbucket struct {
Open bool
}
-func Load(env envconfig.Env) *Bitbucket {
- config := env.String("REMOTE_CONFIG", "")
+func Load(config string) *Bitbucket {
// parse the remote DSN configuration string
url_, err := url.Parse(config)
@@ -416,10 +414,10 @@ func (bb *Bitbucket) pushHook(r *http.Request) (*model.Repo, *model.Build, error
// we only support tag and branch pushes for now
buildEventType := model.EventPush
buildRef := fmt.Sprintf("refs/heads/%s", change.New.Name)
- if change.New.Type == "tag" || change.New.Type == "annotated_tag" {
+ if change.New.Type == "tag" || change.New.Type == "annotated_tag" || change.New.Type == "bookmark" {
buildEventType = model.EventTag
buildRef = fmt.Sprintf("refs/tags/%s", change.New.Name)
- } else if change.New.Type != "branch" {
+ } else if change.New.Type != "branch" && change.New.Type != "named_branch" {
continue
}
diff --git a/remote/bitbucketserver/bitbucketserver.go b/remote/bitbucketserver/bitbucketserver.go
index afa02754..99e1552c 100644
--- a/remote/bitbucketserver/bitbucketserver.go
+++ b/remote/bitbucketserver/bitbucketserver.go
@@ -1,10 +1,8 @@
package bitbucketserver
// Requires the following to be set
-// GIT_USERNAME -> a username on the stash server that has access to clone all repos
-// GIT_USERPASSWORD -> password to the user that has access to clone all repos
// REMOTE_DRIVER=bitbucketserver
-// REMOTE_CONFIG=https://{servername}?consumer_key={key added on the stash server for oath1}&open={not used yet}
+// REMOTE_CONFIG=https://{servername}?consumer_key={key added on the stash server for oath1}&git_username={username for clone}&git_password={password for clone}&open={not used yet}
// Configure application links in the bitbucket server --
// application url needs to be the base url to drone
// incoming auth needs to have the consumer key (same as the key in REMOTE_CONFIG)
@@ -36,12 +34,7 @@ type BitbucketServer struct {
Open bool
}
-func Load(env envconfig.Env) *BitbucketServer{
-
- //Read
- config := env.String("REMOTE_CONFIG", "")
- gitUserName := env.String("GIT_USERNAME", "")
- gitUserPassword := env.String("GIT_USERPASSWORD","")
+func Load(config string) *BitbucketServer{
url_, err := url.Parse(config)
if err != nil {
@@ -53,8 +46,8 @@ func Load(env envconfig.Env) *BitbucketServer{
bitbucketserver := BitbucketServer{}
bitbucketserver.URL = url_.String()
- bitbucketserver.GitUserName = gitUserName
- bitbucketserver.GitPassword = gitUserPassword
+ bitbucketserver.GitUserName = params.Get("git_username")
+ bitbucketserver.GitPassword = params.Get("git_password")
bitbucketserver.ConsumerKey = params.Get("consumer_key")
bitbucketserver.Open, _ = strconv.ParseBool(params.Get("open"))
@@ -69,7 +62,7 @@ func (bs *BitbucketServer) Login(res http.ResponseWriter, req *http.Request) (*m
log.Info("getting the requestToken")
requestToken, url, err := c.GetRequestTokenAndUrl("oob")
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
var code = req.FormValue("oauth_verifier")
@@ -88,12 +81,12 @@ func (bs *BitbucketServer) Login(res http.ResponseWriter, req *http.Request) (*m
client, err := c.MakeHttpClient(accessToken)
if err != nil {
- log.Fatal(err)
+ log.error(err)
}
response, err := client.Get(bs.URL + "/plugins/servlet/applinks/whoami")
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
defer response.Body.Close()
bits, err := ioutil.ReadAll(response.Body)
@@ -133,7 +126,7 @@ func (bs *BitbucketServer) Repo(u *model.User, owner, name string) (*model.Repo,
log.Info("Trying to get " + url)
response, err := client.Get(url)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
@@ -159,9 +152,6 @@ func (bs *BitbucketServer) Repo(u *model.User, owner, name string) (*model.Repo,
repo.Link = repoLink
repo.Name=bsRepo.Slug
repo.Owner=bsRepo.Project.Key
- repo.AllowTag=false
- repo.AllowDeploy=false
- repo.AllowPull=false
repo.AllowPush=true
repo.FullName = bsRepo.Project.Key +"/" +bsRepo.Slug
repo.Branch = "master"
@@ -180,7 +170,7 @@ func (bs *BitbucketServer) Repos(u *model.User) ([]*model.RepoLite, error){
response, err := client.Get(bs.URL + "/rest/api/1.0/repos?limit=10000")
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
@@ -218,7 +208,7 @@ func (bs *BitbucketServer) File(u *model.User, r *model.Repo, b *model.Build, f
log.Info(fileURL)
response, err := client.Get(fileURL)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
if response.StatusCode == 404 {
return nil,nil
@@ -226,7 +216,7 @@ func (bs *BitbucketServer) File(u *model.User, r *model.Repo, b *model.Build, f
defer response.Body.Close()
responseBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
@@ -242,7 +232,7 @@ func (bs *BitbucketServer) Netrc(user *model.User, r *model.Repo) (*model.Netrc,
log.Info("Starting the Netrc lookup")
u, err := url.Parse(bs.URL)
if err != nil {
- panic(err)
+ return nil, err
}
return &model.Netrc{
Machine: u.Host,
diff --git a/remote/bitbucketserver/client.go b/remote/bitbucketserver/client.go
index b6598597..7604d1d4 100644
--- a/remote/bitbucketserver/client.go
+++ b/remote/bitbucketserver/client.go
@@ -15,13 +15,13 @@ func NewClient(ConsumerKey string, URL string) *oauth.Consumer{
privateKeyFileContents, err := ioutil.ReadFile("/private_key.pem")
log.Info("Tried to read the key")
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
block, _ := pem.Decode([]byte(privateKeyFileContents))
privateKey, err := x509.ParsePKCS1PrivateKey(block.Bytes)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
c := oauth.NewRSAConsumer(
@@ -49,7 +49,7 @@ func NewClientWithToken(ConsumerKey string, URL string, AccessToken string) *htt
token.Token = AccessToken
client, err := c.MakeHttpClient(&token)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
return client
}
diff --git a/remote/bitbucketserver/helper.go b/remote/bitbucketserver/helper.go
index 1a83c5fe..7a24eac9 100644
--- a/remote/bitbucketserver/helper.go
+++ b/remote/bitbucketserver/helper.go
@@ -15,7 +15,7 @@ func avatarLink(email string) (url string) {
hasher := md5.New()
hasher.Write([]byte(strings.ToLower(email)))
emailHash := fmt.Sprintf("%v", hex.EncodeToString(hasher.Sum(nil)))
- avatarURL := fmt.Sprintf("http://www.gravatar.com/avatar/%s.jpg",emailHash)
+ avatarURL := fmt.Sprintf("https://www.gravatar.com/avatar/%s.jpg",emailHash)
log.Info(avatarURL)
return avatarURL
}
@@ -25,12 +25,12 @@ func doPut(client *http.Client, url string, body []byte) {
request.Header.Add("Content-Type","application/json")
response, err := client.Do(request)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
} else {
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
fmt.Println("The calculated length is:", len(string(contents)), "for the url:", url)
fmt.Println(" ", response.StatusCode)
@@ -46,12 +46,12 @@ func doDelete(client *http.Client, url string) {
request, err := http.NewRequest("DELETE", url, nil)
response, err := client.Do(request)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
} else {
defer response.Body.Close()
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
- log.Fatal(err)
+ log.Error(err)
}
fmt.Println("The calculated length is:", len(string(contents)), "for the url:", url)
fmt.Println(" ", response.StatusCode)
diff --git a/remote/github/github.go b/remote/github/github.go
index 9ace2477..e935f20f 100644
--- a/remote/github/github.go
+++ b/remote/github/github.go
@@ -11,7 +11,6 @@ import (
"strings"
"github.com/drone/drone/model"
- "github.com/drone/drone/shared/envconfig"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/shared/oauth2"
@@ -42,8 +41,7 @@ type Github struct {
GitSSH bool
}
-func Load(env envconfig.Env) *Github {
- config := env.String("REMOTE_CONFIG", "")
+func Load(config string) *Github {
// parse the remote DSN configuration string
url_, err := url.Parse(config)
diff --git a/remote/github/github_test.go b/remote/github/github_test.go
index 50348e85..85ad3572 100644
--- a/remote/github/github_test.go
+++ b/remote/github/github_test.go
@@ -6,7 +6,6 @@ import (
"net/http"
"testing"
- "github.com/drone/drone/shared/envconfig"
"github.com/franela/goblin"
)
@@ -48,12 +47,11 @@ func TestHook(t *testing.T) {
}
func TestLoad(t *testing.T) {
- env := envconfig.Env{
- "REMOTE_CONFIG": "https://github.com?client_id=client&client_secret=secret&scope=scope1,scope2",
- }
- g := Load(env)
+ conf := "https://github.com?client_id=client&client_secret=secret&scope=scope1,scope2"
+
+ g := Load(conf)
if g.URL != "https://github.com" {
- t.Errorf("g.URL = %q; want https://github.com")
+ t.Errorf("g.URL = %q; want https://github.com", g.URL)
}
if g.Client != "client" {
t.Errorf("g.Client = %q; want client", g.Client)
@@ -71,7 +69,7 @@ func TestLoad(t *testing.T) {
t.Errorf("g.MergeRef = %q; want %q", g.MergeRef, DefaultMergeRef)
}
- g = Load(envconfig.Env{})
+ g = Load("")
if g.Scope != DefaultScope {
t.Errorf("g.Scope = %q; want %q", g.Scope, DefaultScope)
}
diff --git a/remote/gitlab/gitlab.go b/remote/gitlab/gitlab.go
index 1be25292..60ffc5ed 100644
--- a/remote/gitlab/gitlab.go
+++ b/remote/gitlab/gitlab.go
@@ -10,7 +10,6 @@ import (
"strings"
"github.com/drone/drone/model"
- "github.com/drone/drone/shared/envconfig"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/shared/oauth2"
"github.com/drone/drone/shared/token"
@@ -35,9 +34,7 @@ type Gitlab struct {
Search bool
}
-func Load(env envconfig.Env) *Gitlab {
- config := env.String("REMOTE_CONFIG", "")
-
+func Load(config string) *Gitlab {
url_, err := url.Parse(config)
if err != nil {
panic(err)
diff --git a/remote/gitlab/gitlab_test.go b/remote/gitlab/gitlab_test.go
index c6faa05d..944afbb7 100644
--- a/remote/gitlab/gitlab_test.go
+++ b/remote/gitlab/gitlab_test.go
@@ -15,8 +15,7 @@ func Test_Gitlab(t *testing.T) {
var server = testdata.NewServer()
defer server.Close()
- env := map[string]string{}
- env["REMOTE_CONFIG"] = server.URL + "?client_id=test&client_secret=test"
+ env := server.URL + "?client_id=test&client_secret=test"
gitlab := Load(env)
diff --git a/remote/gogs/gogs.go b/remote/gogs/gogs.go
index 1a3535a5..d30a6323 100644
--- a/remote/gogs/gogs.go
+++ b/remote/gogs/gogs.go
@@ -9,7 +9,6 @@ import (
"strconv"
"github.com/drone/drone/model"
- "github.com/drone/drone/shared/envconfig"
"github.com/gogits/go-gogs-client"
log "github.com/Sirupsen/logrus"
@@ -22,9 +21,7 @@ type Gogs struct {
SkipVerify bool
}
-func Load(env envconfig.Env) *Gogs {
- config := env.String("REMOTE_CONFIG", "")
-
+func Load(config string) *Gogs {
// parse the remote DSN configuration string
url_, err := url.Parse(config)
if err != nil {
diff --git a/remote/remote.go b/remote/remote.go
index b0a89832..875988d2 100644
--- a/remote/remote.go
+++ b/remote/remote.go
@@ -6,39 +6,10 @@ import (
"net/http"
"github.com/drone/drone/model"
- "github.com/drone/drone/remote/bitbucket"
- "github.com/drone/drone/remote/bitbucketserver"
- "github.com/drone/drone/remote/github"
- "github.com/drone/drone/remote/gitlab"
- "github.com/drone/drone/remote/gogs"
- "github.com/drone/drone/shared/envconfig"
- "github.com/Sirupsen/logrus"
"golang.org/x/net/context"
)
-func Load(env envconfig.Env) Remote {
- driver := env.Get("REMOTE_DRIVER")
-
- switch driver {
- case "bitbucket":
- return bitbucket.Load(env)
- case "github":
- return github.Load(env)
- case "gitlab":
- return gitlab.Load(env)
- case "gogs":
- return gogs.Load(env)
- case "bitbucketserver":
- return bitbucketserver.Load(env)
-
- default:
- logrus.Fatalf("unknown remote driver %s", driver)
- }
-
- return nil
-}
-
type Remote interface {
// Login authenticates the session and returns the
// remote user details.
diff --git a/router/middleware/cache.go b/router/middleware/cache.go
new file mode 100644
index 00000000..aa8d46b4
--- /dev/null
+++ b/router/middleware/cache.go
@@ -0,0 +1,22 @@
+package middleware
+
+import (
+ "time"
+
+ "github.com/drone/drone/cache"
+
+ "github.com/gin-gonic/gin"
+ "github.com/ianschenck/envflag"
+)
+
+var ttl = envflag.Duration("CACHE_TTL", time.Minute*15, "")
+
+// Cache is a middleware function that initializes the Cache and attaches to
+// the context of every http.Request.
+func Cache() gin.HandlerFunc {
+ cc := cache.NewTTL(*ttl)
+ return func(c *gin.Context) {
+ cache.ToContext(c, cc)
+ c.Next()
+ }
+}
diff --git a/router/middleware/cache/cache.go b/router/middleware/cache/cache.go
deleted file mode 100644
index 72fa0279..00000000
--- a/router/middleware/cache/cache.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package cache
-
-import (
- "github.com/drone/drone/cache"
- "github.com/gin-gonic/gin"
-)
-
-func Default() gin.HandlerFunc {
- cc := cache.Default()
- return func(c *gin.Context) {
- cache.ToContext(c, cc)
- c.Next()
- }
-}
diff --git a/router/middleware/context/context.go b/router/middleware/context/context.go
deleted file mode 100644
index 2c81ee45..00000000
--- a/router/middleware/context/context.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package context
-
-import (
- "github.com/drone/drone/engine"
- "github.com/drone/drone/remote"
- "github.com/drone/drone/store"
- "github.com/gin-gonic/gin"
-)
-
-func SetStore(s store.Store) gin.HandlerFunc {
- return func(c *gin.Context) {
- store.ToContext(c, s)
- c.Next()
- }
-}
-
-func SetRemote(remote remote.Remote) gin.HandlerFunc {
- return func(c *gin.Context) {
- c.Set("remote", remote)
- c.Next()
- }
-}
-
-func Remote(c *gin.Context) remote.Remote {
- return c.MustGet("remote").(remote.Remote)
-}
-
-func SetEngine(engine engine.Engine) gin.HandlerFunc {
- return func(c *gin.Context) {
- c.Set("engine", engine)
- c.Next()
- }
-}
-
-func Engine(c *gin.Context) engine.Engine {
- return c.MustGet("engine").(engine.Engine)
-}
diff --git a/router/middleware/engine.go b/router/middleware/engine.go
new file mode 100644
index 00000000..01da0706
--- /dev/null
+++ b/router/middleware/engine.go
@@ -0,0 +1,28 @@
+package middleware
+
+import (
+ "sync"
+
+ "github.com/drone/drone/engine"
+ "github.com/drone/drone/store"
+
+ "github.com/gin-gonic/gin"
+)
+
+// Engine is a middleware function that initializes the Engine and attaches to
+// the context of every http.Request.
+func Engine() gin.HandlerFunc {
+ var once sync.Once
+ var engine_ engine.Engine
+
+ return func(c *gin.Context) {
+
+ once.Do(func() {
+ store_ := store.FromContext(c)
+ engine_ = engine.Load(store_)
+ })
+
+ engine.ToContext(c, engine_)
+ c.Next()
+ }
+}
diff --git a/router/middleware/location/location.go b/router/middleware/location/location.go
deleted file mode 100644
index 9e529011..00000000
--- a/router/middleware/location/location.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package location
-
-import (
- "net/http"
- "strings"
-
- "github.com/gin-gonic/gin"
-)
-
-// Resolve is a middleware function that resolves the hostname
-// and scheme for the http.Request and adds to the context.
-func Resolve(c *gin.Context) {
- c.Set("host", resolveHost(c.Request))
- c.Set("scheme", resolveScheme(c.Request))
- c.Next()
-}
-
-// parseHeader parses non unique headers value
-// from a http.Request and return a slice of the values
-// queried from the header
-func parseHeader(r *http.Request, header string, token string) (val []string) {
- for _, v := range r.Header[header] {
- options := strings.Split(v, ";")
- for _, o := range options {
- keyvalue := strings.Split(o, "=")
- var key, value string
- if len(keyvalue) > 1 {
- key, value = strings.TrimSpace(keyvalue[0]), strings.TrimSpace(keyvalue[1])
- }
- key = strings.ToLower(key)
- if key == token {
- val = append(val, value)
- }
- }
- }
- return
-}
-
-// resolveScheme is a helper function that evaluates the http.Request
-// and returns the scheme, HTTP or HTTPS. It is able to detect,
-// using the X-Forwarded-Proto, if the original request was HTTPS
-// and routed through a reverse proxy with SSL termination.
-func resolveScheme(r *http.Request) string {
- switch {
- case r.URL.Scheme == "https":
- return "https"
- case r.TLS != nil:
- return "https"
- case strings.HasPrefix(r.Proto, "HTTPS"):
- return "https"
- case r.Header.Get("X-Forwarded-Proto") == "https":
- return "https"
- case len(r.Header.Get("Forwarded")) != 0 && len(parseHeader(r, "Forwarded", "proto")) != 0 && parseHeader(r, "Forwarded", "proto")[0] == "https":
- return "https"
- default:
- return "http"
- }
-}
-
-// resolveHost is a helper function that evaluates the http.Request
-// and returns the hostname. It is able to detect, using the
-// X-Forarded-For header, the original hostname when routed
-// through a reverse proxy.
-func resolveHost(r *http.Request) string {
- switch {
- case len(r.Host) != 0:
- return r.Host
- case len(r.URL.Host) != 0:
- return r.URL.Host
- case len(r.Header.Get("X-Forwarded-For")) != 0:
- return r.Header.Get("X-Forwarded-For")
- case len(r.Header.Get("Forwarded")) != 0 && len(parseHeader(r, "Forwarded", "for")) != 0:
- return parseHeader(r, "Forwarded", "for")[0]
- case len(r.Header.Get("X-Host")) != 0:
- return r.Header.Get("X-Host")
- case len(r.Header.Get("Forwarded")) != 0 && len(parseHeader(r, "Forwarded", "host")) != 0:
- return parseHeader(r, "Forwarded", "host")[0]
- case len(r.Header.Get("XFF")) != 0:
- return r.Header.Get("XFF")
- case len(r.Header.Get("X-Real-IP")) != 0:
- return r.Header.Get("X-Real-IP")
- default:
- return "localhost:8000"
- }
-}
-
-// Hostname returns the hostname associated with
-// the current context.
-func Hostname(c *gin.Context) (host string) {
- v, ok := c.Get("host")
- if ok {
- host = v.(string)
- }
- return
-}
diff --git a/router/middleware/location/location_test.go b/router/middleware/location/location_test.go
deleted file mode 100644
index 46ddca67..00000000
--- a/router/middleware/location/location_test.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package location
-
-import (
- "github.com/franela/goblin"
- "net/http"
- "reflect"
- "testing"
-)
-
-var mockHeader []string
-var mockRequest *http.Request
-
-var wronglyFormedHeader []string
-var wronglyFormedRequest *http.Request
-
-func init() {
- mockHeader = []string{"For= 110.0.2.2", "for = \"[::1]\"; Host=example.com; foR=10.2.3.4; pRoto =https ; By = 127.0.0.1"}
- mockRequest = &http.Request{Header: map[string][]string{"Forwarded": mockHeader}}
- wronglyFormedHeader = []string{"Fro= 110.0.2.2", "for = \"[:1]\"% Host=example:.com| foR=10.278.3.4% poto =https | Bi % 127.0.0.1", ""}
- wronglyFormedRequest = &http.Request{Header: map[string][]string{"Forwarded": wronglyFormedHeader}}
-}
-
-func TestParseForwardedHeadersProto(t *testing.T) {
- g := goblin.Goblin(t)
-
- g.Describe("Parse proto Forwarded Headers", func() {
- g.It("Should parse a normal proto Forwarded header", func() {
- parsedHeader := parseHeader(mockRequest, "Forwarded", "proto")
- g.Assert("https" == parsedHeader[0]).IsTrue()
- })
- g.It("Should parse a normal for Forwarded header", func() {
- parsedHeader := parseHeader(mockRequest, "Forwarded", "for")
- g.Assert(reflect.DeepEqual([]string{"110.0.2.2", "\"[::1]\"", "10.2.3.4"}, parsedHeader)).IsTrue()
- })
- g.It("Should parse a normal host Forwarded header", func() {
- parsedHeader := parseHeader(mockRequest, "Forwarded", "host")
- g.Assert("example.com" == parsedHeader[0]).IsTrue()
- })
- g.It("Should parse a normal by Forwarded header", func() {
- parsedHeader := parseHeader(mockRequest, "Forwarded", "by")
- g.Assert("127.0.0.1" == parsedHeader[0]).IsTrue()
- })
- g.It("Should not crash if a wrongly formed Forwarder header is sent", func() {
- parsedHeader := parseHeader(wronglyFormedRequest, "Forwarded", "by")
- g.Assert(len(parsedHeader) == 0).IsTrue()
- })
- })
-}
diff --git a/router/middleware/remote.go b/router/middleware/remote.go
new file mode 100644
index 00000000..5f245676
--- /dev/null
+++ b/router/middleware/remote.go
@@ -0,0 +1,48 @@
+package middleware
+
+import (
+	"github.com/drone/drone/remote"
+	"github.com/drone/drone/remote/bitbucket"
+	"github.com/drone/drone/remote/bitbucketserver"
+	"github.com/drone/drone/remote/github"
+	"github.com/drone/drone/remote/gitlab"
+	"github.com/drone/drone/remote/gogs"
+
+	"github.com/Sirupsen/logrus"
+	"github.com/gin-gonic/gin"
+	"github.com/ianschenck/envflag"
+)
+
+var (
+	driver = envflag.String("REMOTE_DRIVER", "", "")
+	config = envflag.String("REMOTE_CONFIG", "", "")
+)
+
+// Remote is a middleware function that initializes the Remote and attaches to
+// the context of every http.Request.
+func Remote() gin.HandlerFunc {
+
+	logrus.Infof("using remote driver %s", *driver)
+	logrus.Infof("using remote config %s", *config)
+
+	var remote_ remote.Remote
+	switch *driver {
+	case "github":
+		remote_ = github.Load(*config)
+	case "bitbucket":
+		remote_ = bitbucket.Load(*config)
+	case "gogs":
+		remote_ = gogs.Load(*config)
+	case "gitlab":
+		remote_ = gitlab.Load(*config)
+	case "bitbucketserver":
+		remote_ = bitbucketserver.Load(*config)
+	default:
+		logrus.Fatalln("remote configuration not found")
+	}
+
+	return func(c *gin.Context) {
+		remote.ToContext(c, remote_)
+		c.Next()
+	}
+}
diff --git a/router/middleware/store.go b/router/middleware/store.go
new file mode 100644
index 00000000..91439d43
--- /dev/null
+++ b/router/middleware/store.go
@@ -0,0 +1,29 @@
+package middleware
+
+import (
+ "github.com/drone/drone/store"
+ "github.com/drone/drone/store/datastore"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/gin-gonic/gin"
+ "github.com/ianschenck/envflag"
+)
+
+var (
+ database = envflag.String("DATABASE_DRIVER", "sqlite3", "")
+ datasource = envflag.String("DATABASE_CONFIG", "drone.sqlite", "")
+)
+
+// Store is a middleware function that initializes the Datastore and attaches to
+// the context of every http.Request.
+func Store() gin.HandlerFunc {
+ db := datastore.New(*database, *datasource)
+
+ logrus.Infof("using database driver %s", *database)
+ logrus.Infof("using database config %s", *datasource)
+
+ return func(c *gin.Context) {
+ store.ToContext(c, db)
+ c.Next()
+ }
+}
diff --git a/router/middleware/version.go b/router/middleware/version.go
new file mode 100644
index 00000000..b4de05f9
--- /dev/null
+++ b/router/middleware/version.go
@@ -0,0 +1,14 @@
+package middleware
+
+import (
+ "github.com/drone/drone/version"
+ "github.com/gin-gonic/gin"
+)
+
+// Version is a middleware function that appends the Drone
+// version information to the HTTP response. This is intended
+// for debugging and troubleshooting.
+func Version(c *gin.Context) {
+ c.Header("X-DRONE-VERSION", version.Version)
+ c.Next()
+}
diff --git a/router/router.go b/router/router.go
index 6d313646..84587352 100644
--- a/router/router.go
+++ b/router/router.go
@@ -8,7 +8,6 @@ import (
"github.com/drone/drone/api"
"github.com/drone/drone/router/middleware/header"
- "github.com/drone/drone/router/middleware/location"
"github.com/drone/drone/router/middleware/session"
"github.com/drone/drone/router/middleware/token"
"github.com/drone/drone/static"
@@ -17,11 +16,12 @@ import (
)
func Load(middleware ...gin.HandlerFunc) http.Handler {
- e := gin.Default()
+ e := gin.New()
+ e.Use(gin.Recovery())
+
e.SetHTMLTemplate(template.Load())
e.StaticFS("/static", static.FileSystem())
- e.Use(location.Resolve)
e.Use(header.NoCache)
e.Use(header.Options)
e.Use(header.Secure)
@@ -140,6 +140,13 @@ func Load(middleware ...gin.HandlerFunc) http.Handler {
stream.GET("/:owner/:name/:build/:number", web.GetStream)
}
+ bots := e.Group("/bots")
+ {
+ bots.Use(session.MustUser())
+ bots.POST("/slack", web.Slack)
+ bots.POST("/slack/:command", web.Slack)
+ }
+
auth := e.Group("/authorize")
{
auth.GET("", web.GetLogin)
@@ -170,7 +177,7 @@ func normalize(h http.Handler) http.Handler {
parts := strings.Split(r.URL.Path, "/")[1:]
switch parts[0] {
- case "settings", "repos", "api", "login", "logout", "", "authorize", "hook", "static", "gitlab":
+ case "settings", "bots", "repos", "api", "login", "logout", "", "authorize", "hook", "static", "gitlab":
// no-op
default:
diff --git a/shared/envconfig/envconfig.go b/shared/envconfig/envconfig.go
deleted file mode 100644
index 26059aac..00000000
--- a/shared/envconfig/envconfig.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package envconfig
-
-import (
- "bufio"
- "errors"
- "os"
- "strconv"
- "strings"
-)
-
-type Env map[string]string
-
-// Get returns the value of the environment variable named by the key.
-func (env Env) Get(key string) string {
- return env[key]
-}
-
-// String returns the string value of the environment variable named by the
-// key. If the variable is not present, the default value is returned.
-func (env Env) String(key, value string) string {
- got, ok := env[key]
- if ok {
- value = got
- }
- return value
-}
-
-// Bool returns the boolean value of the environment variable named by the key.
-// If the variable is not present, the default value is returned.
-func (env Env) Bool(name string, value bool) bool {
- got, ok := env[name]
- if ok {
- value, _ = strconv.ParseBool(got)
- }
- return value
-}
-
-// Int returns the integer value of the environment variable named by the key.
-// If the variable is not present, the default value is returned.
-func (env Env) Int(name string, value int) int {
- got, ok := env[name]
- if ok {
- value, _ = strconv.Atoi(got)
- }
- return value
-}
-
-// Load reads the environment file and reads variables in "key=value" format.
-// Then it read the system environment variables. It returns the combined
-// results in a key value map.
-func Load(filepath string) Env {
- var envs = map[string]string{}
-
- // load the environment file
- f, err := os.Open(filepath)
- if err == nil {
- defer f.Close()
-
- r := bufio.NewReader(f)
- for {
- line, _, err := r.ReadLine()
- if err != nil {
- break
- }
-
- key, val, err := parseln(string(line))
- if err != nil {
- continue
- }
-
- os.Setenv(key, val)
- }
- }
-
- // load the environment variables
- for _, env := range os.Environ() {
- key, val, err := parseln(env)
- if err != nil {
- continue
- }
-
- envs[key] = val
- }
-
- return Env(envs)
-}
-
-// helper function to parse a "key=value" environment variable string.
-func parseln(line string) (key string, val string, err error) {
- line = removeComments(line)
- if len(line) == 0 {
- return
- }
- splits := strings.SplitN(line, "=", 2)
-
- if len(splits) < 2 {
- err = errors.New("missing delimiter '='")
- return
- }
-
- key = strings.Trim(splits[0], " ")
- val = strings.Trim(splits[1], ` "'`)
- return
-}
-
-// helper function to trim comments and whitespace from a string.
-func removeComments(s string) (_ string) {
- if len(s) == 0 || string(s[0]) == "#" {
- return
- } else {
- index := strings.Index(s, " #")
- if index > -1 {
- s = strings.TrimSpace(s[0:index])
- }
- }
- return s
-}
diff --git a/shared/server/server.go b/shared/server/server.go
deleted file mode 100644
index 9c3fafe8..00000000
--- a/shared/server/server.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package server
-
-import (
- "net/http"
-
- log "github.com/Sirupsen/logrus"
- "github.com/drone/drone/shared/envconfig"
-)
-
-type Server struct {
- Addr string
- Cert string
- Key string
-}
-
-func Load(env envconfig.Env) *Server {
- return &Server{
- Addr: env.String("SERVER_ADDR", ":8000"),
- Cert: env.String("SERVER_CERT", ""),
- Key: env.String("SERVER_KEY", ""),
- }
-}
-
-func (s *Server) Run(handler http.Handler) {
- log.Infof("starting server %s", s.Addr)
-
- if len(s.Cert) != 0 {
- log.Fatal(
- http.ListenAndServeTLS(s.Addr, s.Cert, s.Key, handler),
- )
- } else {
- log.Fatal(
- http.ListenAndServe(s.Addr, handler),
- )
- }
-}
diff --git a/store/datastore/store.go b/store/datastore/store.go
index 4619c082..18169d6a 100644
--- a/store/datastore/store.go
+++ b/store/datastore/store.go
@@ -5,7 +5,6 @@ import (
"os"
"time"
- "github.com/drone/drone/shared/envconfig"
"github.com/drone/drone/store"
"github.com/drone/drone/store/datastore/ddl"
_ "github.com/go-sql-driver/mysql"
@@ -23,20 +22,6 @@ type datastore struct {
*sql.DB
}
-// Load opens a new database connection with the specified driver
-// and connection string specified in the environment variables.
-func Load(env envconfig.Env) store.Store {
- var (
- driver = env.String("DATABASE_DRIVER", "sqlite3")
- config = env.String("DATABASE_CONFIG", "drone.sqlite")
- )
-
- logrus.Infof("using database driver %s", driver)
- logrus.Infof("using database config %s", config)
-
- return New(driver, config)
-}
-
// New creates a database connection for the given driver and datasource
// and returns a new Store.
func New(driver, config string) store.Store {
diff --git a/stream/context.go b/stream/context.go
new file mode 100644
index 00000000..9b89accf
--- /dev/null
+++ b/stream/context.go
@@ -0,0 +1,21 @@
+package stream
+
+import "golang.org/x/net/context"
+
+const key = "stream"
+
+// Setter defines a context that enables setting values.
+type Setter interface {
+ Set(string, interface{})
+}
+
+// FromContext returns the Mux associated with this context.
+func FromContext(c context.Context) Mux {
+ return c.Value(key).(Mux)
+}
+
+// ToContext adds the Mux to this context if it supports
+// the Setter interface.
+func ToContext(c Setter, m Mux) {
+ c.Set(key, m)
+}
diff --git a/stream/stream.go b/stream/stream.go
new file mode 100644
index 00000000..787ea48e
--- /dev/null
+++ b/stream/stream.go
@@ -0,0 +1,73 @@
+package stream
+
+//go:generate mockery -name Mux -output mock -case=underscore
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+
+ "golang.org/x/net/context"
+)
+
+// Mux defines a stream multiplexer
+type Mux interface {
+ // Create creates and returns a new stream identified by
+ // the specified key.
+ Create(key string) (io.ReadCloser, io.WriteCloser, error)
+
+ // Open returns the existing stream by key. If the stream
+ // does not exist an error is returned.
+ Open(key string) (io.ReadCloser, io.WriteCloser, error)
+
+ // Remove deletes the stream by key.
+ Remove(key string) error
+
+ // Exists returns true if the stream exists.
+ Exists(key string) bool
+}
+
+// Create creates and returns a new stream identified
+// by the specified key.
+func Create(c context.Context, key string) (io.ReadCloser, io.WriteCloser, error) {
+ return FromContext(c).Create(key)
+}
+
+// Open returns the existing stream by key. If the stream does
+// not exist an error is returned.
+func Open(c context.Context, key string) (io.ReadCloser, io.WriteCloser, error) {
+ return FromContext(c).Open(key)
+}
+
+// Exists returns true if the stream exists.
+func Exists(c context.Context, key string) bool {
+ return FromContext(c).Exists(key)
+}
+
+// Remove deletes the stream by key.
+func Remove(c context.Context, key string) error {
+ return FromContext(c).Remove(key)
+}
+
+// ToKey is a helper function that converts a unique identifier
+// of type int64 into a string.
+func ToKey(i int64) string {
+ return strconv.FormatInt(i, 10)
+}
+
+// Copy copies the stream from the source to the destination in
+// valid JSON format. This converts the logs, which are per-line
+// JSON objects, to a JSON array.
+func Copy(dest io.Writer, src io.Reader) error {
+ io.WriteString(dest, "[")
+
+ scanner := bufio.NewScanner(src)
+ for scanner.Scan() {
+ io.WriteString(dest, scanner.Text())
+ io.WriteString(dest, ",\n")
+ }
+
+ io.WriteString(dest, "{}]")
+
+ return nil
+}
diff --git a/stream/stream_impl.go b/stream/stream_impl.go
new file mode 100644
index 00000000..a25a3b15
--- /dev/null
+++ b/stream/stream_impl.go
@@ -0,0 +1,96 @@
+package stream
+
+import (
+ "io"
+ "sync"
+
+ "github.com/djherbis/fscache"
+)
+
+var noexp fscache.Reaper
+
+// New creates a new Mux using an in-memory filesystem.
+func New() Mux {
+ fs := fscache.NewMemFs()
+ c, err := fscache.NewCache(fs, noexp)
+ if err != nil {
+ panic(err)
+ }
+ return &mux{c}
+}
+
+// NewFileSystem creates a new Mux using a persistent filesystem.
+func NewFileSystem(path string) Mux {
+ fs, err := fscache.NewFs(path, 0777)
+ if err != nil {
+ panic(err)
+ }
+ c, err := fscache.NewCache(fs, noexp)
+ if err != nil {
+ panic(err)
+ }
+ return &mux{c}
+}
+
+// mux wraps the default fscache.Cache to match the
+// defined interface and to wrap the ReadCloser and
+// WriteCloser to avoid panics when we over-aggressively
+// close streams.
+type mux struct {
+ cache fscache.Cache
+}
+
+func (m *mux) Create(key string) (io.ReadCloser, io.WriteCloser, error) {
+ rc, wc, err := m.cache.Get(key)
+ if rc != nil {
+ rc = &closeOnceReader{ReaderAt: rc, ReadCloser: rc}
+ }
+ if wc != nil {
+ wc = &closeOnceWriter{WriteCloser: wc}
+ }
+ return rc, wc, err
+}
+
+func (m *mux) Open(key string) (io.ReadCloser, io.WriteCloser, error) {
+ return m.Create(key)
+}
+
+func (m *mux) Exists(key string) bool {
+ return m.cache.Exists(key)
+}
+func (m *mux) Remove(key string) error {
+ return m.cache.Remove(key)
+}
+
+// closeOnceReader is a helper type that ensures
+// the reader is only closed once. This is because
+// attempting to close the fscache reader more than
+// once results in a panic.
+type closeOnceReader struct {
+ io.ReaderAt
+ io.ReadCloser
+ once sync.Once
+}
+
+func (c *closeOnceReader) Close() error {
+ c.once.Do(func() {
+ c.ReadCloser.Close()
+ })
+ return nil
+}
+
+// closeOnceWriter is a helper type that ensures
+// the writer is only closed once. This is because
+// attempting to close the fscache writer more than
+// once results in a panic.
+type closeOnceWriter struct {
+ io.WriteCloser
+ once sync.Once
+}
+
+func (c *closeOnceWriter) Close() error {
+ c.once.Do(func() {
+ c.WriteCloser.Close()
+ })
+ return nil
+}
diff --git a/stream/stream_impl_test.go b/stream/stream_impl_test.go
new file mode 100644
index 00000000..11541cc2
--- /dev/null
+++ b/stream/stream_impl_test.go
@@ -0,0 +1 @@
+package stream
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 00000000..f2c2bc21
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,66 @@
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
index ef6501f1..6e1721a7 100644
--- a/vendor/github.com/Sirupsen/logrus/README.md
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -1,4 +1,4 @@
-# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![godoc reference](https://godoc.org/github.com/Sirupsen/logrus?status.png)][godoc]
+# Logrus [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
@@ -12,7 +12,7 @@ plain text):
![Colored](http://i.imgur.com/PY7qMwd.png)
-With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@@ -32,16 +32,18 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
-With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
-time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
-time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
-time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
-time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
-time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
```
#### Example
@@ -73,17 +75,12 @@ package main
import (
"os"
log "github.com/Sirupsen/logrus"
- "github.com/Sirupsen/logrus/hooks/airbrake"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(&logrus_airbrake.AirbrakeHook{})
-
// Output to stderr instead of stdout, could also be a file.
log.SetOutput(os.Stderr)
@@ -106,6 +103,16 @@ func main() {
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
}
```
@@ -164,54 +171,22 @@ You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
-```go
-// Not the real implementation of the Airbrake hook. Just a simple sample.
-import (
- log "github.com/Sirupsen/logrus"
-)
-
-func init() {
- log.AddHook(new(AirbrakeHook))
-}
-
-type AirbrakeHook struct{}
-
-// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
-// the fields for the entry. See the Fields section of the README.
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
- err := airbrake.Notify(entry.Data["error"].(error))
- if err != nil {
- log.WithFields(log.Fields{
- "source": "airbrake",
- "endpoint": airbrake.Endpoint,
- }).Info("Failed to send error to Airbrake")
- }
-
- return nil
-}
-
-// `Levels()` returns a slice of `Levels` the hook is fired for.
-func (hook *AirbrakeHook) Levels() []log.Level {
- return []log.Level{
- log.ErrorLevel,
- log.FatalLevel,
- log.PanicLevel,
- }
-}
-```
-
-Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
```go
import (
log "github.com/Sirupsen/logrus"
- "github.com/Sirupsen/logrus/hooks/airbrake"
- "github.com/Sirupsen/logrus/hooks/syslog"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
- log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
@@ -221,26 +196,37 @@ func init() {
}
}
```
+Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
- Send errors to an exception tracking service compatible with the Airbrake API.
- Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
-* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
- Send errors to the Papertrail hosted logging service via UDP.
-
-* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
- Send errors to remote syslog server.
- Uses standard library `log/syslog` behind the scenes.
-
-* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
- Send errors to a channel in hipchat.
-
-* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
- Send logs to Loggly (https://www.loggly.com/)
-
-* [`github.com/johntdyer/slackrus`](https://github.com/johntdyer/slackrus)
- Hook for Slack chat.
#### Level logging
@@ -296,10 +282,10 @@ init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
- log.SetFormatter(logrus.JSONFormatter)
+ log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(logrus.TextFormatter)
+ log.SetFormatter(&log.TextFormatter{})
}
}
```
@@ -318,10 +304,16 @@ The built-in logging formatters are:
field to `true`. To force no colored output even if there is a TTY set the
`DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
+* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
+
+ ```go
+ logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
+ ```
Third party logging formatters:
-* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -334,7 +326,7 @@ type MyJSONFormatter struct {
log.SetFormatter(new(MyJSONFormatter))
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
@@ -348,7 +340,7 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
#### Logger as an `io.Writer`
-Logrus can be transormed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
```go
w := logger.Writer()
@@ -370,5 +362,27 @@ Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
+#### Tools
-[godoc]: https://godoc.org/github.com/Sirupsen/logrus
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+
+#### Testing
+
+Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+logger, hook := NewNullLogger()
+logger.Error("Hello error")
+
+assert.Equal(1, len(hook.Entries))
+assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
+assert.Equal("Hello error", hook.LastEntry().Message)
+
+hook.Reset()
+assert.Nil(hook.LastEntry())
+```
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 00000000..dddd5f87
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/Sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/Sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
index 17fe6f70..89e966e7 100644
--- a/vendor/github.com/Sirupsen/logrus/entry.go
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -8,6 +8,9 @@ import (
"time"
)
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
@@ -53,6 +56,11 @@ func (entry *Entry) String() (string, error) {
return reader.String(), err
}
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
+}
+
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
@@ -60,7 +68,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
- data := Fields{}
+ data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
@@ -70,12 +78,14 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
return &Entry{Logger: entry.Logger, Data: data}
}
-func (entry *Entry) log(level Level, msg string) {
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
- if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
@@ -100,7 +110,7 @@ func (entry *Entry) log(level Level, msg string) {
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
- panic(entry)
+ panic(&entry)
}
}
@@ -188,6 +198,7 @@ func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
+ os.Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
@@ -234,6 +245,7 @@ func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
+ os.Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
index fd092fc7..9a0120ac 100644
--- a/vendor/github.com/Sirupsen/logrus/exported.go
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -36,6 +36,8 @@ func SetLevel(level Level) {
// GetLevel returns the standard logger level.
func GetLevel() Level {
+ std.mu.Lock()
+ defer std.mu.Unlock()
return std.Level
}
@@ -46,6 +48,11 @@ func AddHook(hook Hook) {
std.Hooks.Add(hook)
}
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
index 038ce9fd..104d689f 100644
--- a/vendor/github.com/Sirupsen/logrus/formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -1,5 +1,9 @@
package logrus
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
index 0da2b365..3f151cdc 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks.go
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -11,11 +11,11 @@ type Hook interface {
}
// Internal type for storing the hooks on a logger instance.
-type levelHooks map[Level][]Hook
+type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks levelHooks) Add(hook Hook) {
+func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
@@ -23,7 +23,7 @@ func (hooks levelHooks) Add(hook Hook) {
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
-func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
index b09227c2..2ad6dc5c 100644
--- a/vendor/github.com/Sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -3,18 +3,33 @@ package logrus
import (
"encoding/json"
"fmt"
- "time"
)
-type JSONFormatter struct{}
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
- data[k] = v
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/Sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
}
prefixFieldClashes(data)
- data["time"] = entry.Time.Format(time.RFC3339)
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ data["time"] = entry.Time.Format(timestampFormat)
data["msg"] = entry.Message
data["level"] = entry.Level.String()
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
index b392e547..2fdb2317 100644
--- a/vendor/github.com/Sirupsen/logrus/logger.go
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -8,13 +8,13 @@ import (
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
- // file, or leave it default which is `os.Stdout`. You can also set this to
+ // file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventorous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
- Hooks levelHooks
+ Hooks LevelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
@@ -37,23 +37,23 @@ type Logger struct {
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
-// Hooks: make(levelHooks),
+// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
- Out: os.Stdout,
+ Out: os.Stderr,
Formatter: new(TextFormatter),
- Hooks: make(levelHooks),
+ Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
// Adds a field to the log entry, note that you it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
-// Ff you want multiple fields, use `WithFields`.
+// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
return NewEntry(logger).WithField(key, value)
}
@@ -64,12 +64,22 @@ func (logger *Logger) WithFields(fields Fields) *Entry {
return NewEntry(logger).WithFields(fields)
}
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ return NewEntry(logger).WithError(err)
+}
+
func (logger *Logger) Debugf(format string, args ...interface{}) {
- NewEntry(logger).Debugf(format, args...)
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugf(format, args...)
+ }
}
func (logger *Logger) Infof(format string, args ...interface{}) {
- NewEntry(logger).Infof(format, args...)
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infof(format, args...)
+ }
}
func (logger *Logger) Printf(format string, args ...interface{}) {
@@ -77,31 +87,46 @@ func (logger *Logger) Printf(format string, args ...interface{}) {
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
- NewEntry(logger).Warnf(format, args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
- NewEntry(logger).Warnf(format, args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnf(format, args...)
+ }
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
- NewEntry(logger).Errorf(format, args...)
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorf(format, args...)
+ }
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
- NewEntry(logger).Fatalf(format, args...)
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalf(format, args...)
+ }
+ os.Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
- NewEntry(logger).Panicf(format, args...)
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicf(format, args...)
+ }
}
func (logger *Logger) Debug(args ...interface{}) {
- NewEntry(logger).Debug(args...)
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debug(args...)
+ }
}
func (logger *Logger) Info(args ...interface{}) {
- NewEntry(logger).Info(args...)
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Info(args...)
+ }
}
func (logger *Logger) Print(args ...interface{}) {
@@ -109,31 +134,46 @@ func (logger *Logger) Print(args ...interface{}) {
}
func (logger *Logger) Warn(args ...interface{}) {
- NewEntry(logger).Warn(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
}
func (logger *Logger) Warning(args ...interface{}) {
- NewEntry(logger).Warn(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warn(args...)
+ }
}
func (logger *Logger) Error(args ...interface{}) {
- NewEntry(logger).Error(args...)
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Error(args...)
+ }
}
func (logger *Logger) Fatal(args ...interface{}) {
- NewEntry(logger).Fatal(args...)
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatal(args...)
+ }
+ os.Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
- NewEntry(logger).Panic(args...)
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panic(args...)
+ }
}
func (logger *Logger) Debugln(args ...interface{}) {
- NewEntry(logger).Debugln(args...)
+ if logger.Level >= DebugLevel {
+ NewEntry(logger).Debugln(args...)
+ }
}
func (logger *Logger) Infoln(args ...interface{}) {
- NewEntry(logger).Infoln(args...)
+ if logger.Level >= InfoLevel {
+ NewEntry(logger).Infoln(args...)
+ }
}
func (logger *Logger) Println(args ...interface{}) {
@@ -141,21 +181,32 @@ func (logger *Logger) Println(args ...interface{}) {
}
func (logger *Logger) Warnln(args ...interface{}) {
- NewEntry(logger).Warnln(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
}
func (logger *Logger) Warningln(args ...interface{}) {
- NewEntry(logger).Warnln(args...)
+ if logger.Level >= WarnLevel {
+ NewEntry(logger).Warnln(args...)
+ }
}
func (logger *Logger) Errorln(args ...interface{}) {
- NewEntry(logger).Errorln(args...)
+ if logger.Level >= ErrorLevel {
+ NewEntry(logger).Errorln(args...)
+ }
}
func (logger *Logger) Fatalln(args ...interface{}) {
- NewEntry(logger).Fatalln(args...)
+ if logger.Level >= FatalLevel {
+ NewEntry(logger).Fatalln(args...)
+ }
+ os.Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
- NewEntry(logger).Panicln(args...)
+ if logger.Level >= PanicLevel {
+ NewEntry(logger).Panicln(args...)
+ }
}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
index 43ee12e9..e5966911 100644
--- a/vendor/github.com/Sirupsen/logrus/logrus.go
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -3,6 +3,7 @@ package logrus
import (
"fmt"
"log"
+ "strings"
)
// Fields type, used to pass to `WithFields`.
@@ -33,7 +34,7 @@ func (level Level) String() string {
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
- switch lvl {
+ switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
@@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
@@ -74,7 +85,11 @@ const (
)
// Won't compile if StdLogger can't be realized by a log.Logger
-var _ StdLogger = &log.Logger{}
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
@@ -92,3 +107,37 @@ type StdLogger interface {
Panicf(string, ...interface{})
Panicln(...interface{})
}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
similarity index 68%
rename from vendor/github.com/Sirupsen/logrus/terminal_openbsd.go
rename to vendor/github.com/Sirupsen/logrus/terminal_bsd.go
index d238bfa0..71f8d67a 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_openbsd.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -1,3 +1,4 @@
+// +build darwin freebsd openbsd netbsd dragonfly
package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
deleted file mode 100644
index 8fe02a4a..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
deleted file mode 100644
index 0428ee5d..00000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
-*/
-package logrus
-
-import (
- "syscall"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios struct {
- Iflag uint32
- Oflag uint32
- Cflag uint32
- Lflag uint32
- Cc [20]uint8
- Ispeed uint32
- Ospeed uint32
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
index b8bebc13..b343b3a3 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -3,7 +3,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build linux darwin freebsd openbsd
+// +build linux darwin freebsd openbsd netbsd dragonfly
package logrus
@@ -12,9 +12,9 @@ import (
"unsafe"
)
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
- fd := syscall.Stdout
+ fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 00000000..3e70bf7b
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,15 @@
+// +build solaris
+
+package logrus
+
+import (
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal() bool {
+ _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
+ return err == nil
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
index 2e09f6f7..0146845d 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -18,9 +18,9 @@ var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
-// IsTerminal returns true if the given file descriptor is a terminal.
+// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
- fd := syscall.Stdout
+ fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
index 4e517341..06ef2023 100644
--- a/vendor/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -3,7 +3,7 @@ package logrus
import (
"bytes"
"fmt"
- "regexp"
+ "runtime"
"sort"
"strings"
"time"
@@ -15,12 +15,12 @@ const (
green = 32
yellow = 33
blue = 34
+ gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
- noQuoteNeeded *regexp.Regexp
)
func init() {
@@ -34,35 +34,59 @@ func miniTS() int {
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
- ForceColors bool
+ ForceColors bool
+
+ // Force disabling colors.
DisableColors bool
- // Set to true to disable timestamp logging (useful when the output
- // is redirected to a logging system already adding a timestamp)
+
+ // Disable timestamp logging. useful when output is redirected to logging
+ // system that already adds timestamps.
DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-
var keys []string = make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
- sort.Strings(keys)
+
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
b := &bytes.Buffer{}
prefixFieldClashes(entry.Data)
- isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+ isColorTerminal := isTerminal && (runtime.GOOS != "windows")
+ isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
if isColored {
- printColored(b, entry, keys)
+ f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
- f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
- f.appendKeyValue(b, "msg", entry.Message)
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
@@ -72,9 +96,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil
}
-func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
@@ -85,10 +111,14 @@ func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
levelText := strings.ToUpper(entry.Level.String())[0:4]
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
for _, k := range keys {
v := entry.Data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
}
}
@@ -96,7 +126,7 @@ func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
- (ch >= '0' && ch < '9') ||
+ (ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return false
}
@@ -104,21 +134,28 @@ func needsQuoting(text string) bool {
return true
}
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
- switch value.(type) {
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+
+ switch value := value.(type) {
case string:
- if needsQuoting(value.(string)) {
- fmt.Fprintf(b, "%v=%s ", key, value)
+ if needsQuoting(value) {
+ b.WriteString(value)
} else {
- fmt.Fprintf(b, "%v=%q ", key, value)
+ fmt.Fprintf(b, "%q", value)
}
case error:
- if needsQuoting(value.(error).Error()) {
- fmt.Fprintf(b, "%v=%s ", key, value)
+ errmsg := value.Error()
+ if needsQuoting(errmsg) {
+ b.WriteString(errmsg)
} else {
- fmt.Fprintf(b, "%v=%q ", key, value)
+ fmt.Fprintf(b, "%q", value)
}
default:
- fmt.Fprintf(b, "%v=%v ", key, value)
+ fmt.Fprint(b, value)
}
+
+ b.WriteByte(' ')
}
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
index 90d3e01b..1e30b1c7 100644
--- a/vendor/github.com/Sirupsen/logrus/writer.go
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -6,7 +6,7 @@ import (
"runtime"
)
-func (logger *Logger) Writer() (*io.PipeWriter) {
+func (logger *Logger) Writer() *io.PipeWriter {
reader, writer := io.Pipe()
go logger.writerScanner(reader)
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE b/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
new file mode 100644
index 00000000..d9a10c0d
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/LICENSE
@@ -0,0 +1,176 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md b/vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
new file mode 100644
index 00000000..266c28c7
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/README.md
@@ -0,0 +1,57 @@
+[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
+
+candiedyaml
+===========
+
+YAML for Go
+
+A YAML 1.1 parser with support for YAML 1.2 features
+
+Usage
+-----
+
+```go
+package myApp
+
+import (
+ "github.com/cloudfoundry-incubator/candiedyaml"
+ "fmt"
+ "os"
+)
+
+func main() {
+ file, err := os.Open("path/to/some/file.yml")
+ if err != nil {
+ println("File does not exist:", err.Error())
+ os.Exit(1)
+ }
+ defer file.Close()
+
+ document := new(interface{})
+ decoder := candiedyaml.NewDecoder(file)
+ err = decoder.Decode(document)
+
+ if err != nil {
+ println("Failed to decode document:", err.Error())
+ }
+
+ println("parsed yml into interface:", fmt.Sprintf("%#v", document))
+
+ fileToWrite, err := os.Create("path/to/some/new/file.yml")
+ if err != nil {
+ println("Failed to open file for writing:", err.Error())
+ os.Exit(1)
+ }
+ defer fileToWrite.Close()
+
+ encoder := candiedyaml.NewEncoder(fileToWrite)
+ err = encoder.Encode(document)
+
+ if err != nil {
+ println("Failed to encode document:", err.Error())
+ os.Exit(1)
+ }
+
+ return
+}
+```
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
new file mode 100644
index 00000000..87c1043e
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/api.go
@@ -0,0 +1,834 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "io"
+)
+
+/*
+ * Create a new parser object.
+ */
+
+func yaml_parser_initialize(parser *yaml_parser_t) bool {
+ *parser = yaml_parser_t{
+ raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
+ buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
+ }
+
+ return true
+}
+
+/*
+ * Destroy a parser object.
+ */
+func yaml_parser_delete(parser *yaml_parser_t) {
+ *parser = yaml_parser_t{}
+}
+
+/*
+ * String read handler.
+ */
+
+func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
+ if parser.input_pos == len(parser.input) {
+ return 0, io.EOF
+ }
+
+ n := copy(buffer, parser.input[parser.input_pos:])
+ parser.input_pos += n
+ return n, nil
+}
+
+/*
+ * File read handler.
+ */
+
+func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
+ return parser.input_reader.Read(buffer)
+}
+
+/*
+ * Set a string input.
+ */
+
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
+ if parser.read_handler != nil {
+ panic("input already set")
+ }
+
+ parser.read_handler = yaml_string_read_handler
+
+ parser.input = input
+ parser.input_pos = 0
+}
+
+/*
+ * Set a reader input
+ */
+func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
+ if parser.read_handler != nil {
+ panic("input already set")
+ }
+
+ parser.read_handler = yaml_file_read_handler
+ parser.input_reader = reader
+}
+
+/*
+ * Set a generic input.
+ */
+
+func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
+ if parser.read_handler != nil {
+ panic("input already set")
+ }
+
+ parser.read_handler = handler
+}
+
+/*
+ * Set the source encoding.
+ */
+
+func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
+ if parser.encoding != yaml_ANY_ENCODING {
+ panic("encoding already set")
+ }
+
+ parser.encoding = encoding
+}
+
+/*
+ * Create a new emitter object.
+ */
+
+func yaml_emitter_initialize(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{
+ buffer: make([]byte, OUTPUT_BUFFER_SIZE),
+ raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
+ states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
+ events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
+ }
+}
+
+func yaml_emitter_delete(emitter *yaml_emitter_t) {
+ *emitter = yaml_emitter_t{}
+}
+
+/*
+ * String write handler.
+ */
+
+func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
+ return nil
+}
+
+/*
+ * File write handler.
+ */
+
+func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
+ _, err := emitter.output_writer.Write(buffer)
+ return err
+}
+
+/*
+ * Set a string output.
+ */
+
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
+ if emitter.write_handler != nil {
+ panic("output already set")
+ }
+
+ emitter.write_handler = yaml_string_write_handler
+ emitter.output_buffer = buffer
+}
+
+/*
+ * Set a file output.
+ */
+
+func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
+ if emitter.write_handler != nil {
+ panic("output already set")
+ }
+
+ emitter.write_handler = yaml_writer_write_handler
+ emitter.output_writer = w
+}
+
+/*
+ * Set a generic output handler.
+ */
+
+func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
+ if emitter.write_handler != nil {
+ panic("output already set")
+ }
+
+ emitter.write_handler = handler
+}
+
+/*
+ * Set the output encoding.
+ */
+
+func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
+ if emitter.encoding != yaml_ANY_ENCODING {
+ panic("encoding already set")
+ }
+
+ emitter.encoding = encoding
+}
+
+/*
+ * Set the canonical output style.
+ */
+
+func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
+ emitter.canonical = canonical
+}
+
+/*
+ * Set the indentation increment.
+ */
+
+func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
+ if indent < 2 || indent > 9 {
+ indent = 2
+ }
+ emitter.best_indent = indent
+}
+
+/*
+ * Set the preferred line width.
+ */
+
+func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
+ if width < 0 {
+ width = -1
+ }
+ emitter.best_width = width
+}
+
+/*
+ * Set if unescaped non-ASCII characters are allowed.
+ */
+
+func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
+ emitter.unicode = unicode
+}
+
+/*
+ * Set the preferred line break character.
+ */
+
+func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
+ emitter.line_break = line_break
+}
+
+/*
+ * Destroy a token object.
+ */
+
+// yaml_DECLARE(void)
+// yaml_token_delete(yaml_token_t *token)
+// {
+// assert(token); /* Non-NULL token object expected. */
+//
+// switch (token.type)
+// {
+// case yaml_TAG_DIRECTIVE_TOKEN:
+// yaml_free(token.data.tag_directive.handle);
+// yaml_free(token.data.tag_directive.prefix);
+// break;
+//
+// case yaml_ALIAS_TOKEN:
+// yaml_free(token.data.alias.value);
+// break;
+//
+// case yaml_ANCHOR_TOKEN:
+// yaml_free(token.data.anchor.value);
+// break;
+//
+// case yaml_TAG_TOKEN:
+// yaml_free(token.data.tag.handle);
+// yaml_free(token.data.tag.suffix);
+// break;
+//
+// case yaml_SCALAR_TOKEN:
+// yaml_free(token.data.scalar.value);
+// break;
+//
+// default:
+// break;
+// }
+//
+// memset(token, 0, sizeof(yaml_token_t));
+// }
+
+/*
+ * Check if a string is a valid UTF-8 sequence.
+ *
+ * Check 'reader.c' for more details on UTF-8 encoding.
+ */
+
+// static int
+// yaml_check_utf8(yaml_char_t *start, size_t length)
+// {
+// yaml_char_t *end = start+length;
+// yaml_char_t *pointer = start;
+//
+// while (pointer < end) {
+// unsigned char octet;
+// unsigned int width;
+// unsigned int value;
+// size_t k;
+//
+// octet = pointer[0];
+// width = (octet & 0x80) == 0x00 ? 1 :
+// (octet & 0xE0) == 0xC0 ? 2 :
+// (octet & 0xF0) == 0xE0 ? 3 :
+// (octet & 0xF8) == 0xF0 ? 4 : 0;
+// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
+// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
+// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
+// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
+// if (!width) return 0;
+// if (pointer+width > end) return 0;
+// for (k = 1; k < width; k ++) {
+// octet = pointer[k];
+// if ((octet & 0xC0) != 0x80) return 0;
+// value = (value << 6) + (octet & 0x3F);
+// }
+// if (!((width == 1) ||
+// (width == 2 && value >= 0x80) ||
+// (width == 3 && value >= 0x800) ||
+// (width == 4 && value >= 0x10000))) return 0;
+//
+// pointer += width;
+// }
+//
+// return 1;
+// }
+
+/*
+ * Create STREAM-START.
+ */
+
+// Each initializer below overwrites *event with a freshly zeroed
+// yaml_event_t carrying only the fields relevant to that event type.
+func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
+ *event = yaml_event_t{
+ event_type: yaml_STREAM_START_EVENT,
+ encoding: encoding,
+ }
+}
+
+/*
+ * Create STREAM-END.
+ */
+
+func yaml_stream_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ event_type: yaml_STREAM_END_EVENT,
+ }
+}
+
+/*
+ * Create DOCUMENT-START.
+ */
+
+// version_directive and tag_directives may describe an explicit
+// "%YAML"/"%TAG" header; implicit marks a document with no "---".
+func yaml_document_start_event_initialize(event *yaml_event_t,
+ version_directive *yaml_version_directive_t,
+ tag_directives []yaml_tag_directive_t,
+ implicit bool) {
+ *event = yaml_event_t{
+ event_type: yaml_DOCUMENT_START_EVENT,
+ version_directive: version_directive,
+ tag_directives: tag_directives,
+ implicit: implicit,
+ }
+}
+
+/*
+ * Create DOCUMENT-END.
+ */
+
+func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
+ *event = yaml_event_t{
+ event_type: yaml_DOCUMENT_END_EVENT,
+ implicit: implicit,
+ }
+}
+
+/*
+ * Create ALIAS.
+ */
+
+func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
+ *event = yaml_event_t{
+ event_type: yaml_ALIAS_EVENT,
+ anchor: anchor,
+ }
+}
+
+/*
+ * Create SCALAR.
+ */
+
+// Note: the scalar-specific style is stored in the generic style
+// field via the yaml_style_t conversion; plain_implicit maps to the
+// shared implicit field, quoted_implicit to quoted_implicit.
+func yaml_scalar_event_initialize(event *yaml_event_t,
+ anchor []byte, tag []byte,
+ value []byte,
+ plain_implicit bool, quoted_implicit bool,
+ style yaml_scalar_style_t) {
+
+ *event = yaml_event_t{
+ event_type: yaml_SCALAR_EVENT,
+ anchor: anchor,
+ tag: tag,
+ value: value,
+ implicit: plain_implicit,
+ quoted_implicit: quoted_implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+/*
+ * Create SEQUENCE-START.
+ */
+
+func yaml_sequence_start_event_initialize(event *yaml_event_t,
+ anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
+ *event = yaml_event_t{
+ event_type: yaml_SEQUENCE_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+/*
+ * Create SEQUENCE-END.
+ */
+
+func yaml_sequence_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ event_type: yaml_SEQUENCE_END_EVENT,
+ }
+}
+
+/*
+ * Create MAPPING-START.
+ */
+
+func yaml_mapping_start_event_initialize(event *yaml_event_t,
+ anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
+ *event = yaml_event_t{
+ event_type: yaml_MAPPING_START_EVENT,
+ anchor: anchor,
+ tag: tag,
+ implicit: implicit,
+ style: yaml_style_t(style),
+ }
+}
+
+/*
+ * Create MAPPING-END.
+ */
+
+func yaml_mapping_end_event_initialize(event *yaml_event_t) {
+ *event = yaml_event_t{
+ event_type: yaml_MAPPING_END_EVENT,
+ }
+}
+
+/*
+ * Destroy an event object.
+ */
+
+// Resetting to the zero value drops all references held by the event
+// (anchor/tag/value slices), leaving them to the garbage collector.
+func yaml_event_delete(event *yaml_event_t) {
+ *event = yaml_event_t{}
+}
+
+// /*
+// * Create a document object.
+// */
+//
+// func yaml_document_initialize(document *yaml_document_t,
+// version_directive *yaml_version_directive_t,
+// tag_directives []yaml_tag_directive_t,
+// start_implicit, end_implicit bool) bool {
+//
+//
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+// struct {
+// yaml_node_t *start;
+// yaml_node_t *end;
+// yaml_node_t *top;
+// } nodes = { NULL, NULL, NULL };
+// yaml_version_directive_t *version_directive_copy = NULL;
+// struct {
+// yaml_tag_directive_t *start;
+// yaml_tag_directive_t *end;
+// yaml_tag_directive_t *top;
+// } tag_directives_copy = { NULL, NULL, NULL };
+// yaml_tag_directive_t value = { NULL, NULL };
+// YAML_mark_t mark = { 0, 0, 0 };
+//
+// assert(document); /* Non-NULL document object is expected. */
+// assert((tag_directives_start && tag_directives_end) ||
+// (tag_directives_start == tag_directives_end));
+// /* Valid tag directives are expected. */
+//
+// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
+//
+// if (version_directive) {
+// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
+// if (!version_directive_copy) goto error;
+// version_directive_copy.major = version_directive.major;
+// version_directive_copy.minor = version_directive.minor;
+// }
+//
+// if (tag_directives_start != tag_directives_end) {
+// yaml_tag_directive_t *tag_directive;
+// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
+// goto error;
+// for (tag_directive = tag_directives_start;
+// tag_directive != tag_directives_end; tag_directive ++) {
+// assert(tag_directive.handle);
+// assert(tag_directive.prefix);
+// if (!yaml_check_utf8(tag_directive.handle,
+// strlen((char *)tag_directive.handle)))
+// goto error;
+// if (!yaml_check_utf8(tag_directive.prefix,
+// strlen((char *)tag_directive.prefix)))
+// goto error;
+// value.handle = yaml_strdup(tag_directive.handle);
+// value.prefix = yaml_strdup(tag_directive.prefix);
+// if (!value.handle || !value.prefix) goto error;
+// if (!PUSH(&context, tag_directives_copy, value))
+// goto error;
+// value.handle = NULL;
+// value.prefix = NULL;
+// }
+// }
+//
+// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
+// tag_directives_copy.start, tag_directives_copy.top,
+// start_implicit, end_implicit, mark, mark);
+//
+// return 1;
+//
+// error:
+// STACK_DEL(&context, nodes);
+// yaml_free(version_directive_copy);
+// while (!STACK_EMPTY(&context, tag_directives_copy)) {
+// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
+// yaml_free(value.handle);
+// yaml_free(value.prefix);
+// }
+// STACK_DEL(&context, tag_directives_copy);
+// yaml_free(value.handle);
+// yaml_free(value.prefix);
+//
+// return 0;
+// }
+//
+// /*
+// * Destroy a document object.
+// */
+//
+// yaml_DECLARE(void)
+// yaml_document_delete(document *yaml_document_t)
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+// yaml_tag_directive_t *tag_directive;
+//
+// context.error = yaml_NO_ERROR; /* Eliminate a compiler warning. */
+//
+// assert(document); /* Non-NULL document object is expected. */
+//
+// while (!STACK_EMPTY(&context, document.nodes)) {
+// yaml_node_t node = POP(&context, document.nodes);
+// yaml_free(node.tag);
+// switch (node.type) {
+// case yaml_SCALAR_NODE:
+// yaml_free(node.data.scalar.value);
+// break;
+// case yaml_SEQUENCE_NODE:
+// STACK_DEL(&context, node.data.sequence.items);
+// break;
+// case yaml_MAPPING_NODE:
+// STACK_DEL(&context, node.data.mapping.pairs);
+// break;
+// default:
+// assert(0); /* Should not happen. */
+// }
+// }
+// STACK_DEL(&context, document.nodes);
+//
+// yaml_free(document.version_directive);
+// for (tag_directive = document.tag_directives.start;
+// tag_directive != document.tag_directives.end;
+// tag_directive++) {
+// yaml_free(tag_directive.handle);
+// yaml_free(tag_directive.prefix);
+// }
+// yaml_free(document.tag_directives.start);
+//
+// memset(document, 0, sizeof(yaml_document_t));
+// }
+//
+// /**
+// * Get a document node.
+// */
+//
+// yaml_DECLARE(yaml_node_t *)
+// yaml_document_get_node(document *yaml_document_t, int index)
+// {
+// assert(document); /* Non-NULL document object is expected. */
+//
+// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
+// return document.nodes.start + index - 1;
+// }
+// return NULL;
+// }
+//
+// /**
+// * Get the root object.
+// */
+//
+// yaml_DECLARE(yaml_node_t *)
+// yaml_document_get_root_node(document *yaml_document_t)
+// {
+// assert(document); /* Non-NULL document object is expected. */
+//
+// if (document.nodes.top != document.nodes.start) {
+// return document.nodes.start;
+// }
+// return NULL;
+// }
+//
+// /*
+// * Add a scalar node to a document.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_document_add_scalar(document *yaml_document_t,
+// yaml_char_t *tag, yaml_char_t *value, int length,
+// yaml_scalar_style_t style)
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+// YAML_mark_t mark = { 0, 0, 0 };
+// yaml_char_t *tag_copy = NULL;
+// yaml_char_t *value_copy = NULL;
+// yaml_node_t node;
+//
+// assert(document); /* Non-NULL document object is expected. */
+// assert(value); /* Non-NULL value is expected. */
+//
+// if (!tag) {
+// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
+// tag_copy = yaml_strdup(tag);
+// if (!tag_copy) goto error;
+//
+// if (length < 0) {
+// length = strlen((char *)value);
+// }
+//
+// if (!yaml_check_utf8(value, length)) goto error;
+// value_copy = yaml_malloc(length+1);
+// if (!value_copy) goto error;
+// memcpy(value_copy, value, length);
+// value_copy[length] = '\0';
+//
+// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
+// if (!PUSH(&context, document.nodes, node)) goto error;
+//
+// return document.nodes.top - document.nodes.start;
+//
+// error:
+// yaml_free(tag_copy);
+// yaml_free(value_copy);
+//
+// return 0;
+// }
+//
+// /*
+// * Add a sequence node to a document.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_document_add_sequence(document *yaml_document_t,
+// yaml_char_t *tag, yaml_sequence_style_t style)
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+// YAML_mark_t mark = { 0, 0, 0 };
+// yaml_char_t *tag_copy = NULL;
+// struct {
+// yaml_node_item_t *start;
+// yaml_node_item_t *end;
+// yaml_node_item_t *top;
+// } items = { NULL, NULL, NULL };
+// yaml_node_t node;
+//
+// assert(document); /* Non-NULL document object is expected. */
+//
+// if (!tag) {
+// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
+// tag_copy = yaml_strdup(tag);
+// if (!tag_copy) goto error;
+//
+// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
+//
+// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
+// style, mark, mark);
+// if (!PUSH(&context, document.nodes, node)) goto error;
+//
+// return document.nodes.top - document.nodes.start;
+//
+// error:
+// STACK_DEL(&context, items);
+// yaml_free(tag_copy);
+//
+// return 0;
+// }
+//
+// /*
+// * Add a mapping node to a document.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_document_add_mapping(document *yaml_document_t,
+// yaml_char_t *tag, yaml_mapping_style_t style)
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+// YAML_mark_t mark = { 0, 0, 0 };
+// yaml_char_t *tag_copy = NULL;
+// struct {
+// yaml_node_pair_t *start;
+// yaml_node_pair_t *end;
+// yaml_node_pair_t *top;
+// } pairs = { NULL, NULL, NULL };
+// yaml_node_t node;
+//
+// assert(document); /* Non-NULL document object is expected. */
+//
+// if (!tag) {
+// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
+// }
+//
+// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
+// tag_copy = yaml_strdup(tag);
+// if (!tag_copy) goto error;
+//
+// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
+//
+// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
+// style, mark, mark);
+// if (!PUSH(&context, document.nodes, node)) goto error;
+//
+// return document.nodes.top - document.nodes.start;
+//
+// error:
+// STACK_DEL(&context, pairs);
+// yaml_free(tag_copy);
+//
+// return 0;
+// }
+//
+// /*
+// * Append an item to a sequence node.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_document_append_sequence_item(document *yaml_document_t,
+// int sequence, int item)
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+//
+// assert(document); /* Non-NULL document is required. */
+// assert(sequence > 0
+// && document.nodes.start + sequence <= document.nodes.top);
+// /* Valid sequence id is required. */
+// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
+// /* A sequence node is required. */
+// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
+// /* Valid item id is required. */
+//
+// if (!PUSH(&context,
+// document.nodes.start[sequence-1].data.sequence.items, item))
+// return 0;
+//
+// return 1;
+// }
+//
+// /*
+// * Append a pair of a key and a value to a mapping node.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_document_append_mapping_pair(document *yaml_document_t,
+// int mapping, int key, int value)
+// {
+// struct {
+// YAML_error_type_t error;
+// } context;
+//
+// yaml_node_pair_t pair;
+//
+// assert(document); /* Non-NULL document is required. */
+// assert(mapping > 0
+// && document.nodes.start + mapping <= document.nodes.top);
+// /* Valid mapping id is required. */
+// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
+// /* A mapping node is required. */
+// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
+// /* Valid key id is required. */
+// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
+// /* Valid value id is required. */
+//
+// pair.key = key;
+// pair.value = value;
+//
+// if (!PUSH(&context,
+// document.nodes.start[mapping-1].data.mapping.pairs, pair))
+// return 0;
+//
+// return 1;
+// }
+//
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
new file mode 100644
index 00000000..dcc1b89c
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/decode.go
@@ -0,0 +1,622 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "reflect"
+ "runtime"
+ "strconv"
+ "strings"
+)
+
+// Unmarshaler is implemented by types that want to decode themselves
+// from YAML. tag is the resolved YAML tag for the node; value holds a
+// pointer to the decoded generic representation.
+type Unmarshaler interface {
+ UnmarshalYAML(tag string, value interface{}) error
+}
+
+// A Number represents a JSON number literal.
+type Number string
+
+// String returns the literal text of the number.
+func (n Number) String() string { return string(n) }
+
+// Float64 returns the number as a float64.
+func (n Number) Float64() (float64, error) {
+ return strconv.ParseFloat(string(n), 64)
+}
+
+// Int64 returns the number as an int64.
+func (n Number) Int64() (int64, error) {
+ return strconv.ParseInt(string(n), 10, 64)
+}
+
+// Decoder reads and decodes YAML documents from an input stream.
+type Decoder struct {
+ parser yaml_parser_t // low-level parser state
+ event yaml_event_t // event currently being processed
+ replay_events []yaml_event_t // queued events being replayed to expand an alias
+ useNumber bool // decode numbers via Number (see UseNumber)
+
+ // anchors maps a completed anchor name to the events it recorded,
+ // so a later alias can replay them.
+ anchors map[string][]yaml_event_t
+ // tracking_anchors is a stack of event buffers for anchors whose
+ // recording is still in progress (nested anchors push/pop).
+ tracking_anchors [][]yaml_event_t
+}
+
+// ParserError reports a syntax error with its position in the input.
+type ParserError struct {
+ ErrorType YAML_error_type_t
+ Context string
+ ContextMark YAML_mark_t
+ Problem string
+ ProblemMark YAML_mark_t
+}
+
+func (e *ParserError) Error() string {
+ return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
+}
+
+// UnexpectedEventError reports an event that is invalid in the
+// current decoding context.
+type UnexpectedEventError struct {
+ Value string
+ EventType yaml_event_type_t
+ At YAML_mark_t
+}
+
+// Error renders the unexpected event's type, value, and 1-based
+// line/column position (marks are stored 0-based).
+func (e *UnexpectedEventError) Error() string {
+ // "Unexpected" was previously misspelled "Unexpect".
+ return fmt.Sprintf("yaml: Unexpected event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
+}
+
+// recovery is installed via defer to convert panics raised by
+// Decoder.error into an ordinary error return. Genuine runtime errors
+// (nil deref, index out of range, ...) are re-panicked untouched.
+func recovery(err *error) {
+ if r := recover(); r != nil {
+ if _, ok := r.(runtime.Error); ok {
+ panic(r)
+ }
+
+ var tmpError error
+ switch r := r.(type) {
+ case error:
+ tmpError = r
+ case string:
+ tmpError = errors.New(r)
+ default:
+ tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String())
+ }
+
+ *err = tmpError
+ }
+}
+
+// Unmarshal decodes the first YAML document in data into v.
+func Unmarshal(data []byte, v interface{}) error {
+ d := NewDecoder(bytes.NewBuffer(data))
+ return d.Decode(v)
+}
+
+// NewDecoder returns a Decoder that reads YAML documents from r.
+func NewDecoder(r io.Reader) *Decoder {
+ d := &Decoder{
+ anchors: make(map[string][]yaml_event_t),
+ // NOTE(review): the stack starts with one (nil) slot, so
+ // nextEvent always has a buffer to append to — confirm this
+ // base slot is intentional scratch space.
+ tracking_anchors: make([][]yaml_event_t, 1),
+ }
+ yaml_parser_initialize(&d.parser)
+ yaml_parser_set_input_reader(&d.parser, r)
+ return d
+}
+
+// Decode reads the next YAML document from the stream and stores it
+// in the value pointed to by v. Internal failures are raised as
+// panics and translated into the returned error by recovery.
+func (d *Decoder) Decode(v interface{}) (err error) {
+ defer recovery(&err)
+
+ rv := reflect.ValueOf(v)
+ if rv.Kind() != reflect.Ptr || rv.IsNil() {
+ return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
+ }
+
+ // First call on this stream: consume the STREAM-START event.
+ if d.event.event_type == yaml_NO_EVENT {
+ d.nextEvent()
+
+ if d.event.event_type != yaml_STREAM_START_EVENT {
+ return errors.New("Invalid stream")
+ }
+
+ d.nextEvent()
+ }
+
+ d.document(rv)
+ return nil
+}
+
+// UseNumber causes scalars holding numbers to be decoded as a Number
+// instead of the default numeric Go types.
+func (d *Decoder) UseNumber() { d.useNumber = true }
+
+// error aborts decoding by panicking; recovery (installed in Decode)
+// turns the panic back into an error return.
+func (d *Decoder) error(err error) {
+ panic(err)
+}
+
+// nextEvent advances d.event to the next event, either from the
+// alias replay queue (if one is active) or from the parser. The new
+// event is also appended to the innermost in-progress anchor buffer.
+func (d *Decoder) nextEvent() {
+ if d.event.event_type == yaml_STREAM_END_EVENT {
+ d.error(errors.New("The stream is closed"))
+ }
+
+ // Replayed events (alias expansion) take priority over the parser.
+ if d.replay_events != nil {
+ d.event = d.replay_events[0]
+ if len(d.replay_events) == 1 {
+ d.replay_events = nil
+ } else {
+ d.replay_events = d.replay_events[1:]
+ }
+ } else {
+ if !yaml_parser_parse(&d.parser, &d.event) {
+ yaml_event_delete(&d.event)
+
+ d.error(&ParserError{
+ ErrorType: d.parser.error,
+ Context: d.parser.context,
+ ContextMark: d.parser.context_mark,
+ Problem: d.parser.problem,
+ ProblemMark: d.parser.problem_mark,
+ })
+ }
+ }
+
+ last := len(d.tracking_anchors)
+ // skip aliases when tracking an anchor
+ // NOTE(review): tracking_anchors is created with one base slot in
+ // NewDecoder, so last > 0 always holds here — confirm intended.
+ if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
+ d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
+ }
+}
+
+// document decodes one document: DOCUMENT-START, the root node, then
+// DOCUMENT-END, leaving d.event positioned after the document.
+func (d *Decoder) document(rv reflect.Value) {
+ if d.event.event_type != yaml_DOCUMENT_START_EVENT {
+ d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
+ }
+
+ d.nextEvent()
+ d.parse(rv)
+
+ if d.event.event_type != yaml_DOCUMENT_END_EVENT {
+ d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
+ }
+
+ d.nextEvent()
+}
+
+// parse dispatches on the current event type and decodes one node
+// into rv, wrapping anchored nodes in begin/end anchor tracking.
+func (d *Decoder) parse(rv reflect.Value) {
+ if !rv.IsValid() {
+ // skip ahead since we cannot store
+ d.valueInterface()
+ return
+ }
+
+ anchor := string(d.event.anchor)
+ switch d.event.event_type {
+ case yaml_SEQUENCE_START_EVENT:
+ d.begin_anchor(anchor)
+ d.sequence(rv)
+ d.end_anchor(anchor)
+ case yaml_MAPPING_START_EVENT:
+ d.begin_anchor(anchor)
+ d.mapping(rv)
+ d.end_anchor(anchor)
+ case yaml_SCALAR_EVENT:
+ d.begin_anchor(anchor)
+ d.scalar(rv)
+ d.end_anchor(anchor)
+ case yaml_ALIAS_EVENT:
+ d.alias(rv)
+ case yaml_DOCUMENT_END_EVENT:
+ // nothing to decode; caller handles document end
+ default:
+ d.error(&UnexpectedEventError{
+ Value: string(d.event.value),
+ EventType: d.event.event_type,
+ At: d.event.start_mark,
+ })
+ }
+}
+
+// begin_anchor pushes a new event buffer (seeded with the current
+// event) when the node carries an anchor; no-op otherwise.
+func (d *Decoder) begin_anchor(anchor string) {
+ if anchor != "" {
+ events := []yaml_event_t{d.event}
+ d.tracking_anchors = append(d.tracking_anchors, events)
+ }
+}
+
+// end_anchor pops the buffer begun by begin_anchor, trims it, merges
+// it into any still-open outer anchors, and registers it under the
+// anchor name for later alias replay.
+func (d *Decoder) end_anchor(anchor string) {
+ if anchor != "" {
+ events := d.tracking_anchors[len(d.tracking_anchors)-1]
+ d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
+ // remove the anchor, replaying events shouldn't have anchors
+ events[0].anchor = nil
+ // we went one too many, remove the extra event
+ events = events[:len(events)-1]
+ // if nested, append to all the other anchors
+ for i, e := range d.tracking_anchors {
+ d.tracking_anchors[i] = append(e, events...)
+ }
+ d.anchors[anchor] = events
+ }
+}
+
+// indirect walks down v, allocating pointers as needed, until it gets
+// to a non-pointer. If it encounters an Unmarshaler, indirect stops
+// and returns that, paired with a pointer to a scratch interface{}
+// for the generic decode. (Adapted from encoding/json's indirect.)
+// If decodingNull is true, indirect stops at the first settable
+// pointer so it can be set to nil.
+func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
+ // If v is a named type and is addressable,
+ // start with its address, so that if the type has pointer methods,
+ // we find them.
+ if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
+ v = v.Addr()
+ }
+ for {
+ // Load value from interface, but only if the result will be
+ // usefully addressable.
+ if v.Kind() == reflect.Interface && !v.IsNil() {
+ e := v.Elem()
+ if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
+ v = e
+ continue
+ }
+ }
+
+ if v.Kind() != reflect.Ptr {
+ break
+ }
+
+ if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
+ break
+ }
+
+ if v.IsNil() {
+ v.Set(reflect.New(v.Type().Elem()))
+ }
+
+ if v.Type().NumMethod() > 0 {
+ if u, ok := v.Interface().(Unmarshaler); ok {
+ var temp interface{}
+ return u, reflect.ValueOf(&temp)
+ }
+ }
+
+ v = v.Elem()
+ }
+
+ return nil, v
+}
+
+// sequence decodes a YAML sequence into an array, slice, or empty
+// interface. Custom Unmarshalers receive the generically-decoded
+// sequence via a deferred UnmarshalYAML call.
+func (d *Decoder) sequence(v reflect.Value) {
+ if d.event.event_type != yaml_SEQUENCE_START_EVENT {
+ d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
+ }
+
+ u, pv := d.indirect(v, false)
+ if u != nil {
+ // Decode into the scratch value first, then hand it to the
+ // Unmarshaler once the sequence has been fully consumed.
+ defer func() {
+ if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
+ d.error(err)
+ }
+ }()
+ _, pv = d.indirect(pv, false)
+ }
+
+ v = pv
+
+ // Check type of target.
+ switch v.Kind() {
+ case reflect.Interface:
+ if v.NumMethod() == 0 {
+ // Decoding into nil interface? Switch to non-reflect code.
+ v.Set(reflect.ValueOf(d.sequenceInterface()))
+ return
+ }
+ // Otherwise it's invalid.
+ fallthrough
+ default:
+ d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
+ case reflect.Array:
+ case reflect.Slice:
+ break
+ }
+
+ d.nextEvent()
+
+ i := 0
+done:
+ for {
+ switch d.event.event_type {
+ case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
+ break done
+ }
+
+ // Get element of array, growing if necessary.
+ if v.Kind() == reflect.Slice {
+ // Grow slice if necessary
+ if i >= v.Cap() {
+ newcap := v.Cap() + v.Cap()/2
+ if newcap < 4 {
+ newcap = 4
+ }
+ newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
+ reflect.Copy(newv, v)
+ v.Set(newv)
+ }
+ if i >= v.Len() {
+ v.SetLen(i + 1)
+ }
+ }
+
+ if i < v.Len() {
+ // Decode into element.
+ d.parse(v.Index(i))
+ } else {
+ // Ran out of fixed array: skip.
+ d.parse(reflect.Value{})
+ }
+ i++
+ }
+
+ // Truncate or zero-fill any leftover elements from a previous value.
+ if i < v.Len() {
+ if v.Kind() == reflect.Array {
+ // Array. Zero the rest.
+ z := reflect.Zero(v.Type().Elem())
+ for ; i < v.Len(); i++ {
+ v.Index(i).Set(z)
+ }
+ } else {
+ v.SetLen(i)
+ }
+ }
+ if i == 0 && v.Kind() == reflect.Slice {
+ v.Set(reflect.MakeSlice(v.Type(), 0, 0))
+ }
+
+ if d.event.event_type != yaml_DOCUMENT_END_EVENT {
+ d.nextEvent()
+ }
+}
+
+// mapping decodes a YAML mapping into a struct, a map, or an empty
+// interface. Custom Unmarshalers receive the generically-decoded map
+// via a deferred UnmarshalYAML call.
+func (d *Decoder) mapping(v reflect.Value) {
+ u, pv := d.indirect(v, false)
+ if u != nil {
+ defer func() {
+ if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
+ d.error(err)
+ }
+ }()
+ _, pv = d.indirect(pv, false)
+ }
+ v = pv
+
+ // Decoding into nil interface? Switch to non-reflect code.
+ if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
+ v.Set(reflect.ValueOf(d.mappingInterface()))
+ return
+ }
+
+ // Check type of target: struct or map[X]Y
+ switch v.Kind() {
+ case reflect.Struct:
+ d.mappingStruct(v)
+ return
+ case reflect.Map:
+ default:
+ d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
+ }
+
+ mapt := v.Type()
+ if v.IsNil() {
+ v.Set(reflect.MakeMap(mapt))
+ }
+
+ d.nextEvent()
+
+ keyt := mapt.Key()
+ mapElemt := mapt.Elem()
+
+ // mapElem is reused across iterations and zeroed between entries.
+ var mapElem reflect.Value
+done:
+ for {
+ switch d.event.event_type {
+ case yaml_MAPPING_END_EVENT:
+ break done
+ case yaml_DOCUMENT_END_EVENT:
+ return
+ }
+
+ key := reflect.New(keyt)
+ d.parse(key.Elem())
+
+ if !mapElem.IsValid() {
+ mapElem = reflect.New(mapElemt).Elem()
+ } else {
+ mapElem.Set(reflect.Zero(mapElemt))
+ }
+
+ d.parse(mapElem)
+
+ v.SetMapIndex(key.Elem(), mapElem)
+ }
+
+ d.nextEvent()
+}
+
+// mappingStruct decodes a YAML mapping into the fields of struct v,
+// using the cached field metadata for name matching. Exact name
+// matches win; otherwise the first case-insensitive match is used.
+func (d *Decoder) mappingStruct(v reflect.Value) {
+
+ structt := v.Type()
+ fields := cachedTypeFields(structt)
+
+ d.nextEvent()
+
+done:
+ for {
+ switch d.event.event_type {
+ case yaml_MAPPING_END_EVENT:
+ break done
+ case yaml_DOCUMENT_END_EVENT:
+ return
+ }
+
+ key := ""
+ d.parse(reflect.ValueOf(&key))
+
+ // Figure out field corresponding to key.
+ var subv reflect.Value
+
+ var f *field
+ for i := range fields {
+ ff := &fields[i]
+ if ff.name == key {
+ f = ff
+ break
+ }
+
+ if f == nil && strings.EqualFold(ff.name, key) {
+ f = ff
+ }
+ }
+
+ if f != nil {
+ // Walk the field index path, allocating intermediate
+ // embedded pointers as needed.
+ subv = v
+ for _, i := range f.index {
+ if subv.Kind() == reflect.Ptr {
+ if subv.IsNil() {
+ subv.Set(reflect.New(subv.Type().Elem()))
+ }
+ subv = subv.Elem()
+ }
+ subv = subv.Field(i)
+ }
+ }
+ // Unknown keys leave subv invalid, so parse skips the value.
+ d.parse(subv)
+ }
+
+ d.nextEvent()
+}
+
+// scalar resolves the current scalar event into v. For null-like
+// values the pointer chain is unwound so the target can be set nil;
+// Unmarshalers get the resolved tag and generic value afterwards.
+func (d *Decoder) scalar(v reflect.Value) {
+ val := string(d.event.value)
+ wantptr := null_values[val]
+
+ u, pv := d.indirect(v, wantptr)
+
+ var tag string
+ if u != nil {
+ // tag is filled in by resolve below, before the defer runs.
+ defer func() {
+ if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
+ d.error(err)
+ }
+ }()
+
+ _, pv = d.indirect(pv, wantptr)
+ }
+ v = pv
+
+ var err error
+ tag, err = resolve(d.event, v, d.useNumber)
+ if err != nil {
+ d.error(err)
+ }
+
+ d.nextEvent()
+}
+
+// alias replays the events previously recorded for the referenced
+// anchor, decoding them into rv as if they appeared inline.
+func (d *Decoder) alias(rv reflect.Value) {
+ val, ok := d.anchors[string(d.event.anchor)]
+ if !ok {
+ d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
+ }
+
+ d.replay_events = val
+ d.nextEvent()
+ d.parse(rv)
+}
+
+// valueInterface decodes the current node generically, without a
+// target reflect.Value, returning interface{} / []interface{} /
+// map[interface{}]interface{} as appropriate.
+func (d *Decoder) valueInterface() interface{} {
+ var v interface{}
+
+ anchor := string(d.event.anchor)
+ switch d.event.event_type {
+ case yaml_SEQUENCE_START_EVENT:
+ d.begin_anchor(anchor)
+ v = d.sequenceInterface()
+ case yaml_MAPPING_START_EVENT:
+ d.begin_anchor(anchor)
+ v = d.mappingInterface()
+ case yaml_SCALAR_EVENT:
+ d.begin_anchor(anchor)
+ v = d.scalarInterface()
+ case yaml_ALIAS_EVENT:
+ // Aliases decode through the reflect path; no anchor tracking.
+ rv := reflect.ValueOf(&v)
+ d.alias(rv)
+ return v
+ case yaml_DOCUMENT_END_EVENT:
+ d.error(&UnexpectedEventError{
+ Value: string(d.event.value),
+ EventType: d.event.event_type,
+ At: d.event.start_mark,
+ })
+
+ }
+ d.end_anchor(anchor)
+
+ return v
+}
+
+// scalarInterface resolves the current scalar generically and
+// advances to the next event.
+func (d *Decoder) scalarInterface() interface{} {
+ _, v := resolveInterface(d.event, d.useNumber)
+
+ d.nextEvent()
+ return v
+}
+
+// sequenceInterface is like sequence but returns []interface{}.
+func (d *Decoder) sequenceInterface() []interface{} {
+ // Non-nil empty slice so an empty YAML sequence decodes as [].
+ var v = make([]interface{}, 0)
+
+ d.nextEvent()
+
+done:
+ for {
+ switch d.event.event_type {
+ case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
+ break done
+ }
+
+ v = append(v, d.valueInterface())
+ }
+
+ // Consume the SEQUENCE-END (document end is left for the caller).
+ if d.event.event_type != yaml_DOCUMENT_END_EVENT {
+ d.nextEvent()
+ }
+
+ return v
+}
+
+// mappingInterface is like mapping but returns map[interface{}]interface{}.
+func (d *Decoder) mappingInterface() map[interface{}]interface{} {
+ m := make(map[interface{}]interface{})
+
+ d.nextEvent()
+
+done:
+ for {
+ switch d.event.event_type {
+ case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
+ break done
+ }
+
+ key := d.valueInterface()
+
+ // Read value.
+ m[key] = d.valueInterface()
+ }
+
+ // Consume the MAPPING-END (document end is left for the caller).
+ if d.event.event_type != yaml_DOCUMENT_END_EVENT {
+ d.nextEvent()
+ }
+
+ return m
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
new file mode 100644
index 00000000..bd2014f3
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/emitter.go
@@ -0,0 +1,2072 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "bytes"
+)
+
+// default_tag_directives are the standard YAML shorthand tags ("!" and
+// "!!"), appended to every document's directive set unless the document
+// already defines them.
+var default_tag_directives = []yaml_tag_directive_t{
+	{[]byte("!"), []byte("!")},
+	{[]byte("!!"), []byte("tag:yaml.org,2002:")},
+}
+
+/*
+ * Flush the buffer if needed.
+ *
+ * The +5 slack leaves room for the widest single write: a UTF-8 character
+ * of up to 4 bytes, or a 2-byte CRLF break.
+ */
+
+func flush(emitter *yaml_emitter_t) bool {
+	if emitter.buffer_pos+5 >= len(emitter.buffer) {
+		return yaml_emitter_flush(emitter)
+	}
+	return true
+}
+
+/*
+ * Put a character to the output buffer.
+ *
+ * Flushes first if the buffer is near-full, then appends the single byte
+ * and advances the print column.
+ */
+func put(emitter *yaml_emitter_t, value byte) bool {
+	if !flush(emitter) {
+		return false
+	}
+
+	emitter.buffer[emitter.buffer_pos] = value
+	emitter.buffer_pos++
+	emitter.column++
+	return true
+}
+
+/*
+ * Put a line break to the output buffer.
+ *
+ * Writes the emitter's configured break sequence (CR, LF, or CRLF), then
+ * resets the column and advances the line counter. Returns false on a
+ * failed flush or an unknown break setting.
+ */
+
+func put_break(emitter *yaml_emitter_t) bool {
+	if !flush(emitter) {
+		return false
+	}
+	switch emitter.line_break {
+	case yaml_CR_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer_pos++
+	case yaml_LN_BREAK:
+		emitter.buffer[emitter.buffer_pos] = '\n'
+		emitter.buffer_pos++
+	case yaml_CRLN_BREAK:
+		// BUG FIX: the '\n' must be written at buffer_pos+1; previously
+		// both bytes were stored at the same index, so CRLF output was
+		// "\n" followed by a stale buffer byte instead of "\r\n".
+		emitter.buffer[emitter.buffer_pos] = '\r'
+		emitter.buffer[emitter.buffer_pos+1] = '\n'
+		emitter.buffer_pos += 2
+	default:
+		return false
+	}
+	emitter.column = 0
+	emitter.line++
+	return true
+}
+
+/*
+ * Copy a character from a string into buffer.
+ *
+ * copy_bytes advances both positions past the (possibly multi-byte) UTF-8
+ * character; the print column advances by one position.
+ */
+func write(emitter *yaml_emitter_t, src []byte, src_pos *int) bool {
+	if !flush(emitter) {
+		return false
+	}
+	copy_bytes(emitter.buffer, &emitter.buffer_pos, src, src_pos)
+	emitter.column++
+	return true
+}
+
+/*
+ * Copy a line break character from a string into buffer.
+ */
+
+func write_break(emitter *yaml_emitter_t, src []byte, src_pos *int) bool {
+	if src[*src_pos] == '\n' {
+		// Normalize '\n' to the emitter's configured line break.
+		if !put_break(emitter) {
+			return false
+		}
+		*src_pos++
+	} else {
+		// Copy any other break character verbatim; write() treats it as a
+		// regular character, so reset the column/line counters here.
+		if !write(emitter, src, src_pos) {
+			return false
+		}
+		emitter.column = 0
+		emitter.line++
+	}
+
+	return true
+}
+
+/*
+ * Record an emitter error (kind and message) on the emitter and return
+ * false, so callers can propagate the failure directly.
+ */
+
+func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
+	emitter.problem = problem
+	emitter.error = yaml_EMITTER_ERROR
+	return false
+}
+
+/*
+ * Emit an event.
+ *
+ * Events are queued; queued events are processed only once enough
+ * lookahead has accumulated (see yaml_emitter_need_more_events).
+ */
+
+func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.events = append(emitter.events, *event)
+	for !yaml_emitter_need_more_events(emitter) {
+		event := &emitter.events[emitter.events_head]
+		if !yaml_emitter_analyze_event(emitter, event) {
+			return false
+		}
+		if !yaml_emitter_state_machine(emitter, event) {
+			return false
+		}
+		// Release the processed event and advance the queue head.
+		yaml_event_delete(event)
+		emitter.events_head++
+	}
+	return true
+}
+
+/*
+ * Check if we need to accumulate more events before emitting.
+ *
+ * We accumulate extra
+ *  - 1 event for DOCUMENT-START
+ *  - 2 events for SEQUENCE-START
+ *  - 3 events for MAPPING-START
+ *
+ * This lookahead lets checks like "is this sequence empty?" decide between
+ * flow and block output before anything is written.
+ */
+
+func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
+	if emitter.events_head == len(emitter.events) {
+		return true
+	}
+
+	accumulate := 0
+	switch emitter.events[emitter.events_head].event_type {
+	case yaml_DOCUMENT_START_EVENT:
+		accumulate = 1
+	case yaml_SEQUENCE_START_EVENT:
+		accumulate = 2
+	case yaml_MAPPING_START_EVENT:
+		accumulate = 3
+	default:
+		return false
+	}
+
+	if len(emitter.events)-emitter.events_head > accumulate {
+		return false
+	}
+
+	// If the queued events already close the structure that starts at the
+	// head (nesting level returns to zero), no further lookahead is needed.
+	level := 0
+	for i := emitter.events_head; i < len(emitter.events); i++ {
+		switch emitter.events[i].event_type {
+		case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
+			level++
+		case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
+			level--
+		}
+
+		if level == 0 {
+			return false
+		}
+	}
+	return true
+}
+
+/*
+ * Append a directive to the directives stack.
+ *
+ * Fails with an emitter error if a directive with the same handle is
+ * already present and allow_duplicates is false; otherwise a duplicate is
+ * silently accepted.
+ */
+
+func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t,
+	value *yaml_tag_directive_t, allow_duplicates bool) bool {
+
+	for i := range emitter.tag_directives {
+
+		if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
+			if allow_duplicates {
+				return true
+			}
+			// Fixed typo in the user-facing message ("duplicat").
+			return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
+		}
+	}
+
+	tag_copy := yaml_tag_directive_t{
+		handle: value.handle,
+		prefix: value.prefix,
+	}
+
+	emitter.tag_directives = append(emitter.tag_directives, tag_copy)
+
+	return true
+}
+
+/*
+ * Increase the indentation level.
+ *
+ * The previous indent is pushed onto the indents stack so callers can
+ * restore it when the nested node ends. At the top level (indent < 0) flow
+ * context starts at best_indent and block context at column 0; otherwise
+ * the indent grows by best_indent unless the node is indentless (e.g. a
+ * block sequence nested in a mapping).
+ */
+
+func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow bool, indentless bool) bool {
+
+	emitter.indents = append(emitter.indents, emitter.indent)
+
+	if emitter.indent < 0 {
+		if flow {
+			emitter.indent = emitter.best_indent
+		} else {
+			emitter.indent = 0
+		}
+	} else if !indentless {
+		emitter.indent += emitter.best_indent
+	}
+
+	return true
+}
+
+/*
+ * State dispatcher.
+ *
+ * Routes the event to the handler for the emitter's current state; the
+ * handlers themselves advance emitter.state. Panics on a state value
+ * outside the enumeration, which would indicate a programming error.
+ */
+
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	switch emitter.state {
+	case yaml_EMIT_STREAM_START_STATE:
+		return yaml_emitter_emit_stream_start(emitter, event)
+
+	case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, true)
+
+	case yaml_EMIT_DOCUMENT_START_STATE:
+		return yaml_emitter_emit_document_start(emitter, event, false)
+
+	case yaml_EMIT_DOCUMENT_CONTENT_STATE:
+		return yaml_emitter_emit_document_content(emitter, event)
+
+	case yaml_EMIT_DOCUMENT_END_STATE:
+		return yaml_emitter_emit_document_end(emitter, event)
+
+	case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
+		return yaml_emitter_emit_block_sequence_item(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
+		return yaml_emitter_emit_block_mapping_key(emitter, event, false)
+
+	case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, true)
+
+	case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_emitter_emit_block_mapping_value(emitter, event, false)
+
+	case yaml_EMIT_END_STATE:
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected nothing after STREAM-END")
+
+	}
+
+	panic("invalid state")
+}
+
+/*
+ * Expect STREAM-START.
+ *
+ * Validates and normalizes the emitter configuration (encoding, indent,
+ * width, line break) and initializes the output position counters.
+ */
+
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	if event.event_type != yaml_STREAM_START_EVENT {
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected STREAM-START")
+	}
+
+	if emitter.encoding == yaml_ANY_ENCODING {
+		emitter.encoding = event.encoding
+
+		// Default to UTF-8 when neither side specified an encoding.
+		if emitter.encoding == yaml_ANY_ENCODING {
+			emitter.encoding = yaml_UTF8_ENCODING
+		}
+	}
+
+	// Clamp the indent step to the sane range [2, 9].
+	if emitter.best_indent < 2 || emitter.best_indent > 9 {
+		emitter.best_indent = 2
+	}
+
+	if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
+		emitter.best_width = 80
+	}
+
+	// A negative width means "unlimited".
+	if emitter.best_width < 0 {
+		emitter.best_width = 1<<31 - 1
+	}
+
+	if emitter.line_break == yaml_ANY_BREAK {
+		emitter.line_break = yaml_LN_BREAK
+	}
+
+	emitter.indent = -1
+
+	emitter.line = 0
+	emitter.column = 0
+	emitter.whitespace = true
+	emitter.indention = true
+
+	// Non-UTF-8 output starts with a byte order mark.
+	if emitter.encoding != yaml_UTF8_ENCODING {
+		if !yaml_emitter_write_bom(emitter) {
+			return false
+		}
+	}
+
+	emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
+
+	return true
+}
+
+/*
+ * Expect DOCUMENT-START or STREAM-END.
+ *
+ * On DOCUMENT-START: validates and registers directives, decides whether
+ * the document start can stay implicit, and writes %YAML/%TAG directives
+ * and the "---" marker as needed. On STREAM-END: closes any open-ended
+ * document and flushes the output.
+ */
+
+func yaml_emitter_emit_document_start(emitter *yaml_emitter_t,
+	event *yaml_event_t, first bool) bool {
+
+	if event.event_type == yaml_DOCUMENT_START_EVENT {
+		if event.version_directive != nil {
+			if !yaml_emitter_analyze_version_directive(emitter,
+				*event.version_directive) {
+				return false
+			}
+		}
+
+		for i := range event.tag_directives {
+			tag_directive := &event.tag_directives[i]
+
+			if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
+				return false
+			}
+			if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
+				return false
+			}
+		}
+
+		// The standard "!"/"!!" shorthands are always available; duplicates
+		// from the document's own directives are tolerated here.
+		for i := range default_tag_directives {
+			if !yaml_emitter_append_tag_directive(emitter, &default_tag_directives[i], true) {
+				return false
+			}
+		}
+
+		// Only the first document of a non-canonical stream may omit "---".
+		implicit := event.implicit
+		if !first || emitter.canonical {
+			implicit = false
+		}
+
+		// An open-ended previous document must be terminated with "..."
+		// before new directives may appear.
+		if (event.version_directive != nil || len(event.tag_directives) > 0) &&
+			emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if event.version_directive != nil {
+			implicit = false
+			if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
+				return false
+			}
+
+			// Only version 1.1 passes analysis, so the literal is fixed.
+			if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
+				return false
+			}
+
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if len(event.tag_directives) > 0 {
+			implicit = false
+			for i := range event.tag_directives {
+				tag_directive := &event.tag_directives[i]
+
+				if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
+					return false
+				}
+				if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
+					return false
+				}
+				if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
+					return false
+				}
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		if yaml_emitter_check_empty_document(emitter) {
+			implicit = false
+		}
+
+		if !implicit {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+			if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
+				return false
+			}
+
+			if emitter.canonical {
+				if !yaml_emitter_write_indent(emitter) {
+					return false
+				}
+			}
+		}
+
+		emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
+
+		return true
+	} else if event.event_type == yaml_STREAM_END_EVENT {
+		if emitter.open_ended {
+			if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+
+		if !yaml_emitter_flush(emitter) {
+			return false
+		}
+
+		emitter.state = yaml_EMIT_END_STATE
+
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter,
+		"expected DOCUMENT-START or STREAM-END")
+}
+
+/*
+ * Expect the root node.
+ *
+ * Pushes DOCUMENT-END as the state to return to once the root node (and
+ * everything nested inside it) has been emitted.
+ */
+
+func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
+
+	return yaml_emitter_emit_node(emitter, event, true, false, false, false)
+}
+
+/*
+ * Expect DOCUMENT-END.
+ *
+ * Writes an explicit "..." terminator when the event is not implicit,
+ * flushes the output, and resets the tag directives for the next document.
+ */
+
+func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+
+	if event.event_type != yaml_DOCUMENT_END_EVENT {
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected DOCUMENT-END")
+	}
+
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !event.implicit {
+		if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
+			return false
+		}
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+	if !yaml_emitter_flush(emitter) {
+		return false
+	}
+
+	emitter.state = yaml_EMIT_DOCUMENT_START_STATE
+	// Directives are per-document; drop them but keep the capacity.
+	emitter.tag_directives = emitter.tag_directives[:0]
+	return true
+}
+
+/*
+ *
+ * Expect a flow item node.
+ *
+ * On the first item: opens "[" and enters a new flow/indent level. On
+ * SEQUENCE-END: closes "]" and pops the indent and state stacks. Otherwise
+ * writes the "," separator (after the first item) and emits the item.
+ */
+
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
+	if first {
+		if !yaml_emitter_write_indicator(emitter, []byte("["), true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.event_type == yaml_SEQUENCE_END_EVENT {
+		emitter.flow_level--
+		// Pop the indent pushed when the sequence was opened.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+		// Canonical output puts a trailing comma and break before "]".
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte("]"), false, false, false) {
+			return false
+		}
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
+			return false
+		}
+	}
+
+	// Wrap when past the preferred width (or always, in canonical mode).
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+/*
+ * Expect a flow key node.
+ *
+ * Opens "{" on the first key and closes "}" on MAPPING-END. Short keys
+ * are written in simple "key: value" form; longer or canonical keys use
+ * the explicit "? key" form.
+ */
+
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t,
+	event *yaml_event_t, first bool) bool {
+
+	if first {
+
+		if !yaml_emitter_write_indicator(emitter, []byte("{"), true, true, false) {
+			return false
+		}
+		if !yaml_emitter_increase_indent(emitter, true, false) {
+			return false
+		}
+		emitter.flow_level++
+	}
+
+	if event.event_type == yaml_MAPPING_END_EVENT {
+		emitter.flow_level--
+		// Pop the indent pushed when the mapping was opened.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+
+		// Canonical output puts a trailing comma and break before "}".
+		if emitter.canonical && !first {
+			if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
+				return false
+			}
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte("}"), false, false, false) {
+			return false
+		}
+
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !first {
+		if !yaml_emitter_write_indicator(emitter, []byte(","), false, false, false) {
+			return false
+		}
+	}
+	// Wrap when past the preferred width (or always, in canonical mode).
+	if emitter.canonical || emitter.column > emitter.best_width {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+	}
+
+	if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	} else {
+		if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, false) {
+			return false
+		}
+
+		emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
+		return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+	}
+}
+
+/*
+ * Expect a flow value node.
+ *
+ * Writes the ":" separator — immediately after a simple key, or on its
+ * own (possibly wrapped) position after an explicit "?" key — then emits
+ * the value node.
+ */
+
+func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t,
+	event *yaml_event_t, simple bool) bool {
+
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) {
+			return false
+		}
+	} else {
+		if emitter.canonical || emitter.column > emitter.best_width {
+			if !yaml_emitter_write_indent(emitter) {
+				return false
+			}
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, false) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+/*
+ * Expect a block item node.
+ *
+ * Each item is written as "- <node>" at the current block indent. The
+ * sequence is indentless when nested directly in a mapping value on the
+ * same line.
+ */
+
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t,
+	event *yaml_event_t, first bool) bool {
+
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false,
+			(emitter.mapping_context && !emitter.indention)) {
+			return false
+		}
+	}
+
+	if event.event_type == yaml_SEQUENCE_END_EVENT {
+
+		// Pop the indent and return to the parent state.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte("-"), true, false, true) {
+		return false
+	}
+
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
+	return yaml_emitter_emit_node(emitter, event, false, true, false, false)
+}
+
+/*
+ * Expect a block key node.
+ *
+ * Short keys are written in simple "key: value" form; keys that are too
+ * long or complex use the explicit "? key" form.
+ */
+
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t,
+	event *yaml_event_t, first bool) bool {
+
+	if first {
+		if !yaml_emitter_increase_indent(emitter, false, false) {
+			return false
+		}
+	}
+
+	if event.event_type == yaml_MAPPING_END_EVENT {
+		// Pop the indent and return to the parent state.
+		emitter.indent = emitter.indents[len(emitter.indents)-1]
+		emitter.indents = emitter.indents[:len(emitter.indents)-1]
+
+		emitter.state = emitter.states[len(emitter.states)-1]
+		emitter.states = emitter.states[:len(emitter.states)-1]
+
+		return true
+	}
+
+	if !yaml_emitter_write_indent(emitter) {
+		return false
+	}
+
+	if yaml_emitter_check_simple_key(emitter) {
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
+
+		return yaml_emitter_emit_node(emitter, event, false, false, true, true)
+	} else {
+		if !yaml_emitter_write_indicator(emitter, []byte("?"), true, false, true) {
+			return false
+		}
+		emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
+
+		return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+	}
+}
+
+/*
+ * Expect a block value node.
+ *
+ * Writes ":" directly after a simple key, or indented on its own line
+ * after an explicit "?" key, then emits the value node.
+ */
+
+func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t,
+	event *yaml_event_t, simple bool) bool {
+
+	if simple {
+		if !yaml_emitter_write_indicator(emitter, []byte(":"), false, false, false) {
+			return false
+		}
+	} else {
+		if !yaml_emitter_write_indent(emitter) {
+			return false
+		}
+		if !yaml_emitter_write_indicator(emitter, []byte(":"), true, false, true) {
+			return false
+		}
+	}
+	emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
+
+	return yaml_emitter_emit_node(emitter, event, false, false, true, false)
+}
+
+/*
+ * Expect a node.
+ *
+ * Records the context flags consulted by the write helpers, then
+ * dispatches on the event type. The trailing "return false" after the
+ * switch was unreachable (every case, including default, returns) and has
+ * been removed.
+ */
+
+func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
+	root bool, sequence bool, mapping bool, simple_key bool) bool {
+	emitter.root_context = root
+	emitter.sequence_context = sequence
+	emitter.mapping_context = mapping
+	emitter.simple_key_context = simple_key
+
+	switch event.event_type {
+	case yaml_ALIAS_EVENT:
+		return yaml_emitter_emit_alias(emitter, event)
+
+	case yaml_SCALAR_EVENT:
+		return yaml_emitter_emit_scalar(emitter, event)
+
+	case yaml_SEQUENCE_START_EVENT:
+		return yaml_emitter_emit_sequence_start(emitter, event)
+
+	case yaml_MAPPING_START_EVENT:
+		return yaml_emitter_emit_mapping_start(emitter, event)
+
+	default:
+		return yaml_emitter_set_emitter_error(emitter,
+			"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
+	}
+}
+
+/*
+ * Expect ALIAS.
+ *
+ * An alias node is just its "*anchor" reference; write it and pop the
+ * next state off the stack.
+ */
+
+func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+
+	top := len(emitter.states) - 1
+	emitter.state = emitter.states[top]
+	emitter.states = emitter.states[:top]
+
+	return true
+}
+
+/*
+ * Expect SCALAR.
+ *
+ * Chooses a style, writes the anchor and tag, emits the scalar value in a
+ * temporary indent level, then pops the indent and state stacks.
+ */
+
+func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_select_scalar_style(emitter, event) {
+		return false
+	}
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+	if !yaml_emitter_increase_indent(emitter, true, false) {
+		return false
+	}
+	if !yaml_emitter_process_scalar(emitter) {
+		return false
+	}
+	// Restore the indent pushed above, then return to the parent state.
+	emitter.indent = emitter.indents[len(emitter.indents)-1]
+	emitter.indents = emitter.indents[:len(emitter.indents)-1]
+
+	emitter.state = emitter.states[len(emitter.states)-1]
+	emitter.states = emitter.states[:len(emitter.states)-1]
+
+	return true
+}
+
+/*
+ * Expect SEQUENCE-START.
+ *
+ * Writes the anchor/tag, then chooses flow style when already inside a
+ * flow collection, in canonical mode, when the event asks for it, or when
+ * the sequence is empty.
+ */
+
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+
+	if emitter.flow_level > 0 || emitter.canonical ||
+		event.style == yaml_style_t(yaml_FLOW_SEQUENCE_STYLE) ||
+		yaml_emitter_check_empty_sequence(emitter) {
+		emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+	}
+
+	return true
+}
+
+/*
+ * Expect MAPPING-START.
+ *
+ * Writes the anchor/tag, then chooses flow style when already inside a
+ * flow collection, in canonical mode, when the event asks for it, or when
+ * the mapping is empty.
+ */
+
+func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	if !yaml_emitter_process_anchor(emitter) {
+		return false
+	}
+	if !yaml_emitter_process_tag(emitter) {
+		return false
+	}
+
+	if emitter.flow_level > 0 || emitter.canonical ||
+		event.style == yaml_style_t(yaml_FLOW_MAPPING_STYLE) ||
+		yaml_emitter_check_empty_mapping(emitter) {
+		emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+	} else {
+		emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+	}
+
+	return true
+}
+
+/*
+ * Check if the document content is an empty scalar.
+ *
+ * Deliberately a stub: this port always reports a non-empty document, so
+ * the document-start indicator handling never takes the empty-document
+ * shortcut.
+ */
+
+func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
+	return false
+}
+
+/*
+ * Check if the next events represent an empty sequence, i.e. a
+ * SEQUENCE-START immediately followed by a SEQUENCE-END in the queued
+ * lookahead.
+ */
+
+func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
+	head := emitter.events_head
+	if len(emitter.events)-head < 2 {
+		return false
+	}
+
+	return emitter.events[head].event_type == yaml_SEQUENCE_START_EVENT &&
+		emitter.events[head+1].event_type == yaml_SEQUENCE_END_EVENT
+}
+
+/*
+ * Check if the next events represent an empty mapping, i.e. a
+ * MAPPING-START immediately followed by a MAPPING-END in the queued
+ * lookahead.
+ */
+
+func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
+	head := emitter.events_head
+	if len(emitter.events)-head < 2 {
+		return false
+	}
+
+	return emitter.events[head].event_type == yaml_MAPPING_START_EVENT &&
+		emitter.events[head+1].event_type == yaml_MAPPING_END_EVENT
+}
+
+/*
+ * Check if the next node can be expressed as a simple key.
+ *
+ * A simple key must be a single-line alias, scalar, or empty collection
+ * whose rendered length (anchor + tag + value) fits within 128
+ * characters.
+ */
+
+func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
+	length := 0
+
+	switch emitter.events[emitter.events_head].event_type {
+	case yaml_ALIAS_EVENT:
+		length += len(emitter.anchor_data.anchor)
+
+	case yaml_SCALAR_EVENT:
+		// Multi-line scalars can never be simple keys.
+		if emitter.scalar_data.multiline {
+			return false
+		}
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix) +
+			len(emitter.scalar_data.value)
+
+	case yaml_SEQUENCE_START_EVENT:
+		if !yaml_emitter_check_empty_sequence(emitter) {
+			return false
+		}
+
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+
+	case yaml_MAPPING_START_EVENT:
+		if !yaml_emitter_check_empty_mapping(emitter) {
+			return false
+		}
+
+		length += len(emitter.anchor_data.anchor) +
+			len(emitter.tag_data.handle) +
+			len(emitter.tag_data.suffix)
+
+	default:
+		return false
+	}
+
+	// The YAML spec caps simple keys at 128 characters.
+	if length > 128 {
+		return false
+	}
+
+	return true
+}
+
+/*
+ * Determine an acceptable scalar style.
+ *
+ * Starts from the style requested by the event and progressively demotes
+ * it (plain -> single-quoted -> double-quoted) whenever the analyzed
+ * scalar data rules the current choice out. The result is stored in
+ * emitter.scalar_data.style.
+ */
+
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
+	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
+
+	if no_tag && !event.implicit && !event.quoted_implicit {
+		return yaml_emitter_set_emitter_error(emitter,
+			"neither tag nor implicit flags are specified")
+	}
+
+	style := yaml_scalar_style_t(event.style)
+
+	if style == yaml_ANY_SCALAR_STYLE {
+		style = yaml_PLAIN_SCALAR_STYLE
+	}
+
+	// Canonical output always double-quotes scalars.
+	if emitter.canonical {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	// Simple keys must stay on one line.
+	if emitter.simple_key_context && emitter.scalar_data.multiline {
+		style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+	}
+
+	if style == yaml_PLAIN_SCALAR_STYLE {
+		if (emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed) ||
+			(emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		// An empty scalar in flow or simple-key context must be quoted to
+		// be visible at all.
+		if len(emitter.scalar_data.value) == 0 &&
+			(emitter.flow_level > 0 || emitter.simple_key_context) {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+		if no_tag && !event.implicit {
+			style = yaml_SINGLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
+		if !emitter.scalar_data.single_quoted_allowed {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	// Block styles (literal/folded) are illegal in flow context and for
+	// simple keys.
+	if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
+		if !emitter.scalar_data.block_allowed ||
+			emitter.flow_level > 0 || emitter.simple_key_context {
+			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
+		}
+	}
+
+	// A quoted scalar without an explicit tag needs "!" so its implicit
+	// type is preserved on re-parse.
+	if no_tag && !event.quoted_implicit &&
+		style != yaml_PLAIN_SCALAR_STYLE {
+		emitter.tag_data.handle = []byte("!")
+	}
+
+	emitter.scalar_data.style = style
+
+	return true
+}
+
+/*
+ * Write an anchor.
+ *
+ * Emits nothing when no anchor is set; otherwise writes "*name" for an
+ * alias reference or "&name" for an anchor definition.
+ */
+
+func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
+	if emitter.anchor_data.anchor == nil {
+		return true
+	}
+
+	indicator := "*"
+	if !emitter.anchor_data.alias {
+		indicator = "&"
+	}
+	if !yaml_emitter_write_indicator(emitter, []byte(indicator), true, false, false) {
+		return false
+	}
+
+	return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
+}
+
+/*
+ * Write a tag.
+ *
+ * Emits nothing when no tag is set. A tag with a handle is written in
+ * shorthand form ("!!suffix"); one without a handle is written verbatim
+ * ("!<suffix>").
+ */
+
+func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
+	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
+		return true
+	}
+
+	if len(emitter.tag_data.handle) > 0 {
+		if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
+			return false
+		}
+
+		if len(emitter.tag_data.suffix) > 0 {
+			if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+				return false
+			}
+
+		}
+	} else {
+		if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
+			return false
+		}
+
+		if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
+			return false
+		}
+
+		if !yaml_emitter_write_indicator(emitter, []byte(">"), false, false, false) {
+			return false
+		}
+
+	}
+
+	return true
+}
+
+/*
+ * Write a scalar.
+ *
+ * Dispatches to the writer for the style chosen by
+ * yaml_emitter_select_scalar_style; only quoted/plain writers care about
+ * whether line breaks are allowed (never inside a simple key). The
+ * unreachable "return false" after the switch (every case returns or
+ * panics) has been removed.
+ */
+
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
+	switch emitter.scalar_data.style {
+	case yaml_PLAIN_SCALAR_STYLE:
+		return yaml_emitter_write_plain_scalar(emitter,
+			emitter.scalar_data.value,
+			!emitter.simple_key_context)
+
+	case yaml_SINGLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_single_quoted_scalar(emitter,
+			emitter.scalar_data.value,
+			!emitter.simple_key_context)
+
+	case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
+		return yaml_emitter_write_double_quoted_scalar(emitter,
+			emitter.scalar_data.value,
+			!emitter.simple_key_context)
+
+	case yaml_LITERAL_SCALAR_STYLE:
+		return yaml_emitter_write_literal_scalar(emitter,
+			emitter.scalar_data.value)
+
+	case yaml_FOLDED_SCALAR_STYLE:
+		return yaml_emitter_write_folded_scalar(emitter,
+			emitter.scalar_data.value)
+
+	default:
+		panic("unknown scalar")
+	}
+}
+
+/*
+ * Check if a %YAML directive is valid.
+ *
+ * This emitter only supports YAML 1.1; any other version is rejected with
+ * an emitter error.
+ */
+
+func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t,
+	version_directive yaml_version_directive_t) bool {
+	if version_directive.major == 1 && version_directive.minor == 1 {
+		return true
+	}
+
+	return yaml_emitter_set_emitter_error(emitter,
+		"incompatible %YAML directive")
+}
+
+/*
+ * Check if a %TAG directive is valid.
+ *
+ * A valid handle is non-empty, starts and ends with '!', and contains
+ * only alphanumeric characters in between; the prefix must be non-empty.
+ */
+
+func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t,
+	tag_directive *yaml_tag_directive_t) bool {
+	handle := tag_directive.handle
+	prefix := tag_directive.prefix
+
+	if len(handle) == 0 {
+		return yaml_emitter_set_emitter_error(emitter,
+			"tag handle must not be empty")
+	}
+
+	if handle[0] != '!' {
+		return yaml_emitter_set_emitter_error(emitter,
+			"tag handle must start with '!'")
+	}
+
+	if handle[len(handle)-1] != '!' {
+		return yaml_emitter_set_emitter_error(emitter,
+			"tag handle must end with '!'")
+	}
+
+	// BUG FIX: the post statement previously evaluated width(handle[i])
+	// without assigning it, so i never advanced and any handle longer
+	// than two bytes (e.g. "!foo!") looped forever. Advance i by the
+	// UTF-8 character width.
+	for i := 1; i < len(handle)-1; i += width(handle[i]) {
+		if !is_alpha(handle[i]) {
+			return yaml_emitter_set_emitter_error(emitter,
+				"tag handle must contain alphanumerical characters only")
+		}
+	}
+
+	if len(prefix) == 0 {
+		return yaml_emitter_set_emitter_error(emitter,
+			"tag prefix must not be empty")
+	}
+
+	return true
+}
+
+/*
+ * Check if an anchor is valid.
+ *
+ * Anchors and aliases must be non-empty and purely alphanumeric; on
+ * success the anchor data is recorded for the subsequent write.
+ */
+
+func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t,
+	anchor []byte, alias bool) bool {
+	if len(anchor) == 0 {
+		errmsg := "alias value must not be empty"
+		if !alias {
+			errmsg = "anchor value must not be empty"
+		}
+		return yaml_emitter_set_emitter_error(emitter, errmsg)
+	}
+
+	// Step through UTF-8 characters, not raw bytes.
+	for i := 0; i < len(anchor); i += width(anchor[i]) {
+		if !is_alpha(anchor[i]) {
+			errmsg := "alias value must contain alphanumerical characters only"
+			if !alias {
+				errmsg = "anchor value must contain alphanumerical characters only"
+			}
+			return yaml_emitter_set_emitter_error(emitter, errmsg)
+		}
+	}
+
+	emitter.anchor_data.anchor = anchor
+	emitter.anchor_data.alias = alias
+
+	return true
+}
+
+/*
+ * Check if a tag is valid.
+ *
+ * If the tag matches a known directive prefix it is recorded in shorthand
+ * form (handle + suffix); otherwise the full tag becomes the suffix and
+ * is later emitted verbatim as "!<tag>".
+ */
+
+func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
+	if len(tag) == 0 {
+		return yaml_emitter_set_emitter_error(emitter,
+			"tag value must not be empty")
+	}
+
+	for i := range emitter.tag_directives {
+		tag_directive := &emitter.tag_directives[i]
+		if bytes.HasPrefix(tag, tag_directive.prefix) {
+			emitter.tag_data.handle = tag_directive.handle
+			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
+			return true
+		}
+	}
+
+	emitter.tag_data.suffix = tag
+
+	return true
+}
+
+/*
+ * Check if a scalar is valid.
+ *
+ * Single pass over the scalar that classifies its characters (indicator
+ * positions, breaks, special characters, leading/trailing/adjacent
+ * whitespace) and derives which output styles — flow plain, block plain,
+ * single-quoted, block (literal/folded) — are permitted. Results are
+ * stored in emitter.scalar_data.
+ */
+
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
+	block_indicators := false
+	flow_indicators := false
+	line_breaks := false
+	special_characters := false
+
+	leading_space := false
+	leading_break := false
+	trailing_space := false
+	trailing_break := false
+	break_space := false
+	space_break := false
+
+	// NOTE: "preceeded" is a historical misspelling kept for vendored-diff
+	// fidelity; it means "preceded by whitespace".
+	preceeded_by_whitespace := false
+	followed_by_whitespace := false
+	previous_space := false
+	previous_break := false
+
+	emitter.scalar_data.value = value
+
+	// An empty scalar can only be a block plain or single-quoted value.
+	if len(value) == 0 {
+		emitter.scalar_data.multiline = false
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = true
+		emitter.scalar_data.single_quoted_allowed = true
+		emitter.scalar_data.block_allowed = false
+
+		return true
+	}
+
+	// Values starting with "---" or "..." would be mistaken for document
+	// markers if left plain.
+	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') ||
+		(value[0] == '.' && value[1] == '.' && value[2] == '.')) {
+		block_indicators = true
+		flow_indicators = true
+	}
+
+	// The start of the scalar counts as following whitespace.
+	preceeded_by_whitespace = true
+
+	for i, w := 0, 0; i < len(value); i += w {
+		w = width(value[i])
+		followed_by_whitespace = i+w >= len(value) || is_blankz_at(value, i+w)
+
+		if i == 0 {
+			// Characters that are indicators at the start of a scalar.
+			switch value[i] {
+			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
+				flow_indicators = true
+				block_indicators = true
+			case '?', ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '-':
+				if followed_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		} else {
+			// Characters that are indicators mid-scalar.
+			switch value[i] {
+			case ',', '?', '[', ']', '{', '}':
+				flow_indicators = true
+			case ':':
+				flow_indicators = true
+				if followed_by_whitespace {
+					block_indicators = true
+				}
+			case '#':
+				if preceeded_by_whitespace {
+					flow_indicators = true
+					block_indicators = true
+				}
+			}
+		}
+
+		// Non-printable (or non-ASCII with unicode output disabled)
+		// characters force double quoting with escapes.
+		if !is_printable_at(value, i) || (!is_ascii(value[i]) && !emitter.unicode) {
+			special_characters = true
+		}
+
+		if is_break_at(value, i) {
+			line_breaks = true
+		}
+
+		if is_space(value[i]) {
+			if i == 0 {
+				leading_space = true
+			}
+			if i+w == len(value) {
+				trailing_space = true
+			}
+			if previous_break {
+				break_space = true
+			}
+			previous_space = true
+			previous_break = false
+		} else if is_break_at(value, i) {
+			if i == 0 {
+				leading_break = true
+			}
+			if i+width(value[i]) == len(value) {
+				trailing_break = true
+			}
+			if previous_space {
+				space_break = true
+			}
+			previous_space = false
+			previous_break = true
+		} else {
+			previous_space = false
+			previous_break = false
+		}
+
+		preceeded_by_whitespace = is_blankz_at(value, i)
+	}
+
+	emitter.scalar_data.multiline = line_breaks
+
+	// Start permissive, then rule styles out from the findings above.
+	emitter.scalar_data.flow_plain_allowed = true
+	emitter.scalar_data.block_plain_allowed = true
+	emitter.scalar_data.single_quoted_allowed = true
+	emitter.scalar_data.block_allowed = true
+
+	if leading_space || leading_break || trailing_space || trailing_break {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+
+	if trailing_space {
+		emitter.scalar_data.block_allowed = false
+	}
+
+	if break_space {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+	}
+
+	if space_break || special_characters {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+		emitter.scalar_data.single_quoted_allowed = false
+		emitter.scalar_data.block_allowed = false
+	}
+
+	if line_breaks {
+		emitter.scalar_data.flow_plain_allowed = false
+		emitter.scalar_data.block_plain_allowed = false
+	}
+
+	if flow_indicators {
+		emitter.scalar_data.flow_plain_allowed = false
+	}
+
+	if block_indicators {
+		emitter.scalar_data.block_plain_allowed = false
+	}
+
+	return true
+}
+
+/*
+ * Check if the event data is valid.
+ */
+
// yaml_emitter_analyze_event validates the components of event (anchor,
// tag, scalar value) and caches the analysis results on the emitter for
// use while writing. Returns false if any component is invalid.
func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
	// Reset cached analysis from any previous event.
	emitter.anchor_data.anchor = nil
	emitter.tag_data.handle = nil
	emitter.tag_data.suffix = nil
	emitter.scalar_data.value = nil

	switch event.event_type {
	case yaml_ALIAS_EVENT:
		if !yaml_emitter_analyze_anchor(emitter,
			event.anchor, true) {
			return false
		}

	case yaml_SCALAR_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter,
				event.anchor, false) {
				return false
			}
		}
		// The tag is only analyzed when it will actually be emitted:
		// in canonical mode, or when the scalar is not implicitly typed.
		if len(event.tag) > 0 && (emitter.canonical ||
			(!event.implicit &&
				!event.quoted_implicit)) {
			if !yaml_emitter_analyze_tag(emitter, event.tag) {
				return false
			}
		}
		if !yaml_emitter_analyze_scalar(emitter, event.value) {
			return false
		}
	case yaml_SEQUENCE_START_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter,
				event.anchor, false) {
				return false
			}
		}
		if len(event.tag) > 0 && (emitter.canonical ||
			!event.implicit) {
			if !yaml_emitter_analyze_tag(emitter,
				event.tag) {
				return false
			}
		}
	case yaml_MAPPING_START_EVENT:
		if len(event.anchor) > 0 {
			if !yaml_emitter_analyze_anchor(emitter,
				event.anchor, false) {
				return false
			}
		}
		if len(event.tag) > 0 && (emitter.canonical ||
			!event.implicit) {
			if !yaml_emitter_analyze_tag(emitter,
				event.tag) {
				return false
			}
		}

	}
	return true
}
+
+/*
+ * Write the BOM character.
+ */
+
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
+ if !flush(emitter) {
+ return false
+ }
+
+ pos := emitter.buffer_pos
+ emitter.buffer[pos] = '\xEF'
+ emitter.buffer[pos+1] = '\xBB'
+ emitter.buffer[pos+2] = '\xBF'
+ emitter.buffer_pos += 3
+ return true
+}
+
+func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
+ indent := emitter.indent
+ if indent < 0 {
+ indent = 0
+ }
+
+ if !emitter.indention || emitter.column > indent ||
+ (emitter.column == indent && !emitter.whitespace) {
+ if !put_break(emitter) {
+ return false
+ }
+ }
+
+ for emitter.column < indent {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ emitter.whitespace = true
+ emitter.indention = true
+
+ return true
+}
+
+func yaml_emitter_write_indicator(emitter *yaml_emitter_t,
+ indicator []byte, need_whitespace bool,
+ is_whitespace bool, is_indention bool) bool {
+ if need_whitespace && !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ ind_pos := 0
+ for ind_pos < len(indicator) {
+ if !write(emitter, indicator, &ind_pos) {
+ return false
+ }
+ }
+
+ emitter.whitespace = is_whitespace
+ emitter.indention = (emitter.indention && is_indention)
+ emitter.open_ended = false
+
+ return true
+}
+
+func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
+ pos := 0
+ for pos < len(value) {
+ if !write(emitter, value, &pos) {
+ return false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+
+ return true
+}
+
+func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
+ if !emitter.whitespace {
+ if !put(emitter, ' ') {
+ return false
+ }
+ }
+
+ pos := 0
+ for pos < len(value) {
+ if !write(emitter, value, &pos) {
+ return false
+ }
+ }
+
+ emitter.whitespace = false
+ emitter.indention = false
+
+ return true
+}
+
// yaml_emitter_write_tag_content writes a tag's content (suffix or full
// tag), percent-encoding any byte outside the URI-safe character set.
// A separating space is inserted first when required.
func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte,
	need_whitespace bool) bool {
	if need_whitespace && !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}

	for i := 0; i < len(value); {
		write_it := false
		// URI characters that may appear literally in a tag.
		switch value[i] {
		case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_',
			'.', '!', '~', '*', '\'', '(', ')', '[', ']':
			write_it = true
		default:
			write_it = is_alpha(value[i])
		}
		if write_it {
			// write advances i past the character it emits.
			if !write(emitter, value, &i) {
				return false
			}
		} else {
			// Percent-encode each octet of the (possibly multi-byte)
			// UTF-8 character as %XY with uppercase hex digits.
			w := width(value[i])
			for j := 0; j < w; j++ {
				val := value[i]
				i++

				if !put(emitter, '%') {
					return false
				}
				c := val >> 4
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if !put(emitter, c) {
					return false
				}

				c = val & 0x0f
				if c < 10 {
					c += '0'
				} else {
					c += 'A' - 10
				}
				if !put(emitter, c) {
					return false
				}

			}
		}
	}

	emitter.whitespace = false
	emitter.indention = false

	return true
}
+
// yaml_emitter_write_plain_scalar writes value as a plain (unquoted)
// scalar, folding long lines at spaces when allow_breaks permits.
func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte,
	allow_breaks bool) bool {
	spaces := false // previous character was a space
	breaks := false // previous character was a line break

	if !emitter.whitespace {
		if !put(emitter, ' ') {
			return false
		}
	}

	for i := 0; i < len(value); {
		if is_space(value[i]) {
			// Fold the line at a single space once past the preferred width.
			// NOTE(review): value[i+1] assumes a plain scalar never ends in a
			// space (yaml_emitter_analyze_scalar forbids trailing spaces for
			// plain styles) — confirm callers only reach here for such values.
			if allow_breaks && !spaces &&
				emitter.column > emitter.best_width &&
				!is_space(value[i+1]) {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else if is_break_at(value, i) {
			// A '\n' after non-break content needs an extra break so
			// folding preserves it.
			if !breaks && value[i] == '\n' {
				if !put_break(emitter) {
					return false
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}

	emitter.whitespace = false
	emitter.indention = false
	if emitter.root_context {
		// A plain scalar at the document root leaves the document "open".
		emitter.open_ended = true
	}

	return true
}
+
// yaml_emitter_write_single_quoted_scalar writes value surrounded by
// single quotes, doubling embedded quotes ('') and folding long lines at
// interior spaces when allow_breaks permits.
func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte,
	allow_breaks bool) bool {
	spaces := false // previous character was a space
	breaks := false // previous character was a line break

	if !yaml_emitter_write_indicator(emitter, []byte("'"), true, false, false) {
		return false
	}

	for i := 0; i < len(value); {
		if is_space(value[i]) {
			// Fold at an interior single space once past the preferred
			// width; the i > 0 / i < len-1 guards keep leading/trailing
			// spaces literal (and keep value[i+1] in range).
			if allow_breaks && !spaces &&
				emitter.column > emitter.best_width &&
				i > 0 && i < len(value)-1 &&
				!is_space(value[i+1]) {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else if is_break_at(value, i) {
			// A '\n' after non-break content needs an extra break so
			// folding preserves it.
			if !breaks && value[i] == '\n' {
				if !put_break(emitter) {
					return false
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			// Escape a single quote by doubling it.
			if value[i] == '\'' {
				if !put(emitter, '\'') {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			spaces = false
			breaks = false
		}
	}

	if !yaml_emitter_write_indicator(emitter, []byte("'"), false, false, false) {
		return false
	}

	emitter.whitespace = false
	emitter.indention = false

	return true
}
+
// yaml_emitter_write_double_quoted_scalar writes value surrounded by
// double quotes, backslash-escaping unprintable/non-representable
// characters, quotes, backslashes and line breaks, and folding long
// lines at spaces (escaping the break with a trailing '\') when
// allow_breaks permits.
func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte,
	allow_breaks bool) bool {

	spaces := false // previous character was a space

	if !yaml_emitter_write_indicator(emitter, []byte("\""), true, false, false) {
		return false
	}

	for i := 0; i < len(value); {
		// Characters that must be escaped: unprintable, non-ASCII when
		// unicode output is off, BOMs, line breaks, '"' and '\'.
		if !is_printable_at(value, i) || (!emitter.unicode && !is_ascii(value[i])) ||
			is_bom_at(value, i) || is_break_at(value, i) ||
			value[i] == '"' || value[i] == '\\' {
			octet := value[i]

			// Decode the UTF-8 sequence at i into the code point v
			// (w = sequence length in bytes).
			var w int
			var v rune
			switch {
			case octet&0x80 == 0x00:
				w, v = 1, rune(octet&0x7F)
			case octet&0xE0 == 0xC0:
				w, v = 2, rune(octet&0x1F)
			case octet&0xF0 == 0xE0:
				w, v = 3, rune(octet&0x0F)
			case octet&0xF8 == 0xF0:
				w, v = 4, rune(octet&0x07)
			}

			for k := 1; k < w; k++ {
				octet = value[i+k]
				v = (v << 6) + (rune(octet) & 0x3F)
			}
			i += w

			if !put(emitter, '\\') {
				return false
			}

			// Named escapes for the characters YAML defines; everything
			// else falls through to a numeric \x/\u/\U escape.
			switch v {
			case 0x00:
				if !put(emitter, '0') {
					return false
				}
			case 0x07:
				if !put(emitter, 'a') {
					return false
				}
			case 0x08:
				if !put(emitter, 'b') {
					return false
				}
			case 0x09:
				if !put(emitter, 't') {
					return false
				}

			case 0x0A:
				if !put(emitter, 'n') {
					return false
				}

			case 0x0B:
				if !put(emitter, 'v') {
					return false
				}

			case 0x0C:
				if !put(emitter, 'f') {
					return false
				}

			case 0x0D:
				if !put(emitter, 'r') {
					return false
				}

			case 0x1B:
				if !put(emitter, 'e') {
					return false
				}
			case 0x22:
				if !put(emitter, '"') {
					return false
				}
			case 0x5C:
				if !put(emitter, '\\') {
					return false
				}
			case 0x85:
				if !put(emitter, 'N') {
					return false
				}

			case 0xA0:
				if !put(emitter, '_') {
					return false
				}

			case 0x2028:
				if !put(emitter, 'L') {
					return false
				}

			case 0x2029:
				if !put(emitter, 'P') {
					return false
				}
			default:
				// Numeric escape: \xXX, \uXXXX or \UXXXXXXXX depending
				// on the code point's magnitude (w reused as digit count).
				if v <= 0xFF {
					if !put(emitter, 'x') {
						return false
					}
					w = 2
				} else if v <= 0xFFFF {
					if !put(emitter, 'u') {
						return false
					}
					w = 4
				} else {
					if !put(emitter, 'U') {
						return false
					}
					w = 8
				}
				// Emit w uppercase hex digits, most significant first.
				for k := (w - 1) * 4; k >= 0; k -= 4 {
					digit := byte((v >> uint(k)) & 0x0F)
					c := digit + '0'
					if c > 9 {
						c = digit + 'A' - 10
					}
					if !put(emitter, c) {
						return false
					}
				}
			}
			spaces = false
		} else if is_space(value[i]) {
			// Fold at an interior space once past the preferred width;
			// a following space is protected with an escaped break ('\').
			if allow_breaks && !spaces &&
				emitter.column > emitter.best_width &&
				i > 0 && i < len(value)-1 {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				if is_space(value[i+1]) {
					if !put(emitter, '\\') {
						return false
					}
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			spaces = true
		} else {
			if !write(emitter, value, &i) {
				return false
			}
			spaces = false
		}
	}

	if !yaml_emitter_write_indicator(emitter, []byte("\""), false, false, false) {
		return false
	}

	emitter.whitespace = false
	emitter.indention = false

	return true
}
+
// yaml_emitter_write_block_scalar_hints writes the header hints that
// follow a '|' or '>' indicator: an explicit indentation digit when the
// content begins with a space or break (so the parser cannot infer the
// indent), and a chomping indicator ('-' strip / '+' keep) when the
// content's trailing breaks differ from the default "clip" behavior.
func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {

	if is_space(value[0]) || is_break_at(value, 0) {
		indent_hint := []byte{'0' + byte(emitter.best_indent)}
		if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
			return false
		}
	}

	emitter.open_ended = false

	// chomp_hint stays 0 (no indicator) for the default "clip" case:
	// exactly one trailing break.
	var chomp_hint [1]byte
	if len(value) == 0 {
		chomp_hint[0] = '-'
	} else {
		// Step i back to the start of the last UTF-8 character
		// (0x80..0xBF are continuation bytes).
		i := len(value) - 1
		for value[i]&0xC0 == 0x80 {
			i--
		}

		if !is_break_at(value, i) {
			// No trailing break at all: strip ('-').
			chomp_hint[0] = '-'
		} else if i == 0 {
			// The value is a single break: keep ('+').
			chomp_hint[0] = '+'
			emitter.open_ended = true
		} else {
			// Step back one more character; two trailing breaks
			// mean keep ('+').
			for value[i]&0xC0 == 0x80 {
				i--
			}

			if is_break_at(value, i) {
				chomp_hint[0] = '+'
				emitter.open_ended = true
			}
		}
	}

	if chomp_hint[0] != 0 {
		if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
			return false
		}
	}

	return true
}
+
// yaml_emitter_write_literal_scalar writes value as a literal block
// scalar ('|'): the header with hints, then each line of the content
// verbatim at the current indentation.
func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {

	breaks := true // previous character was a line break (start counts)

	if !yaml_emitter_write_indicator(emitter, []byte("|"), true, false, false) {
		return false
	}

	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
		return false
	}

	if !put_break(emitter) {
		return false
	}

	emitter.indention = true
	emitter.whitespace = true

	for i := 0; i < len(value); {
		if is_break_at(value, i) {
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			// Re-indent after each break before the line's content.
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
			}
			if !write(emitter, value, &i) {
				return false
			}
			emitter.indention = false
			breaks = false
		}
	}

	return true
}
+
// yaml_emitter_write_folded_scalar writes value as a folded block scalar
// ('>'): single line breaks between non-blank lines fold to spaces, so an
// extra break is emitted to preserve them; long lines may additionally be
// folded at spaces past the preferred width.
func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
	breaks := true         // previous character was a line break (start counts)
	leading_spaces := true // current line starts with a blank (folding unsafe)

	if !yaml_emitter_write_indicator(emitter, []byte(">"), true, false, false) {
		return false
	}
	if !yaml_emitter_write_block_scalar_hints(emitter, value) {
		return false
	}
	if !put_break(emitter) {
		return false
	}
	emitter.indention = true
	emitter.whitespace = true

	for i := 0; i < len(value); {
		if is_break_at(value, i) {
			if !breaks && !leading_spaces && value[i] == '\n' {
				// Look past the run of breaks: if real content follows,
				// emit an extra break so folding keeps this one.
				k := i
				for is_break_at(value, k) {
					k += width(value[k])
				}
				if !is_blankz_at(value, k) {
					if !put_break(emitter) {
						return false
					}
				}
			}
			if !write_break(emitter, value, &i) {
				return false
			}
			emitter.indention = true
			breaks = true
		} else {
			if breaks {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				leading_spaces = is_blank(value[i])
			}
			// Fold a long line at a single interior space.
			// NOTE(review): value[i+1] assumes the content never ends in a
			// space (analyze_scalar disallows block style for trailing
			// spaces) — confirm callers respect that.
			if !breaks && is_space(value[i]) && !is_space(value[i+1]) &&
				emitter.column > emitter.best_width {
				if !yaml_emitter_write_indent(emitter) {
					return false
				}
				i += width(value[i])
			} else {
				if !write(emitter, value, &i) {
					return false
				}
			}
			emitter.indention = false
			breaks = false
		}
	}

	return true
}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
new file mode 100644
index 00000000..fd991808
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/encode.go
@@ -0,0 +1,395 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "bytes"
+ "encoding/base64"
+ "io"
+ "math"
+ "reflect"
+ "regexp"
+ "sort"
+ "strconv"
+ "time"
+)
+
var (
	// Reflection types matched against during marshaling.
	timeTimeType  = reflect.TypeOf(time.Time{})
	marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
	numberType    = reflect.TypeOf(Number(""))
	// nonPrintable matches any character outside the YAML printable set;
	// strings containing such characters are emitted as !!binary.
	nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
	// multiline matches any YAML line-break character.
	multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")

	// shortTags maps the canonical tag URIs to their shorthand forms.
	shortTags = map[string]string{
		yaml_NULL_TAG:      "!!null",
		yaml_BOOL_TAG:      "!!bool",
		yaml_STR_TAG:       "!!str",
		yaml_INT_TAG:       "!!int",
		yaml_FLOAT_TAG:     "!!float",
		yaml_TIMESTAMP_TAG: "!!timestamp",
		yaml_SEQ_TAG:       "!!seq",
		yaml_MAP_TAG:       "!!map",
		yaml_BINARY_TAG:    "!!binary",
	}
)
+
// Marshaler is implemented by types that customize their YAML
// representation. MarshalYAML returns the tag to emit and the value to
// marshal in place of the receiver.
type Marshaler interface {
	MarshalYAML() (tag string, value interface{}, err error)
}
+
// An Encoder writes YAML values to an output stream.
type Encoder struct {
	w       io.Writer      // destination stream
	emitter yaml_emitter_t // low-level YAML emitter state
	event   yaml_event_t   // scratch event reused for every emission
	flow    bool           // emit the next collection in flow style (set per-field)
	err     error          // sticky error; Encode refuses further work once set
}
+
+func Marshal(v interface{}) ([]byte, error) {
+ b := bytes.Buffer{}
+ e := NewEncoder(&b)
+ err := e.Encode(v)
+ return b.Bytes(), err
+}
+
// NewEncoder returns a new encoder that writes to w.
// The STREAM-START and DOCUMENT-START events are emitted immediately,
// so the returned encoder is ready to marshal a value.
func NewEncoder(w io.Writer) *Encoder {
	e := &Encoder{w: w}
	yaml_emitter_initialize(&e.emitter)
	yaml_emitter_set_output_writer(&e.emitter, e.w)
	yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
	e.emit()
	yaml_document_start_event_initialize(&e.event, nil, nil, true)
	e.emit()

	return e
}
+
// Encode marshals v as a YAML document and terminates the stream.
// Panics raised during emission are converted to the returned error by
// recovery; a previously recorded sticky error short-circuits the call.
func (e *Encoder) Encode(v interface{}) (err error) {
	defer recovery(&err)

	if e.err != nil {
		return e.err
	}

	e.marshal("", reflect.ValueOf(v), true)

	yaml_document_end_event_initialize(&e.event, true)
	e.emit()
	e.emitter.open_ended = false
	yaml_stream_end_event_initialize(&e.event)
	e.emit()

	return nil
}
+
// emit feeds the pending event to the emitter, panicking on failure;
// the panic is converted to an error by recovery() in Encode.
func (e *Encoder) emit() {
	if !yaml_emitter_emit(&e.emitter, &e.event) {
		panic("bad emit")
	}
}
+
// marshal dispatches v to the appropriate emit helper based on its kind.
// tag is an optional explicit YAML tag. allowAddr guards the *T Marshaler
// check so it is attempted at most once per value (the addressable-pointer
// path calls back with allowAddr=false).
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
	vt := v.Type()

	// A type implementing Marshaler controls its own representation.
	if vt.Implements(marshalerType) {
		e.emitMarshaler(tag, v)
		return
	}

	// Otherwise, *T may implement Marshaler even though T does not.
	if vt.Kind() != reflect.Ptr && allowAddr {
		if reflect.PtrTo(vt).Implements(marshalerType) {
			e.emitAddrMarshaler(tag, v)
			return
		}
	}

	switch v.Kind() {
	case reflect.Interface:
		if v.IsNil() {
			e.emitNil()
		} else {
			e.marshal(tag, v.Elem(), allowAddr)
		}
	case reflect.Map:
		e.emitMap(tag, v)
	case reflect.Ptr:
		if v.IsNil() {
			e.emitNil()
		} else {
			e.marshal(tag, v.Elem(), true)
		}
	case reflect.Struct:
		e.emitStruct(tag, v)
	case reflect.Slice:
		e.emitSlice(tag, v)
	case reflect.String:
		e.emitString(tag, v)
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		e.emitInt(tag, v)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		e.emitUint(tag, v)
	case reflect.Float32, reflect.Float64:
		e.emitFloat(tag, v)
	case reflect.Bool:
		e.emitBool(tag, v)
	default:
		panic("Can't marshal type yet: " + v.Type().String())
	}
}
+
+func (e *Encoder) emitMap(tag string, v reflect.Value) {
+ e.mapping(tag, func() {
+ var keys stringValues = v.MapKeys()
+ sort.Sort(keys)
+ for _, k := range keys {
+ e.marshal("", k, true)
+ e.marshal("", v.MapIndex(k), true)
+ }
+ })
+}
+
// emitStruct writes a struct as a YAML mapping of its tagged fields.
// time.Time is special-cased to a timestamp scalar. Fields marked
// omitempty are skipped when empty; a field's flow option is forwarded
// to the next collection via e.flow.
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
	if v.Type() == timeTimeType {
		e.emitTime(tag, v)
		return
	}

	fields := cachedTypeFields(v.Type())

	e.mapping(tag, func() {
		for _, f := range fields {
			fv := fieldByIndex(v, f.index)
			if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
				continue
			}

			e.marshal("", reflect.ValueOf(f.name), true)
			e.flow = f.flow
			e.marshal("", fv, true)
		}
	})
}
+
+func (e *Encoder) emitTime(tag string, v reflect.Value) {
+ t := v.Interface().(time.Time)
+ bytes, _ := t.MarshalText()
+ e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func isEmptyValue(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+ return v.Len() == 0
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return v.Float() == 0
+ case reflect.Interface, reflect.Ptr:
+ return v.IsNil()
+ }
+ return false
+}
+
// mapping emits MAPPING-START, invokes f to emit the key/value pairs,
// then emits MAPPING-END. Flow style is used (and the e.flow request
// consumed) when it was set by the caller; the tag is implicit if empty.
func (e *Encoder) mapping(tag string, f func()) {
	implicit := tag == ""
	style := yaml_BLOCK_MAPPING_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_MAPPING_STYLE
	}
	yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
	e.emit()

	f()

	yaml_mapping_end_event_initialize(&e.event)
	e.emit()
}
+
// emitSlice writes a slice as a YAML sequence. []byte is special-cased
// to a base64 !!binary scalar. Flow style is used (and the e.flow
// request consumed) when set; the tag is implicit if empty.
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
	if v.Type() == byteSliceType {
		e.emitBase64(tag, v)
		return
	}

	implicit := tag == ""
	style := yaml_BLOCK_SEQUENCE_STYLE
	if e.flow {
		e.flow = false
		style = yaml_FLOW_SEQUENCE_STYLE
	}
	yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
	e.emit()

	n := v.Len()
	for i := 0; i < n; i++ {
		e.marshal("", v.Index(i), true)
	}

	yaml_sequence_end_event_initialize(&e.event)
	e.emit()
}
+
+func (e *Encoder) emitBase64(tag string, v reflect.Value) {
+ if v.IsNil() {
+ e.emitNil()
+ return
+ }
+
+ s := v.Bytes()
+
+ dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
+
+ base64.StdEncoding.Encode(dst, s)
+ e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
+}
+
// emitString writes a string scalar, choosing a style that round-trips:
// strings with unprintable characters become !!binary; strings that
// would implicitly resolve to a non-string type are double-quoted;
// multiline strings use literal style; everything else is plain.
func (e *Encoder) emitString(tag string, v reflect.Value) {
	var style yaml_scalar_style_t
	s := v.String()

	if nonPrintable.MatchString(s) {
		e.emitBase64(tag, v)
		return
	}

	if v.Type() == numberType {
		// Number is already a textual numeric literal; emit verbatim.
		style = yaml_PLAIN_SCALAR_STYLE
	} else {
		// Resolve what an untagged parser would make of this text.
		event := yaml_event_t{
			implicit: true,
			value:    []byte(s),
		}

		rtag, _ := resolveInterface(event, false)
		if tag == "" && rtag != yaml_STR_TAG {
			// Would parse as bool/int/null/etc. — quote to keep it a string.
			style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
		} else if multiline.MatchString(s) {
			style = yaml_LITERAL_SCALAR_STYLE
		} else {
			style = yaml_PLAIN_SCALAR_STYLE
		}
	}

	e.emitScalar(s, "", tag, style)
}
+
+func (e *Encoder) emitBool(tag string, v reflect.Value) {
+ s := strconv.FormatBool(v.Bool())
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *Encoder) emitInt(tag string, v reflect.Value) {
+ s := strconv.FormatInt(v.Int(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *Encoder) emitUint(tag string, v reflect.Value) {
+ s := strconv.FormatUint(v.Uint(), 10)
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
+func (e *Encoder) emitFloat(tag string, v reflect.Value) {
+ f := v.Float()
+
+ var s string
+ switch {
+ case math.IsNaN(f):
+ s = ".nan"
+ case math.IsInf(f, 1):
+ s = "+.inf"
+ case math.IsInf(f, -1):
+ s = "-.inf"
+ default:
+ s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
+ }
+
+ e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
+}
+
// emitNil writes a YAML null as the plain scalar "null".
func (e *Encoder) emitNil() {
	e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
+
// emitScalar emits a single scalar event, shortening the tag to its
// !!name form when known.
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
	implicit := tag == ""
	// NOTE(review): an explicit tag overrides the caller's requested
	// style with plain — appears deliberate (tagged scalars carry their
	// type in the tag), but confirm against upstream candiedyaml.
	if !implicit {
		style = yaml_PLAIN_SCALAR_STYLE
	}

	stag := shortTags[tag]
	if stag == "" {
		stag = tag
	}

	yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
	e.emit()
}
+
// emitMarshaler marshals a value whose own type implements Marshaler.
// Nil receivers and nil replacement values are emitted as YAML null;
// a MarshalYAML error aborts via panic (recovered in Encode).
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
	if v.Kind() == reflect.Ptr && v.IsNil() {
		e.emitNil()
		return
	}

	m := v.Interface().(Marshaler)
	if m == nil {
		e.emitNil()
		return
	}
	t, val, err := m.MarshalYAML()
	if err != nil {
		panic(err)
	}
	if val == nil {
		e.emitNil()
		return
	}

	// allowAddr=false: the replacement value gets no second *T check.
	e.marshal(t, reflect.ValueOf(val), false)
}
+
+func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
+ if !v.CanAddr() {
+ e.marshal(tag, v, false)
+ return
+ }
+
+ va := v.Addr()
+ if va.IsNil() {
+ e.emitNil()
+ return
+ }
+
+ m := v.Interface().(Marshaler)
+ t, val, err := m.MarshalYAML()
+ if err != nil {
+ panic(err)
+ }
+
+ if val == nil {
+ e.emitNil()
+ return
+ }
+
+ e.marshal(t, reflect.ValueOf(val), false)
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE b/vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
new file mode 100644
index 00000000..050ced23
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/libyaml-LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2006 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
new file mode 100644
index 00000000..8d38e306
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/parser.go
@@ -0,0 +1,1230 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "bytes"
+)
+
+/*
+ * The parser implements the following grammar:
+ *
+ * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ * implicit_document ::= block_node DOCUMENT-END*
+ * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+ * block_node_or_indentless_sequence ::=
+ * ALIAS
+ * | properties (block_content | indentless_block_sequence)?
+ * | block_content
+ * | indentless_block_sequence
+ * block_node ::= ALIAS
+ * | properties block_content?
+ * | block_content
+ * flow_node ::= ALIAS
+ * | properties flow_content?
+ * | flow_content
+ * properties ::= TAG ANCHOR? | ANCHOR TAG?
+ * block_content ::= block_collection | flow_collection | SCALAR
+ * flow_content ::= flow_collection | SCALAR
+ * block_collection ::= block_sequence | block_mapping
+ * flow_collection ::= flow_sequence | flow_mapping
+ * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+ * indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+ * block_mapping ::= BLOCK-MAPPING_START
+ * ((KEY block_node_or_indentless_sequence?)?
+ * (VALUE block_node_or_indentless_sequence?)?)*
+ * BLOCK-END
+ * flow_sequence ::= FLOW-SEQUENCE-START
+ * (flow_sequence_entry FLOW-ENTRY)*
+ * flow_sequence_entry?
+ * FLOW-SEQUENCE-END
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * flow_mapping ::= FLOW-MAPPING-START
+ * (flow_mapping_entry FLOW-ENTRY)*
+ * flow_mapping_entry?
+ * FLOW-MAPPING-END
+ * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ */
+
+/*
+ * Peek the next token in the token queue.
+ */
+// peek_token returns the next queued token without consuming it, asking the
+// scanner for more tokens when the queue is empty.  It returns nil when the
+// scanner fails.
+func peek_token(parser *yaml_parser_t) *yaml_token_t {
+	if !parser.token_available && !yaml_parser_fetch_more_tokens(parser) {
+		return nil
+	}
+	return &parser.tokens[parser.tokens_head]
+}
+
+/*
+ * Remove the next token from the queue (must be called after peek_token).
+ */
+// skip_token consumes the token returned by the preceding peek_token call.
+func skip_token(parser *yaml_parser_t) {
+	head := parser.tokens_head
+	// Note whether the consumed token terminates the stream before advancing.
+	parser.stream_end_produced = parser.tokens[head].token_type == yaml_STREAM_END_TOKEN
+	parser.token_available = false
+	parser.tokens_parsed++
+	parser.tokens_head = head + 1
+}
+
+/*
+ * Get the next event.
+ */
+
+// yaml_parser_parse produces the next parsing event into event.  It returns
+// false only on a parsing failure; once the stream has ended (or an error
+// has already been recorded) it returns true with an empty event.
+func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
+	// Reset the caller's event before producing a new one.
+	*event = yaml_event_t{}
+
+	// No further events after the end of the stream, a recorded error, or
+	// the terminal state.
+	finished := parser.stream_end_produced ||
+		parser.error != yaml_NO_ERROR ||
+		parser.state == yaml_PARSE_END_STATE
+	if finished {
+		return true
+	}
+
+	// Dispatch on the current state to generate the next event.
+	return yaml_parser_state_machine(parser, event)
+}
+
+/*
+ * Set parser error.
+ */
+
+// yaml_parser_set_parser_error records a parser-level error on parser and
+// always returns false, so call sites can simply
+// "return yaml_parser_set_parser_error(...)".
+func yaml_parser_set_parser_error(parser *yaml_parser_t,
+	problem string, problem_mark YAML_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.problem, parser.problem_mark = problem, problem_mark
+	return false
+}
+
+// yaml_parser_set_parser_error_context records a parser-level error together
+// with the surrounding context (e.g. "while parsing a block mapping") and
+// always returns false for convenient use in return statements.
+func yaml_parser_set_parser_error_context(parser *yaml_parser_t,
+	context string, context_mark YAML_mark_t,
+	problem string, problem_mark YAML_mark_t) bool {
+	parser.error = yaml_PARSER_ERROR
+	parser.context, parser.context_mark = context, context_mark
+	parser.problem, parser.problem_mark = problem, problem_mark
+	return false
+}
+
+/*
+ * State dispatcher.
+ */
+
+// yaml_parser_state_machine dispatches to the handler for the parser's
+// current state.  Each handler emits exactly one event and advances
+// parser.state (possibly pushing a return state onto parser.states).
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
+	switch parser.state {
+	case yaml_PARSE_STREAM_START_STATE:
+		return yaml_parser_parse_stream_start(parser, event)
+
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, true)
+
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return yaml_parser_parse_document_start(parser, event, false)
+
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return yaml_parser_parse_document_content(parser, event)
+
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return yaml_parser_parse_document_end(parser, event)
+
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, true, false)
+
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return yaml_parser_parse_node(parser, event, true, true)
+
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return yaml_parser_parse_node(parser, event, false, false)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_block_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_indentless_sequence_entry(parser, event)
+
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, true)
+
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return yaml_parser_parse_block_mapping_key(parser, event, false)
+
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_block_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, true)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return yaml_parser_parse_flow_sequence_entry(parser, event, false)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
+
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
+
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, true)
+
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return yaml_parser_parse_flow_mapping_key(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, false)
+
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return yaml_parser_parse_flow_mapping_value(parser, event, true)
+	}
+
+	// Unreachable unless parser.state is corrupted or a new state was added
+	// without a handler: a programming error, not a YAML error.
+	panic("invalid parser state")
+}
+
+/*
+ * Parse the production:
+ * stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ * ************
+ */
+
+// yaml_parser_parse_stream_start handles the initial parser state.  The
+// first token must be STREAM-START; it becomes a STREAM-START event carrying
+// the detected input encoding, and the parser moves on to document parsing.
+func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type != yaml_STREAM_START_TOKEN {
+		// The message previously ended in "expected " with nothing after it;
+		// restore the expected-token name (as in libyaml).
+		return yaml_parser_set_parser_error(parser,
+			"did not find expected <stream-start>", token.start_mark)
+	}
+
+	parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		event_type: yaml_STREAM_START_EVENT,
+		start_mark: token.start_mark,
+		end_mark:   token.end_mark,
+		encoding:   token.encoding,
+	}
+	skip_token(parser)
+
+	return true
+}
+
+/*
+ * Parse the productions:
+ * implicit_document ::= block_node DOCUMENT-END*
+ * *
+ * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+ * *************************
+ */
+
+// yaml_parser_parse_document_start parses the start of a document:
+//
+//	implicit_document ::= block_node DOCUMENT-END*
+//	explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+//
+// When implicit is true the parser sits before the first document and may
+// begin one without a "---" marker.
+func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t,
+	implicit bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	/* Parse extra document end indicators. */
+
+	if !implicit {
+		for token.token_type == yaml_DOCUMENT_END_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+		}
+	}
+
+	/* Parse an implicit document: content begins with no directives
+	 * and no "---" marker. */
+
+	if implicit && token.token_type != yaml_VERSION_DIRECTIVE_TOKEN &&
+		token.token_type != yaml_TAG_DIRECTIVE_TOKEN &&
+		token.token_type != yaml_DOCUMENT_START_TOKEN &&
+		token.token_type != yaml_STREAM_END_TOKEN {
+		// Still run directive processing so the default "!"/"!!" handles
+		// are installed.
+		if !yaml_parser_process_directives(parser, nil, nil) {
+			return false
+		}
+
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_BLOCK_NODE_STATE
+
+		*event = yaml_event_t{
+			event_type: yaml_DOCUMENT_START_EVENT,
+			implicit:   true,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+	} else if token.token_type != yaml_STREAM_END_TOKEN {
+		/* Parse an explicit document. */
+		var version_directive *yaml_version_directive_t
+		var tag_directives []yaml_tag_directive_t
+
+		start_mark := token.start_mark
+		if !yaml_parser_process_directives(parser, &version_directive,
+			&tag_directives) {
+			return false
+		}
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.token_type != yaml_DOCUMENT_START_TOKEN {
+			// Directives must be followed by "---".  The message previously
+			// ended in "expected " with nothing after it; restore the
+			// expected-token name (as in libyaml).
+			yaml_parser_set_parser_error(parser,
+				"did not find expected <document start>", token.start_mark)
+			return false
+		}
+
+		parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
+		parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
+
+		end_mark := token.end_mark
+
+		*event = yaml_event_t{
+			event_type:        yaml_DOCUMENT_START_EVENT,
+			start_mark:        start_mark,
+			end_mark:          end_mark,
+			version_directive: version_directive,
+			tag_directives:    tag_directives,
+			implicit:          false,
+		}
+		skip_token(parser)
+	} else {
+		/* Parse the stream end. */
+		parser.state = yaml_PARSE_END_STATE
+
+		*event = yaml_event_t{
+			event_type: yaml_STREAM_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark:   token.end_mark,
+		}
+		skip_token(parser)
+	}
+	return true
+}
+
+/*
+ * Parse the productions:
+ * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+ * ***********
+ */
+
+// yaml_parser_parse_document_content parses the optional block_node of an
+// explicit document.  A directive, document boundary, or stream end in this
+// position means the document body is absent, which is reported as an empty
+// scalar.
+func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	switch token.token_type {
+	case yaml_VERSION_DIRECTIVE_TOKEN,
+		yaml_TAG_DIRECTIVE_TOKEN,
+		yaml_DOCUMENT_START_TOKEN,
+		yaml_DOCUMENT_END_TOKEN,
+		yaml_STREAM_END_TOKEN:
+		// Empty document body: pop the saved state and emit an empty scalar.
+		last := len(parser.states) - 1
+		parser.state = parser.states[last]
+		parser.states = parser.states[:last]
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+	return yaml_parser_parse_node(parser, event, true, false)
+}
+
+/*
+ * Parse the productions:
+ * implicit_document ::= block_node DOCUMENT-END*
+ * *************
+ * explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+ * *************
+ */
+
+// yaml_parser_parse_document_end produces a DOCUMENT-END event.  An explicit
+// "..." token is consumed; otherwise the end is implicit and the event is
+// zero-width at the current token's start.
+func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
+	implicit := true
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	start_mark, end_mark := token.start_mark, token.start_mark
+
+	if token.token_type == yaml_DOCUMENT_END_TOKEN {
+		end_mark = token.end_mark
+		skip_token(parser)
+		implicit = false
+	}
+
+	// %TAG directives are scoped to a single document; drop them here.
+	parser.tag_directives = parser.tag_directives[:0]
+
+	parser.state = yaml_PARSE_DOCUMENT_START_STATE
+	*event = yaml_event_t{
+		event_type: yaml_DOCUMENT_END_EVENT,
+		start_mark: start_mark,
+		end_mark: end_mark,
+		implicit: implicit,
+	}
+
+	return true
+}
+
+/*
+ * Parse the productions:
+ * block_node_or_indentless_sequence ::=
+ * ALIAS
+ * *****
+ * | properties (block_content | indentless_block_sequence)?
+ * ********** *
+ * | block_content | indentless_block_sequence
+ * *
+ * block_node ::= ALIAS
+ * *****
+ * | properties block_content?
+ * ********** *
+ * | block_content
+ * *
+ * flow_node ::= ALIAS
+ * *****
+ * | properties flow_content?
+ * ********** *
+ * | flow_content
+ * *
+ * properties ::= TAG ANCHOR? | ANCHOR TAG?
+ * *************************
+ * block_content ::= block_collection | flow_collection | SCALAR
+ * ******
+ * flow_content ::= flow_collection | SCALAR
+ * ******
+ */
+
+// yaml_parser_parse_node parses a single node: an alias, or optional
+// anchor/tag properties followed by scalar/sequence/mapping content.
+// "block" permits block-style content; "indentless_sequence" additionally
+// permits a sequence introduced directly by "-" entries (used for mapping
+// values).  Exactly one event is produced on success.
+func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t,
+	block bool, indentless_sequence bool) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	// An alias node is a single token; pop the saved state and emit it.
+	if token.token_type == yaml_ALIAS_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		*event = yaml_event_t{
+			event_type: yaml_ALIAS_EVENT,
+			start_mark: token.start_mark,
+			end_mark: token.end_mark,
+			anchor: token.value,
+		}
+		skip_token(parser)
+		return true
+	} else {
+		start_mark, end_mark := token.start_mark, token.start_mark
+
+		var tag_handle []byte
+		var tag_suffix, anchor []byte
+		var tag_mark YAML_mark_t
+		// Properties may appear in either order: ANCHOR TAG? or TAG ANCHOR?.
+		if token.token_type == yaml_ANCHOR_TOKEN {
+			anchor = token.value
+			start_mark = token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.token_type == yaml_TAG_TOKEN {
+				tag_handle = token.value
+				tag_suffix = token.suffix
+				tag_mark = token.start_mark
+				end_mark = token.end_mark
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			}
+		} else if token.token_type == yaml_TAG_TOKEN {
+			tag_handle = token.value
+			tag_suffix = token.suffix
+			start_mark, tag_mark = token.start_mark, token.start_mark
+			end_mark = token.end_mark
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			if token.token_type == yaml_ANCHOR_TOKEN {
+				anchor = token.value
+				end_mark = token.end_mark
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+
+			}
+		}
+
+		// Resolve the tag handle against the document's %TAG directives.
+		// An empty handle means the tag was verbatim (!<...>), so the
+		// suffix already is the full tag.
+		var tag []byte
+		if tag_handle != nil {
+			if len(tag_handle) == 0 {
+				tag = tag_suffix
+				tag_handle = nil
+				tag_suffix = nil
+			} else {
+				for i := range parser.tag_directives {
+					tag_directive := &parser.tag_directives[i]
+					if bytes.Equal(tag_directive.handle, tag_handle) {
+						tag = append([]byte(nil), tag_directive.prefix...)
+						tag = append(tag, tag_suffix...)
+						tag_handle = nil
+						tag_suffix = nil
+						break
+					}
+				}
+				// No directive matched the handle.
+				if len(tag) == 0 {
+					yaml_parser_set_parser_error_context(parser,
+						"while parsing a node", start_mark,
+						"found undefined tag handle", tag_mark)
+					return false
+				}
+			}
+		}
+
+		// With no explicit tag the node's type must be resolved implicitly.
+		implicit := len(tag) == 0
+		// A "-" entry here starts an indentless block sequence (only
+		// reachable when the caller allows it, i.e. as a mapping value).
+		if indentless_sequence && token.token_type == yaml_BLOCK_ENTRY_TOKEN {
+			end_mark = token.end_mark
+			parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+
+			*event = yaml_event_t{
+				event_type: yaml_SEQUENCE_START_EVENT,
+				start_mark: start_mark,
+				end_mark: end_mark,
+				anchor: anchor,
+				tag: tag,
+				implicit: implicit,
+				style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+			}
+
+			return true
+		} else {
+			if token.token_type == yaml_SCALAR_TOKEN {
+				// Plain scalars with no tag (or the non-specific "!" tag)
+				// resolve implicitly by content; quoted scalars with no tag
+				// are implicitly strings.
+				plain_implicit := false
+				quoted_implicit := false
+				end_mark = token.end_mark
+				if (token.style == yaml_PLAIN_SCALAR_STYLE && len(tag) == 0) ||
+					(len(tag) == 1 && tag[0] == '!') {
+					plain_implicit = true
+				} else if len(tag) == 0 {
+					quoted_implicit = true
+				}
+
+				parser.state = parser.states[len(parser.states)-1]
+				parser.states = parser.states[:len(parser.states)-1]
+
+				*event = yaml_event_t{
+					event_type: yaml_SCALAR_EVENT,
+					start_mark: start_mark,
+					end_mark: end_mark,
+					anchor: anchor,
+					tag: tag,
+					value: token.value,
+					implicit: plain_implicit,
+					quoted_implicit: quoted_implicit,
+					style: yaml_style_t(token.style),
+				}
+
+				skip_token(parser)
+				return true
+			} else if token.token_type == yaml_FLOW_SEQUENCE_START_TOKEN {
+				end_mark = token.end_mark
+				parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+
+				*event = yaml_event_t{
+					event_type: yaml_SEQUENCE_START_EVENT,
+					start_mark: start_mark,
+					end_mark: end_mark,
+					anchor: anchor,
+					tag: tag,
+					implicit: implicit,
+					style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
+				}
+
+				return true
+			} else if token.token_type == yaml_FLOW_MAPPING_START_TOKEN {
+				end_mark = token.end_mark
+				parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+
+				*event = yaml_event_t{
+					event_type: yaml_MAPPING_START_EVENT,
+					start_mark: start_mark,
+					end_mark: end_mark,
+					anchor: anchor,
+					tag: tag,
+					implicit: implicit,
+					style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+				}
+
+				return true
+			} else if block && token.token_type == yaml_BLOCK_SEQUENCE_START_TOKEN {
+				end_mark = token.end_mark
+				parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+
+				*event = yaml_event_t{
+					event_type: yaml_SEQUENCE_START_EVENT,
+					start_mark: start_mark,
+					end_mark: end_mark,
+					anchor: anchor,
+					tag: tag,
+					implicit: implicit,
+					style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
+				}
+
+				return true
+			} else if block && token.token_type == yaml_BLOCK_MAPPING_START_TOKEN {
+				end_mark = token.end_mark
+				parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+
+				*event = yaml_event_t{
+					event_type: yaml_MAPPING_START_EVENT,
+					start_mark: start_mark,
+					end_mark: end_mark,
+					anchor: anchor,
+					tag: tag,
+					implicit: implicit,
+					style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
+				}
+				return true
+			} else if len(anchor) > 0 || len(tag) > 0 {
+				// Properties with no following content: an empty scalar
+				// node carrying the anchor/tag (e.g. "&a" or "!!str" alone).
+				parser.state = parser.states[len(parser.states)-1]
+				parser.states = parser.states[:len(parser.states)-1]
+
+				*event = yaml_event_t{
+					event_type: yaml_SCALAR_EVENT,
+					start_mark: start_mark,
+					end_mark: end_mark,
+					anchor: anchor,
+					tag: tag,
+					implicit: implicit,
+					quoted_implicit: false,
+					style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+				}
+				return true
+			} else {
+				msg := "while parsing a block node"
+				if !block {
+					msg = "while parsing a flow node"
+				}
+				yaml_parser_set_parser_error_context(parser, msg, start_mark,
+					"did not find expected node content", token.start_mark)
+				return false
+			}
+		}
+	}
+
+	// Unreachable: every branch above returns.
+	return false
+}
+
+/*
+ * Parse the productions:
+ * block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+ * ******************** *********** * *********
+ */
+
+// yaml_parser_parse_block_sequence_entry parses one entry of:
+//	block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+// On the first call it consumes the BLOCK-SEQUENCE-START token and records
+// its mark for error reporting.
+func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t,
+	event *yaml_event_t, first bool) bool {
+	if first {
+		// NOTE(review): token is dereferenced without a nil check here,
+		// unlike every other peek_token call site; presumably the
+		// BLOCK-SEQUENCE-START token that led to this state is still
+		// queued - confirm.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		// A "-" immediately followed by another "-" or the block end is an
+		// empty entry.
+		if token.token_type != yaml_BLOCK_ENTRY_TOKEN &&
+			token.token_type != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.token_type == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		*event = yaml_event_t{
+			event_type: yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark: token.end_mark,
+		}
+
+		skip_token(parser)
+		return true
+	} else {
+		// Anything else inside a block sequence is a syntax error; report
+		// it against the sequence's start mark.
+		mark := parser.marks[len(parser.marks)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		return yaml_parser_set_parser_error_context(parser,
+			"while parsing a block collection", mark,
+			"did not find expected '-' indicator", token.start_mark)
+	}
+}
+
+/*
+ * Parse the productions:
+ * indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+ * *********** *
+ */
+
+// yaml_parser_parse_indentless_sequence_entry parses one entry of:
+//	indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+// There is no BLOCK-END for an indentless sequence; the sequence ends at the
+// first token that is not a "-" entry (a key, value, or block end of the
+// enclosing mapping), producing a zero-width SEQUENCE-END event.
+func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t,
+	event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type == yaml_BLOCK_ENTRY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		// "-" followed by another "-", a key/value of the enclosing
+		// mapping, or a block end means this entry is empty.
+		if token.token_type != yaml_BLOCK_ENTRY_TOKEN &&
+			token.token_type != yaml_KEY_TOKEN &&
+			token.token_type != yaml_VALUE_TOKEN &&
+			token.token_type != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, true, false)
+		} else {
+			parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+
+		// Zero-width end event: the current token belongs to the parent.
+		*event = yaml_event_t{
+			event_type: yaml_SEQUENCE_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark: token.start_mark,
+		}
+		return true
+	}
+}
+
+/*
+ * Parse the productions:
+ * block_mapping ::= BLOCK-MAPPING_START
+ * *******************
+ * ((KEY block_node_or_indentless_sequence?)?
+ * *** *
+ * (VALUE block_node_or_indentless_sequence?)?)*
+ *
+ * BLOCK-END
+ * *********
+ */
+
+// yaml_parser_parse_block_mapping_key parses the key position of a block
+// mapping entry.  On the first call it consumes the BLOCK-MAPPING-START
+// token and records its mark for error reporting.
+func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t,
+	event *yaml_event_t, first bool) bool {
+	if first {
+		// NOTE(review): no nil check on this peek_token result, unlike the
+		// other call sites; presumably the BLOCK-MAPPING-START token is
+		// still queued - confirm.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type == yaml_KEY_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		// "?" immediately followed by another key, a value, or the block
+		// end means the key itself is empty.
+		if token.token_type != yaml_KEY_TOKEN &&
+			token.token_type != yaml_VALUE_TOKEN &&
+			token.token_type != yaml_BLOCK_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else if token.token_type == yaml_BLOCK_END_TOKEN {
+		parser.state = parser.states[len(parser.states)-1]
+		parser.states = parser.states[:len(parser.states)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+		*event = yaml_event_t{
+			event_type: yaml_MAPPING_END_EVENT,
+			start_mark: token.start_mark,
+			end_mark: token.end_mark,
+		}
+		skip_token(parser)
+		return true
+	} else {
+		// Anything else in key position is a syntax error; report it
+		// against the mapping's start mark.
+		mark := parser.marks[len(parser.marks)-1]
+		parser.marks = parser.marks[:len(parser.marks)-1]
+
+		return yaml_parser_set_parser_error_context(parser,
+			"while parsing a block mapping", mark,
+			"did not find expected key", token.start_mark)
+	}
+}
+
+/*
+ * Parse the productions:
+ * block_mapping ::= BLOCK-MAPPING_START
+ *
+ * ((KEY block_node_or_indentless_sequence?)?
+ *
+ * (VALUE block_node_or_indentless_sequence?)?)*
+ * ***** *
+ * BLOCK-END
+ *
+ */
+
+// yaml_parser_parse_block_mapping_value parses the value position of a block
+// mapping entry.  A missing ":" (or a ":" followed directly by the next
+// key/value/block-end) yields an empty scalar value.
+func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t,
+	event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type == yaml_VALUE_TOKEN {
+		mark := token.end_mark
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.token_type != yaml_KEY_TOKEN &&
+			token.token_type != yaml_VALUE_TOKEN &&
+			token.token_type != yaml_BLOCK_END_TOKEN {
+			// A real value follows; parse it (indentless sequences allowed)
+			// and return to key position afterwards.
+			parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, true, true)
+		} else {
+			parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+			return yaml_parser_process_empty_scalar(parser, event, mark)
+		}
+	} else {
+		// No ":" at all: the value is empty.
+		parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+	}
+}
+
+/*
+ * Parse the productions:
+ * flow_sequence ::= FLOW-SEQUENCE-START
+ * *******************
+ * (flow_sequence_entry FLOW-ENTRY)*
+ * * **********
+ * flow_sequence_entry?
+ * *
+ * FLOW-SEQUENCE-END
+ * *****************
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * *
+ */
+
+// yaml_parser_parse_flow_sequence_entry parses one entry of a flow sequence
+// "[a, b, ...]".  On the first call it consumes the FLOW-SEQUENCE-START
+// token and records its mark for error reporting.  A KEY token here starts a
+// single-pair mapping used as a sequence entry.
+func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t,
+	event *yaml_event_t, first bool) bool {
+	if first {
+		// NOTE(review): no nil check on this peek_token result, unlike the
+		// other call sites; presumably the FLOW-SEQUENCE-START token is
+		// still queued - confirm.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN {
+		// Entries after the first must be separated by a comma.
+		if !first {
+			if token.token_type == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow sequence", mark,
+					"did not find expected ',' or ']'", token.start_mark)
+			}
+		}
+
+		if token.token_type == yaml_KEY_TOKEN {
+			// "?" inside a flow sequence: an implicit single-pair mapping.
+			parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+			*event = yaml_event_t{
+				event_type: yaml_MAPPING_START_EVENT,
+				start_mark: token.start_mark,
+				end_mark: token.end_mark,
+				implicit: true,
+				style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
+			}
+
+			skip_token(parser)
+			return true
+		} else if token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	// "]" reached: pop the saved state and mark, emit SEQUENCE-END.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+
+	*event = yaml_event_t{
+		event_type: yaml_SEQUENCE_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark: token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+/*
+ * Parse the productions:
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * *** *
+ */
+
+// yaml_parser_parse_flow_sequence_entry_mapping_key parses the key of a
+// single-pair mapping that appears as a flow sequence entry ("[? k: v]").
+// An immediately following ':' , ',' or ']' means the key is empty.
+func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t,
+	event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	switch token.token_type {
+	case yaml_VALUE_TOKEN, yaml_FLOW_ENTRY_TOKEN, yaml_FLOW_SEQUENCE_END_TOKEN:
+		// The key is absent: consume nothing but the marker and report an
+		// empty scalar.
+		mark := token.end_mark
+		skip_token(parser)
+		parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+		return yaml_parser_process_empty_scalar(parser, event, mark)
+	}
+
+	// A real key follows: parse it as a flow node, then come back for the
+	// value.
+	parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
+	return yaml_parser_parse_node(parser, event, false, false)
+}
+
+/*
+ * Parse the productions:
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * ***** *
+ */
+
+// yaml_parser_parse_flow_sequence_entry_mapping_value parses the value of a
+// single-pair mapping inside a flow sequence.  A missing ":" (or a ":"
+// followed directly by ',' or ']') yields an empty scalar value.
+func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t,
+	event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.token_type != yaml_FLOW_ENTRY_TOKEN &&
+			token.token_type != yaml_FLOW_SEQUENCE_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+	// Either no ":" was present or nothing followed it: empty value.
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+/*
+ * Parse the productions:
+ * flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * *
+ */
+
+// yaml_parser_parse_flow_sequence_entry_mapping_end closes the single-pair
+// mapping opened inside a flow sequence entry by emitting a zero-width
+// MAPPING-END event, then resumes normal flow-sequence parsing.
+func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t,
+	event *yaml_event_t) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+
+	// The event is zero-width: the current token belongs to the sequence.
+	mark := token.start_mark
+	*event = yaml_event_t{
+		event_type: yaml_MAPPING_END_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+	}
+	return true
+}
+
+/*
+ * Parse the productions:
+ * flow_mapping ::= FLOW-MAPPING-START
+ * ******************
+ * (flow_mapping_entry FLOW-ENTRY)*
+ * * **********
+ * flow_mapping_entry?
+ * ******************
+ * FLOW-MAPPING-END
+ * ****************
+ * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * * *** *
+ */
+
+// yaml_parser_parse_flow_mapping_key parses the key position of a flow
+// mapping "{k: v, ...}".  On the first call it consumes the
+// FLOW-MAPPING-START token and records its mark for error reporting.
+func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t,
+	event *yaml_event_t, first bool) bool {
+	if first {
+		// NOTE(review): no nil check on this peek_token result, unlike the
+		// other call sites; presumably the FLOW-MAPPING-START token is
+		// still queued - confirm.
+		token := peek_token(parser)
+		parser.marks = append(parser.marks, token.start_mark)
+		skip_token(parser)
+	}
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
+		// Entries after the first must be separated by a comma.
+		if !first {
+			if token.token_type == yaml_FLOW_ENTRY_TOKEN {
+				skip_token(parser)
+				token = peek_token(parser)
+				if token == nil {
+					return false
+				}
+			} else {
+				mark := parser.marks[len(parser.marks)-1]
+				parser.marks = parser.marks[:len(parser.marks)-1]
+
+				return yaml_parser_set_parser_error_context(parser,
+					"while parsing a flow mapping", mark,
+					"did not find expected ',' or '}'", token.start_mark)
+			}
+		}
+
+		if token.token_type == yaml_KEY_TOKEN {
+			skip_token(parser)
+			token = peek_token(parser)
+			if token == nil {
+				return false
+			}
+			// "?" followed directly by ':', ',' or '}' means the key is
+			// empty.
+			if token.token_type != yaml_VALUE_TOKEN &&
+				token.token_type != yaml_FLOW_ENTRY_TOKEN &&
+				token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
+				parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
+				return yaml_parser_parse_node(parser, event, false, false)
+			} else {
+				parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+				return yaml_parser_process_empty_scalar(parser, event,
+					token.start_mark)
+			}
+		} else if token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
+			// A key with no "?" marker: parse it and proceed to the
+			// empty-value state (the value may still follow a ":").
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	// "}" reached: pop the saved state and mark, emit MAPPING-END.
+	parser.state = parser.states[len(parser.states)-1]
+	parser.states = parser.states[:len(parser.states)-1]
+	parser.marks = parser.marks[:len(parser.marks)-1]
+	*event = yaml_event_t{
+		event_type: yaml_MAPPING_END_EVENT,
+		start_mark: token.start_mark,
+		end_mark: token.end_mark,
+	}
+
+	skip_token(parser)
+	return true
+}
+
+/*
+ * Parse the productions:
+ * flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ * * ***** *
+ */
+
+// yaml_parser_parse_flow_mapping_value parses the value position of a flow
+// mapping entry.  When empty is true the caller already knows the value is
+// absent and only an empty scalar is produced.
+func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t,
+	event *yaml_event_t, empty bool) bool {
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	if empty {
+		parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+		return yaml_parser_process_empty_scalar(parser, event,
+			token.start_mark)
+	}
+
+	if token.token_type == yaml_VALUE_TOKEN {
+		skip_token(parser)
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+		if token.token_type != yaml_FLOW_ENTRY_TOKEN &&
+			token.token_type != yaml_FLOW_MAPPING_END_TOKEN {
+			parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
+			return yaml_parser_parse_node(parser, event, false, false)
+		}
+	}
+
+	// No ":" (or nothing after it): the value is empty.
+	parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
+	return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
+}
+
+/*
+ * Generate an empty scalar event.
+ */
+
+// yaml_parser_process_empty_scalar fills event with a zero-length implicit
+// plain scalar located at mark.  It is used wherever the grammar allows a
+// node to be omitted, and always returns true.
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t,
+	mark YAML_mark_t) bool {
+	empty := yaml_event_t{
+		event_type: yaml_SCALAR_EVENT,
+		start_mark: mark,
+		end_mark:   mark,
+		implicit:   true,
+		style:      yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
+	}
+	*event = empty
+	return true
+}
+
+/*
+ * Parse directives.
+ */
+
+// yaml_parser_process_directives consumes any %YAML/%TAG directive tokens,
+// registers the tag directives on the parser, installs the default "!" and
+// "!!" handles, and returns the collected values through the optional ref
+// pointers.  It returns false on a duplicate/unsupported directive or a
+// scanner failure.
+func yaml_parser_process_directives(parser *yaml_parser_t,
+	version_directive_ref **yaml_version_directive_t,
+	tag_directives_ref *[]yaml_tag_directive_t) bool {
+
+	token := peek_token(parser)
+	if token == nil {
+		return false
+	}
+
+	var version_directive *yaml_version_directive_t
+	var tag_directives []yaml_tag_directive_t
+
+	for token.token_type == yaml_VERSION_DIRECTIVE_TOKEN ||
+		token.token_type == yaml_TAG_DIRECTIVE_TOKEN {
+		if token.token_type == yaml_VERSION_DIRECTIVE_TOKEN {
+			// At most one %YAML directive is allowed, and only version 1.1
+			// is supported.
+			if version_directive != nil {
+				yaml_parser_set_parser_error(parser,
+					"found duplicate %YAML directive", token.start_mark)
+				return false
+			}
+			if token.major != 1 ||
+				token.minor != 1 {
+				yaml_parser_set_parser_error(parser,
+					"found incompatible YAML document", token.start_mark)
+				return false
+			}
+			version_directive = &yaml_version_directive_t{
+				major: token.major,
+				minor: token.minor,
+			}
+		} else if token.token_type == yaml_TAG_DIRECTIVE_TOKEN {
+			value := yaml_tag_directive_t{
+				handle: token.value,
+				prefix: token.prefix,
+			}
+
+			if !yaml_parser_append_tag_directive(parser, value, false,
+				token.start_mark) {
+				return false
+			}
+			tag_directives = append(tag_directives, value)
+		}
+
+		skip_token(parser)
+		// BUG FIX: assign (=) instead of declare (:=).  The original ":="
+		// shadowed the loop variable, so the loop condition kept re-reading
+		// the already-consumed directive token instead of the next one.
+		token = peek_token(parser)
+		if token == nil {
+			return false
+		}
+	}
+
+	// Install the standard "!" and "!!" handles unless the document
+	// redefined them (allow_duplicates keeps the document's definition).
+	for i := range default_tag_directives {
+		if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
+			return false
+		}
+	}
+
+	if version_directive_ref != nil {
+		*version_directive_ref = version_directive
+	}
+	if tag_directives_ref != nil {
+		*tag_directives_ref = tag_directives
+	}
+
+	return true
+}
+
+/*
+ * Append a tag directive to the directives stack.
+ */
+
+// yaml_parser_append_tag_directive registers value on the parser's tag
+// directive stack.  A handle that is already registered is an error unless
+// allow_duplicates is set (used when installing the built-in defaults), in
+// which case the existing entry is kept.
+func yaml_parser_append_tag_directive(parser *yaml_parser_t,
+	value yaml_tag_directive_t, allow_duplicates bool, mark YAML_mark_t) bool {
+	for i := range parser.tag_directives {
+		existing := &parser.tag_directives[i]
+		if !bytes.Equal(value.handle, existing.handle) {
+			continue
+		}
+		if allow_duplicates {
+			return true
+		}
+		return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
+	}
+
+	parser.tag_directives = append(parser.tag_directives, value)
+	return true
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
new file mode 100644
index 00000000..5631da2d
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/reader.go
@@ -0,0 +1,465 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "io"
+)
+
+/*
+ * Record a reader error on the parser and return false.
+ *
+ * problem is a static description of the failure, offset the byte offset
+ * in the input stream where it occurred, and value the offending
+ * byte/character (-1 when not applicable).  ("return 0" in the original C
+ * comment corresponds to returning false in this Go port.)
+ */
+
+func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
+	offset int, value int) bool {
+	parser.error = yaml_READER_ERROR
+	parser.problem = problem
+	parser.problem_offset = offset
+	parser.problem_value = value
+
+	return false
+}
+
+/*
+ * Byte order marks, as raw byte strings, used by
+ * yaml_parser_determine_encoding below.
+ */
+const (
+	BOM_UTF8    = "\xef\xbb\xbf"
+	BOM_UTF16LE = "\xff\xfe"
+	BOM_UTF16BE = "\xfe\xff"
+)
+
+/*
+ * Determine the input stream encoding by checking the BOM symbol.  If no
+ * BOM is found, the UTF-8 encoding is assumed.  Returns true on success,
+ * false on failure (i.e. a read error while filling the raw buffer).
+ */
+
+func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
+	/* Ensure that we had enough bytes in the raw buffer (the longest BOM,
+	 * UTF-8, is 3 bytes). */
+	for !parser.eof &&
+		len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
+		if !yaml_parser_update_raw_buffer(parser) {
+			return false
+		}
+	}
+
+	/* Determine the encoding.  A recognized BOM is consumed by advancing
+	 * raw_buffer_pos and offset past it. */
+	raw := parser.raw_buffer
+	pos := parser.raw_buffer_pos
+	remaining := len(raw) - pos
+	if remaining >= 2 &&
+		raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
+		parser.encoding = yaml_UTF16LE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if remaining >= 2 &&
+		raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
+		parser.encoding = yaml_UTF16BE_ENCODING
+		parser.raw_buffer_pos += 2
+		parser.offset += 2
+	} else if remaining >= 3 &&
+		raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
+		parser.encoding = yaml_UTF8_ENCODING
+		parser.raw_buffer_pos += 3
+		parser.offset += 3
+	} else {
+		/* No BOM: default to UTF-8 without consuming anything. */
+		parser.encoding = yaml_UTF8_ENCODING
+	}
+
+	return true
+}
+
+/*
+ * Update the raw buffer: compact the unread bytes to the front of the
+ * buffer and read more input from the read handler into the spare
+ * capacity.  Returns true on success (including a clean EOF), false on a
+ * read error (recorded via yaml_parser_set_reader_error).
+ */
+
+func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
+	size_read := 0
+
+	/* Return if the raw buffer is full (nothing consumed, no spare capacity). */
+	if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
+		return true
+	}
+
+	/* Return on EOF. */
+
+	if parser.eof {
+		return true
+	}
+
+	/* Move the remaining bytes in the raw buffer to the beginning. */
+	if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
+		copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
+	}
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
+	parser.raw_buffer_pos = 0
+
+	/* Call the read handler to fill the spare capacity of the buffer. */
+	size_read, err := parser.read_handler(parser,
+		parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
+	parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
+
+	if err == io.EOF {
+		parser.eof = true
+	} else if err != nil {
+		return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
+			parser.offset, -1)
+	}
+
+	return true
+}
+
+/*
+ * Ensure that the buffer contains at least `length` decoded characters.
+ * Returns true on success, false on failure (reader or decoding error).
+ *
+ * The length is supposed to be significantly less than the buffer size.
+ *
+ * Input bytes are pulled from the raw buffer, validated/decoded according
+ * to parser.encoding (UTF-8 or UTF-16, per the BOM), and re-encoded into
+ * parser.buffer, which always holds UTF-8.
+ */
+
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
+	/* Read handler must be set. */
+	if parser.read_handler == nil {
+		panic("read handler must be set")
+	}
+
+	/* If the EOF flag is set and the raw buffer is empty, do nothing. */
+
+	if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
+		return true
+	}
+
+	/* Return if the buffer contains enough characters. */
+
+	if parser.unread >= length {
+		return true
+	}
+
+	/* Determine the input encoding if it is not known yet. */
+
+	if parser.encoding == yaml_ANY_ENCODING {
+		if !yaml_parser_determine_encoding(parser) {
+			return false
+		}
+	}
+
+	/* Move the unread characters to the beginning of the buffer. */
+	buffer_end := len(parser.buffer)
+	if 0 < parser.buffer_pos &&
+		parser.buffer_pos < buffer_end {
+		copy(parser.buffer, parser.buffer[parser.buffer_pos:])
+		buffer_end -= parser.buffer_pos
+		parser.buffer_pos = 0
+	} else if parser.buffer_pos == buffer_end {
+		buffer_end = 0
+		parser.buffer_pos = 0
+	}
+
+	/* Grow to full capacity; buffer_end tracks the logical end from here on. */
+	parser.buffer = parser.buffer[:cap(parser.buffer)]
+
+	/* Fill the buffer until it has enough characters. */
+	first := true
+	for parser.unread < length {
+		/* Fill the raw buffer if necessary. */
+
+		if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
+			if !yaml_parser_update_raw_buffer(parser) {
+				parser.buffer = parser.buffer[:buffer_end]
+				return false
+			}
+		}
+		first = false
+
+		/* Decode the raw buffer. */
+		for parser.raw_buffer_pos != len(parser.raw_buffer) {
+			var value rune
+			var w int
+
+			raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
+			incomplete := false
+
+			/* Decode the next character. */
+
+			switch parser.encoding {
+			case yaml_UTF8_ENCODING:
+
+				/*
+				 * Decode a UTF-8 character. Check RFC 3629
+				 * (http://www.ietf.org/rfc/rfc3629.txt) for more details.
+				 *
+				 * The following table (taken from the RFC) is used for
+				 * decoding.
+				 *
+				 *    Char. number range |        UTF-8 octet sequence
+				 *      (hexadecimal)    |              (binary)
+				 *   --------------------+------------------------------------
+				 *   0000 0000-0000 007F | 0xxxxxxx
+				 *   0000 0080-0000 07FF | 110xxxxx 10xxxxxx
+				 *   0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
+				 *   0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				 *
+				 * Additionally, the characters in the range 0xD800-0xDFFF
+				 * are prohibited as they are reserved for use with UTF-16
+				 * surrogate pairs.
+				 */
+
+				/* Determine the length of the UTF-8 sequence. */
+
+				octet := parser.raw_buffer[parser.raw_buffer_pos]
+				w = width(octet)
+
+				/* Check if the leading octet is valid. */
+
+				if w == 0 {
+					return yaml_parser_set_reader_error(parser,
+						"invalid leading UTF-8 octet",
+						parser.offset, int(octet))
+				}
+
+				/* Check if the raw buffer contains an incomplete character. */
+
+				if w > raw_unread {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-8 octet sequence",
+							parser.offset, -1)
+					}
+					incomplete = true
+					/* break leaves the encoding switch only; the incomplete
+					 * flag is re-checked below to leave the decode loop. */
+					break
+				}
+
+				/* Decode the leading octet. */
+				switch {
+				case octet&0x80 == 0x00:
+					value = rune(octet & 0x7F)
+				case octet&0xE0 == 0xC0:
+					value = rune(octet & 0x1F)
+				case octet&0xF0 == 0xE0:
+					value = rune(octet & 0x0F)
+				case octet&0xF8 == 0xF0:
+					value = rune(octet & 0x07)
+				default:
+					value = 0
+				}
+
+				/* Check and decode the trailing octets. */
+
+				for k := 1; k < w; k++ {
+					octet = parser.raw_buffer[parser.raw_buffer_pos+k]
+
+					/* Check if the octet is valid. */
+
+					if (octet & 0xC0) != 0x80 {
+						return yaml_parser_set_reader_error(parser,
+							"invalid trailing UTF-8 octet",
+							parser.offset+k, int(octet))
+					}
+
+					/* Decode the octet. */
+
+					value = (value << 6) + rune(octet&0x3F)
+				}
+
+				/* Check the length of the sequence against the value
+				 * (rejects overlong encodings). */
+				switch {
+				case w == 1:
+				case w == 2 && value >= 0x80:
+				case w == 3 && value >= 0x800:
+				case w == 4 && value >= 0x10000:
+				default:
+					return yaml_parser_set_reader_error(parser,
+						"invalid length of a UTF-8 sequence",
+						parser.offset, -1)
+				}
+
+				/* Check the range of the value. */
+
+				if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
+					return yaml_parser_set_reader_error(parser,
+						"invalid Unicode character",
+						parser.offset, int(value))
+				}
+			case yaml_UTF16LE_ENCODING,
+				yaml_UTF16BE_ENCODING:
+
+				/* low/high select the byte order of each 16-bit unit. */
+				var low, high int
+				if parser.encoding == yaml_UTF16LE_ENCODING {
+					low, high = 0, 1
+				} else {
+					high, low = 1, 0
+				}
+
+				/*
+				 * The UTF-16 encoding is not as simple as one might
+				 * naively think. Check RFC 2781
+				 * (http://www.ietf.org/rfc/rfc2781.txt).
+				 *
+				 * Normally, two subsequent bytes describe a Unicode
+				 * character. However a special technique (called a
+				 * surrogate pair) is used for specifying character
+				 * values larger than 0xFFFF.
+				 *
+				 * A surrogate pair consists of two pseudo-characters:
+				 *   high surrogate area (0xD800-0xDBFF)
+				 *   low surrogate area (0xDC00-0xDFFF)
+				 *
+				 * The following formulas are used for decoding
+				 * and encoding characters using surrogate pairs:
+				 *
+				 * U  = U' + 0x10000   (0x01 00 00 <= U <= 0x10 FF FF)
+				 * U' = yyyyyyyyyyxxxxxxxxxx   (0 <= U' <= 0x0F FF FF)
+				 * W1 = 110110yyyyyyyyyy
+				 * W2 = 110111xxxxxxxxxx
+				 *
+				 * where U is the character value, W1 is the high surrogate
+				 * area, W2 is the low surrogate area.
+				 */
+
+				/* Check for incomplete UTF-16 character. */
+
+				if raw_unread < 2 {
+					if parser.eof {
+						return yaml_parser_set_reader_error(parser,
+							"incomplete UTF-16 character",
+							parser.offset, -1)
+					}
+					incomplete = true
+					break
+				}
+
+				/* Get the character. */
+				value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
+					(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
+
+				/* Check for unexpected low surrogate area. */
+
+				if (value & 0xFC00) == 0xDC00 {
+					return yaml_parser_set_reader_error(parser,
+						"unexpected low surrogate area",
+						parser.offset, int(value))
+				}
+
+				/* Check for a high surrogate area. */
+
+				if (value & 0xFC00) == 0xD800 {
+
+					w = 4
+
+					/* Check for incomplete surrogate pair. */
+
+					if raw_unread < 4 {
+						if parser.eof {
+							return yaml_parser_set_reader_error(parser,
+								"incomplete UTF-16 surrogate pair",
+								parser.offset, -1)
+						}
+						incomplete = true
+						break
+					}
+
+					/* Get the next character. */
+
+					value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
+						(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
+
+					/* Check for a low surrogate area. */
+
+					if (value2 & 0xFC00) != 0xDC00 {
+						return yaml_parser_set_reader_error(parser,
+							"expected low surrogate area",
+							parser.offset+2, int(value2))
+					}
+
+					/* Generate the value of the surrogate pair. */
+
+					value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
+				} else {
+					w = 2
+				}
+
+				/* Redundant in Go (a case never falls through); kept from
+				 * the C original. */
+				break
+
+			default:
+				panic("Impossible") /* Impossible. */
+			}
+
+			/* Check if the raw buffer contains enough bytes to form a character. */
+
+			if incomplete {
+				break
+			}
+
+			/*
+			 * Check if the character is in the allowed range:
+			 *   #x9 | #xA | #xD | [#x20-#x7E]               (8 bit)
+			 *   | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD]    (16 bit)
+			 *   | [#x10000-#x10FFFF]                        (32 bit)
+			 */
+
+			if !(value == 0x09 || value == 0x0A || value == 0x0D ||
+				(value >= 0x20 && value <= 0x7E) ||
+				(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
+				(value >= 0xE000 && value <= 0xFFFD) ||
+				(value >= 0x10000 && value <= 0x10FFFF)) {
+				return yaml_parser_set_reader_error(parser,
+					"control characters are not allowed",
+					parser.offset, int(value))
+			}
+
+			/* Move the raw pointers. */
+
+			parser.raw_buffer_pos += w
+			parser.offset += w
+
+			/* Finally put the character into the buffer, re-encoded as
+			 * UTF-8 (the internal buffer is always UTF-8). */
+
+			/* 0000 0000-0000 007F . 0xxxxxxx */
+			if value <= 0x7F {
+				parser.buffer[buffer_end] = byte(value)
+			} else if value <= 0x7FF {
+				/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
+				parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
+			} else if value <= 0xFFFF {
+				/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
+				parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
+			} else {
+				/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+				parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
+			}
+
+			/* NOTE(review): buffer_end advances by w, the SOURCE byte width,
+			 * not by the UTF-8 width just written.  For UTF-8 input the two
+			 * are equal; for UTF-16 input a BMP character in 0x800-0xFFFF
+			 * has w == 2 but writes 3 bytes — confirm against upstream
+			 * libyaml/candiedyaml before relying on UTF-16 input. */
+			buffer_end += w
+			parser.unread++
+		}
+
+		/* On EOF, put NUL into the buffer and return.  Note that the NUL
+		 * terminator is itself counted as an unread character. */
+
+		if parser.eof {
+			parser.buffer[buffer_end] = 0
+			buffer_end++
+			parser.buffer = parser.buffer[:buffer_end]
+			parser.unread++
+			return true
+		}
+
+	}
+
+	parser.buffer = parser.buffer[:buffer_end]
+	return true
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
new file mode 100644
index 00000000..fb9e8be8
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/resolver.go
@@ -0,0 +1,449 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "bytes"
+ "encoding/base64"
+ "fmt"
+ "math"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Reflection type of []byte, used by resolve to detect binary targets.
+var byteSliceType = reflect.TypeOf([]byte(nil))
+
+// Tags that mark a scalar as base64-encoded binary data.
+var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}
+// Lookup tables populated in init(): scalar spelling -> boolean value,
+// and the set of null spellings.
+var bool_values map[string]bool
+var null_values map[string]bool
+
+// First-byte classes used for quick dispatch in resolveInterface.
+var signs = []byte{'-', '+'}
+var nulls = []byte{'~', 'n', 'N'}
+var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}
+
+// Compiled in init(): full timestamp and bare yyyy-mm-dd date patterns.
+var timestamp_regexp *regexp.Regexp
+var ymd_regexp *regexp.Regexp
+
+// init populates the scalar lookup tables and compiles the timestamp
+// patterns used by resolve_time.  bool_values holds only lowercase keys
+// (resolve_bool lowercases before lookup); null_values is consulted
+// case-sensitively and contains exactly the four spellings below.
+func init() {
+	bool_values = make(map[string]bool)
+	bool_values["y"] = true
+	bool_values["yes"] = true
+	bool_values["n"] = false
+	bool_values["no"] = false
+	bool_values["true"] = true
+	bool_values["false"] = false
+	bool_values["on"] = true
+	bool_values["off"] = false
+
+	null_values = make(map[string]bool)
+	null_values["~"] = true
+	null_values["null"] = true
+	null_values["Null"] = true
+	null_values["NULL"] = true
+
+	/* Full timestamp: date, then optional time with fractional seconds and
+	 * an optional Z / [+-]hh[:mm] timezone suffix. */
+	timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
+	ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
+}
+
+// resolve converts the scalar text in event into the value v, dispatching
+// on v's reflect.Kind, and returns the YAML tag describing how the scalar
+// was interpreted.  When useNumber is set, numeric scalars destined for
+// Number/interface targets keep their textual form instead of being
+// converted to int64/float64.
+func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
+	val := string(event.value)
+
+	/* Any null spelling zeroes the target, whatever its kind. */
+	if null_values[val] {
+		v.Set(reflect.Zero(v.Type()))
+		return yaml_NULL_TAG, nil
+	}
+
+	switch v.Kind() {
+	case reflect.String:
+		if useNumber && v.Type() == numberType {
+			tag, i := resolveInterface(event, useNumber)
+			if n, ok := i.(Number); ok {
+				v.Set(reflect.ValueOf(n))
+				return tag, nil
+			}
+			return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
+		}
+
+		return resolve_string(val, v, event)
+	case reflect.Bool:
+		return resolve_bool(val, v, event)
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return resolve_int(val, v, useNumber, event)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return resolve_uint(val, v, useNumber, event)
+	case reflect.Float32, reflect.Float64:
+		return resolve_float(val, v, useNumber, event)
+	case reflect.Interface:
+		_, i := resolveInterface(event, useNumber)
+		if i != nil {
+			v.Set(reflect.ValueOf(i))
+		} else {
+			v.Set(reflect.Zero(v.Type()))
+		}
+		/* NOTE(review): the tag computed by resolveInterface is discarded;
+		 * this branch falls through to the yaml_STR_TAG return below. */
+
+	case reflect.Struct:
+		/* Struct targets are parsed as timestamps (resolve_time stores a
+		 * time.Time; other struct types would panic in v.Set). */
+		return resolve_time(val, v, event)
+	case reflect.Slice:
+		if v.Type() != byteSliceType {
+			return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
+		}
+		b, err := decode_binary(event.value, event)
+		if err != nil {
+			return "", err
+		}
+
+		v.Set(reflect.ValueOf(b))
+	default:
+		return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
+	}
+
+	return yaml_STR_TAG, nil
+}
+
+// hasBinaryTag reports whether the event carries one of the recognized
+// binary tags (see binary_tags).
+func hasBinaryTag(event yaml_event_t) bool {
+	for _, tag := range binary_tags {
+		if bytes.Equal(event.tag, tag) {
+			return true
+		}
+	}
+	return false
+}
+
+// decode_binary base64-decodes a binary scalar and returns the decoded
+// bytes.
+// NOTE(review): the error message interpolates string(b) — the partially
+// written OUTPUT buffer — rather than the offending input text; probably
+// meant string(value).  Confirm before changing the vendored message.
+func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
+	b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
+	n, err := base64.StdEncoding.Decode(b, value)
+	if err != nil {
+		return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
+	}
+	return b[:n], nil
+}
+
+// resolve_string stores the scalar into a string target.  A binary tag
+// (see binary_tags) causes the value to be base64-decoded first;
+// otherwise the raw scalar text is used as-is.
+func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
+	if len(event.tag) > 0 {
+		if hasBinaryTag(event) {
+			b, err := decode_binary(event.value, event)
+			if err != nil {
+				return "", err
+			}
+			val = string(b)
+		}
+	}
+	v.SetString(val)
+	return yaml_STR_TAG, nil
+}
+
+// resolve_bool maps the YAML 1.1 boolean spellings (y/yes/no/on/off/...,
+// matched case-insensitively via the lowercase keys of bool_values) onto
+// a bool target.
+func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
+	b, found := bool_values[strings.ToLower(val)]
+	if !found {
+		return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
+	}
+
+	v.SetBool(b)
+	return yaml_BOOL_TAG, nil
+}
+
+// resolve_int parses an integer scalar into a signed-int or Number target.
+// "_" digit separators are stripped, an optional +/- sign is honored, and
+// base prefixes are handled by strconv's base-0 parsing ("0x...", leading
+// "0" octal); "0o..." is rewritten to an explicit base 8 because base-0
+// parsing does not recognize that prefix.
+// NOTE(review): assumes the scalar is non-empty — val[0] would panic on ""
+// (empty scalars are expected to be caught earlier, e.g. the null check
+// in resolve; confirm for all call paths).
+func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
+	original := val
+	val = strings.Replace(val, "_", "", -1)
+	var value uint64
+
+	isNumberValue := v.Type() == numberType
+
+	sign := int64(1)
+	if val[0] == '-' {
+		sign = -1
+		val = val[1:]
+	} else if val[0] == '+' {
+		val = val[1:]
+	}
+
+	base := 0
+	if val == "0" {
+		if isNumberValue {
+			v.SetString("0")
+		} else {
+			v.Set(reflect.Zero(v.Type()))
+		}
+
+		return yaml_INT_TAG, nil
+	}
+
+	if strings.HasPrefix(val, "0o") {
+		base = 8
+		val = val[2:]
+	}
+
+	value, err := strconv.ParseUint(val, base, 64)
+	if err != nil {
+		return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
+	}
+
+	/* Fold the unsigned magnitude and the sign into an int64, allowing the
+	 * special case -(MaxInt64+1) == MinInt64. */
+	var val64 int64
+	if value <= math.MaxInt64 {
+		val64 = int64(value)
+		if sign == -1 {
+			val64 = -val64
+		}
+	} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
+		val64 = math.MinInt64
+	} else {
+		return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
+	}
+
+	if isNumberValue {
+		v.SetString(strconv.FormatInt(val64, 10))
+	} else {
+		if v.OverflowInt(val64) {
+			return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
+		}
+		v.SetInt(val64)
+	}
+
+	return yaml_INT_TAG, nil
+}
+
+// resolve_uint parses an unsigned integer scalar into a uint or Number
+// target; same forms as resolve_int, except a leading '-' is rejected
+// outright.
+// NOTE(review): error messages here interpolate the underscore-stripped
+// val, whereas resolve_int uses the original text — minor inconsistency.
+func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
+	original := val
+	val = strings.Replace(val, "_", "", -1)
+	var value uint64
+
+	isNumberValue := v.Type() == numberType
+
+	if val[0] == '-' {
+		return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
+	}
+
+	if val[0] == '+' {
+		val = val[1:]
+	}
+
+	base := 0
+	if val == "0" {
+		if isNumberValue {
+			v.SetString("0")
+		} else {
+			v.Set(reflect.Zero(v.Type()))
+		}
+
+		return yaml_INT_TAG, nil
+	}
+
+	if strings.HasPrefix(val, "0o") {
+		base = 8
+		val = val[2:]
+	}
+
+	value, err := strconv.ParseUint(val, base, 64)
+	if err != nil {
+		return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
+	}
+
+	if isNumberValue {
+		v.SetString(strconv.FormatUint(value, 10))
+	} else {
+		if v.OverflowUint(value) {
+			return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
+		}
+
+		v.SetUint(value)
+	}
+
+	return yaml_INT_TAG, nil
+}
+
+// resolve_float parses a float scalar — including the ".inf"/".nan"
+// spellings, case-insensitively, with an optional sign — into a float or
+// Number target.
+// NOTE(review): like resolve_int, assumes a non-empty scalar (val[0]).
+func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
+	val = strings.Replace(val, "_", "", -1)
+	var value float64
+
+	isNumberValue := v.Type() == numberType
+	typeBits := 64
+	if !isNumberValue {
+		typeBits = v.Type().Bits()
+	}
+
+	sign := 1
+	if val[0] == '-' {
+		sign = -1
+		val = val[1:]
+	} else if val[0] == '+' {
+		val = val[1:]
+	}
+
+	valLower := strings.ToLower(val)
+	if valLower == ".inf" {
+		value = math.Inf(sign)
+	} else if valLower == ".nan" {
+		value = math.NaN()
+	} else {
+		var err error
+		value, err = strconv.ParseFloat(val, typeBits)
+		/* NOTE(review): value is multiplied before err is checked; harmless
+		 * (value is discarded on error) but the ordering reads oddly. */
+		value *= float64(sign)
+
+		if err != nil {
+			return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
+		}
+	}
+
+	if isNumberValue {
+		v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
+	} else {
+		if v.OverflowFloat(value) {
+			return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
+		}
+
+		v.SetFloat(value)
+	}
+
+	return yaml_FLOAT_TAG, nil
+}
+
+// resolve_time parses a timestamp scalar — either a bare yyyy-mm-dd date
+// or a full timestamp with optional fractional seconds and timezone —
+// into a time.Time target.  Returns an empty tag on success.
+func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
+	var parsedTime time.Time
+	matches := ymd_regexp.FindStringSubmatch(val)
+	if len(matches) > 0 {
+		/* Date only: midnight UTC. */
+		year, _ := strconv.Atoi(matches[1])
+		month, _ := strconv.Atoi(matches[2])
+		day, _ := strconv.Atoi(matches[3])
+		parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
+	} else {
+		matches = timestamp_regexp.FindStringSubmatch(val)
+		if len(matches) == 0 {
+			return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
+		}
+
+		year, _ := strconv.Atoi(matches[1])
+		month, _ := strconv.Atoi(matches[2])
+		day, _ := strconv.Atoi(matches[3])
+		hour, _ := strconv.Atoi(matches[4])
+		min, _ := strconv.Atoi(matches[5])
+		sec, _ := strconv.Atoi(matches[6])
+
+		nsec := 0
+		if matches[7] != "" {
+			/* NOTE(review): matches[7] is the fractional-seconds digit
+			 * string, but it is interpreted as whole milliseconds, so
+			 * ".5" becomes 5ms rather than 500ms — looks like an upstream
+			 * bug; confirm against upstream before fixing vendored code. */
+			millis, _ := strconv.Atoi(matches[7])
+			nsec = int(time.Duration(millis) * time.Millisecond)
+		}
+
+		/* Optional timezone: "Z" or absent means UTC; otherwise a fixed
+		 * [+-]hh[:mm] offset. */
+		loc := time.UTC
+		if matches[8] != "" {
+			sign := matches[8][0]
+			hr, _ := strconv.Atoi(matches[8][1:])
+			min := 0
+			if matches[9] != "" {
+				min, _ = strconv.Atoi(matches[9])
+			}
+
+			zoneOffset := (hr*60 + min) * 60
+			if sign == '-' {
+				zoneOffset = -zoneOffset
+			}
+
+			loc = time.FixedZone("", zoneOffset)
+		}
+		parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
+	}
+
+	v.Set(reflect.ValueOf(parsedTime))
+	return "", nil
+}
+
+// resolveInterface guesses the type of a scalar for decoding into an
+// interface{} target: dispatching on the first byte, it tries int, then
+// float, then (unsigned only) timestamp for numeric-looking text, and
+// null/bool for the respective spellings, falling back to a plain string.
+// Returns the chosen tag and the decoded value.
+func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
+	val := string(event.value)
+	/* Untagged, non-implicit scalars stay plain strings. */
+	if len(event.tag) == 0 && !event.implicit {
+		return "", val
+	}
+
+	/* An empty scalar is null. */
+	if len(val) == 0 {
+		return yaml_NULL_TAG, nil
+	}
+
+	var result interface{}
+
+	sign := false
+	c := val[0]
+	switch {
+	case bytes.IndexByte(signs, c) != -1:
+		sign = true
+		fallthrough
+	case c >= '0' && c <= '9':
+		/* Numeric-looking: int first, then float, then (no sign only)
+		 * timestamp. */
+		i := int64(0)
+		result = &i
+		if useNumber {
+			var n Number
+			result = &n
+		}
+
+		v := reflect.ValueOf(result).Elem()
+		if _, err := resolve_int(val, v, useNumber, event); err == nil {
+			return yaml_INT_TAG, v.Interface()
+		}
+
+		f := float64(0)
+		result = &f
+		if useNumber {
+			var n Number
+			result = &n
+		}
+
+		v = reflect.ValueOf(result).Elem()
+		if _, err := resolve_float(val, v, useNumber, event); err == nil {
+			return yaml_FLOAT_TAG, v.Interface()
+		}
+
+		if !sign {
+			t := time.Time{}
+			if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
+				return "", t
+			}
+		}
+	case bytes.IndexByte(nulls, c) != -1:
+		if null_values[val] {
+			return yaml_NULL_TAG, nil
+		}
+		/* '~'/'n'/'N' that is not null may still be a bool ("n"/"no"). */
+		b := false
+		if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
+			return yaml_BOOL_TAG, b
+		}
+	case c == '.':
+		f := float64(0)
+		result = &f
+		if useNumber {
+			var n Number
+			result = &n
+		}
+
+		v := reflect.ValueOf(result).Elem()
+		if _, err := resolve_float(val, v, useNumber, event); err == nil {
+			return yaml_FLOAT_TAG, v.Interface()
+		}
+	case bytes.IndexByte(bools, c) != -1:
+		b := false
+		if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
+			return yaml_BOOL_TAG, b
+		}
+	}
+
+	if hasBinaryTag(event) {
+		/* The local name shadows the bytes package here; safe because the
+		 * package is not referenced again in this scope. */
+		bytes, err := decode_binary(event.value, event)
+		if err == nil {
+			return yaml_BINARY_TAG, bytes
+		}
+	}
+
+	return yaml_STR_TAG, val
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
new file mode 100644
index 00000000..25c29816
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/run_parser.go
@@ -0,0 +1,62 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "fmt"
+ "os"
+)
+
+// Run_parser scans each file named in args with the YAML token scanner
+// and prints a SUCCESS/FAILED summary plus a token count per file.  The
+// cmd argument is accepted for CLI symmetry but unused.  Panics if a file
+// cannot be opened.
+func Run_parser(cmd string, args []string) {
+	for i := 0; i < len(args); i++ {
+		/* NOTE(review): no trailing "\n" here, so the per-file summary
+		 * printed at the bottom of the loop continues on the same line. */
+		fmt.Printf("[%d] Scanning '%s'", i, args[i])
+		file, err := os.Open(args[i])
+		if err != nil {
+			panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
+		}
+
+		parser := yaml_parser_t{}
+		yaml_parser_initialize(&parser)
+		yaml_parser_set_input_reader(&parser, file)
+
+		/* Pull tokens until STREAM-END or a scan error. */
+		failed := false
+		token := yaml_token_t{}
+		count := 0
+		for {
+			if !yaml_parser_scan(&parser, &token) {
+				failed = true
+				break
+			}
+
+			if token.token_type == yaml_STREAM_END_TOKEN {
+				break
+			}
+			count++
+		}
+
+		file.Close()
+
+		msg := "SUCCESS"
+		if failed {
+			msg = "FAILED"
+			if parser.error != yaml_NO_ERROR {
+				m := parser.problem_mark
+				fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
+					parser.context, parser.problem, m.line, m.column)
+			}
+		}
+		fmt.Printf("%s (%d tokens)\n", msg, count)
+	}
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
new file mode 100644
index 00000000..5c080a06
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/scanner.go
@@ -0,0 +1,3318 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "bytes"
+)
+
+/*
+ * Introduction
+ * ************
+ *
+ * The following notes assume that you are familiar with the YAML specification
+ * (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
+ * some cases we are less restrictive than it requires.
+ *
+ * The process of transforming a YAML stream into a sequence of events is
+ * divided into two steps: Scanning and Parsing.
+ *
+ * The Scanner transforms the input stream into a sequence of tokens, while the
+ * parser transforms the sequence of tokens produced by the Scanner into a
+ * sequence of parsing events.
+ *
+ * The Scanner is rather clever and complicated. The Parser, on the contrary,
+ * is a straightforward implementation of a recursive-descent parser (or,
+ * LL(1) parser, as it is usually called).
+ *
+ * Actually there are two issues of Scanning that might be called "clever", the
+ * rest is quite straightforward. The issues are "block collection start" and
+ * "simple keys". Both issues are explained below in details.
+ *
+ * Here the Scanning step is explained and implemented. We start with the list
+ * of all the tokens produced by the Scanner together with short descriptions.
+ *
+ * Now, tokens:
+ *
+ * STREAM-START(encoding) # The stream start.
+ * STREAM-END # The stream end.
+ * VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
+ * TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
+ * DOCUMENT-START # '---'
+ * DOCUMENT-END # '...'
+ * BLOCK-SEQUENCE-START # Indentation increase denoting a block
+ * BLOCK-MAPPING-START # sequence or a block mapping.
+ * BLOCK-END # Indentation decrease.
+ * FLOW-SEQUENCE-START # '['
+ * FLOW-SEQUENCE-END # ']'
+ * FLOW-MAPPING-START # '{'
+ * FLOW-MAPPING-END # '}'
+ * BLOCK-ENTRY # '-'
+ * FLOW-ENTRY # ','
+ * KEY # '?' or nothing (simple keys).
+ * VALUE # ':'
+ * ALIAS(anchor) # '*anchor'
+ * ANCHOR(anchor) # '&anchor'
+ * TAG(handle,suffix) # '!handle!suffix'
+ * SCALAR(value,style) # A scalar.
+ *
+ * The following two tokens are "virtual" tokens denoting the beginning and the
+ * end of the stream:
+ *
+ * STREAM-START(encoding)
+ * STREAM-END
+ *
+ * We pass the information about the input stream encoding with the
+ * STREAM-START token.
+ *
+ * The next two tokens are responsible for tags:
+ *
+ * VERSION-DIRECTIVE(major,minor)
+ * TAG-DIRECTIVE(handle,prefix)
+ *
+ * Example:
+ *
+ * %YAML 1.1
+ * %TAG ! !foo
+ * %TAG !yaml! tag:yaml.org,2002:
+ * ---
+ *
+ * The corresponding sequence of tokens:
+ *
+ * STREAM-START(utf-8)
+ * VERSION-DIRECTIVE(1,1)
+ * TAG-DIRECTIVE("!","!foo")
+ * TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+ * DOCUMENT-START
+ * STREAM-END
+ *
+ * Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+ * line.
+ *
+ * The document start and end indicators are represented by:
+ *
+ * DOCUMENT-START
+ * DOCUMENT-END
+ *
+ * Note that if a YAML stream contains an implicit document (without '---'
+ * and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+ * produced.
+ *
+ * In the following examples, we present whole documents together with the
+ * produced tokens.
+ *
+ * 1. An implicit document:
+ *
+ * 'a scalar'
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * SCALAR("a scalar",single-quoted)
+ * STREAM-END
+ *
+ * 2. An explicit document:
+ *
+ * ---
+ * 'a scalar'
+ * ...
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * DOCUMENT-START
+ * SCALAR("a scalar",single-quoted)
+ * DOCUMENT-END
+ * STREAM-END
+ *
+ * 3. Several documents in a stream:
+ *
+ * 'a scalar'
+ * ---
+ * 'another scalar'
+ * ---
+ * 'yet another scalar'
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * SCALAR("a scalar",single-quoted)
+ * DOCUMENT-START
+ * SCALAR("another scalar",single-quoted)
+ * DOCUMENT-START
+ * SCALAR("yet another scalar",single-quoted)
+ * STREAM-END
+ *
+ * We have already introduced the SCALAR token above. The following tokens are
+ * used to describe aliases, anchors, tag, and scalars:
+ *
+ * ALIAS(anchor)
+ * ANCHOR(anchor)
+ * TAG(handle,suffix)
+ * SCALAR(value,style)
+ *
+ * The following series of examples illustrate the usage of these tokens:
+ *
+ * 1. A recursive sequence:
+ *
+ * &A [ *A ]
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * ANCHOR("A")
+ * FLOW-SEQUENCE-START
+ * ALIAS("A")
+ * FLOW-SEQUENCE-END
+ * STREAM-END
+ *
+ * 2. A tagged scalar:
+ *
+ * !!float "3.14" # A good approximation.
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * TAG("!!","float")
+ * SCALAR("3.14",double-quoted)
+ * STREAM-END
+ *
+ * 3. Various scalar styles:
+ *
+ * --- # Implicit empty plain scalars do not produce tokens.
+ * --- a plain scalar
+ * --- 'a single-quoted scalar'
+ * --- "a double-quoted scalar"
+ * --- |-
+ * a literal scalar
+ * --- >-
+ * a folded
+ * scalar
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * DOCUMENT-START
+ * DOCUMENT-START
+ * SCALAR("a plain scalar",plain)
+ * DOCUMENT-START
+ * SCALAR("a single-quoted scalar",single-quoted)
+ * DOCUMENT-START
+ * SCALAR("a double-quoted scalar",double-quoted)
+ * DOCUMENT-START
+ * SCALAR("a literal scalar",literal)
+ * DOCUMENT-START
+ * SCALAR("a folded scalar",folded)
+ * STREAM-END
+ *
+ * Now it's time to review collection-related tokens. We will start with
+ * flow collections:
+ *
+ * FLOW-SEQUENCE-START
+ * FLOW-SEQUENCE-END
+ * FLOW-MAPPING-START
+ * FLOW-MAPPING-END
+ * FLOW-ENTRY
+ * KEY
+ * VALUE
+ *
+ * The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+ * FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+ * correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the
+ * indicators '?' and ':', which are used for denoting mapping keys and values,
+ * are represented by the KEY and VALUE tokens.
+ *
+ * The following examples show flow collections:
+ *
+ * 1. A flow sequence:
+ *
+ * [item 1, item 2, item 3]
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * FLOW-SEQUENCE-START
+ * SCALAR("item 1",plain)
+ * FLOW-ENTRY
+ * SCALAR("item 2",plain)
+ * FLOW-ENTRY
+ * SCALAR("item 3",plain)
+ * FLOW-SEQUENCE-END
+ * STREAM-END
+ *
+ * 2. A flow mapping:
+ *
+ * {
+ * a simple key: a value, # Note that the KEY token is produced.
+ * ? a complex key: another value,
+ * }
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * FLOW-MAPPING-START
+ * KEY
+ * SCALAR("a simple key",plain)
+ * VALUE
+ * SCALAR("a value",plain)
+ * FLOW-ENTRY
+ * KEY
+ * SCALAR("a complex key",plain)
+ * VALUE
+ * SCALAR("another value",plain)
+ * FLOW-ENTRY
+ * FLOW-MAPPING-END
+ * STREAM-END
+ *
+ * A simple key is a key which is not denoted by the '?' indicator. Note that
+ * the Scanner still produces the KEY token whenever it encounters a simple key.
+ *
+ * For scanning block collections, the following tokens are used (note that we
+ * repeat KEY and VALUE here):
+ *
+ * BLOCK-SEQUENCE-START
+ * BLOCK-MAPPING-START
+ * BLOCK-END
+ * BLOCK-ENTRY
+ * KEY
+ * VALUE
+ *
+ * The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+ * increase that precedes a block collection (cf. the INDENT token in Python).
+ * The token BLOCK-END denotes indentation decrease that ends a block collection
+ * (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
+ * that make detection of these tokens more complex.
+ *
+ * The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+ * '-', '?', and ':' correspondingly.
+ *
+ * The following examples show how the tokens BLOCK-SEQUENCE-START,
+ * BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+ *
+ * 1. Block sequences:
+ *
+ * - item 1
+ * - item 2
+ * -
+ * - item 3.1
+ * - item 3.2
+ * -
+ * key 1: value 1
+ * key 2: value 2
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * BLOCK-SEQUENCE-START
+ * BLOCK-ENTRY
+ * SCALAR("item 1",plain)
+ * BLOCK-ENTRY
+ * SCALAR("item 2",plain)
+ * BLOCK-ENTRY
+ * BLOCK-SEQUENCE-START
+ * BLOCK-ENTRY
+ * SCALAR("item 3.1",plain)
+ * BLOCK-ENTRY
+ * SCALAR("item 3.2",plain)
+ * BLOCK-END
+ * BLOCK-ENTRY
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("key 1",plain)
+ * VALUE
+ * SCALAR("value 1",plain)
+ * KEY
+ * SCALAR("key 2",plain)
+ * VALUE
+ * SCALAR("value 2",plain)
+ * BLOCK-END
+ * BLOCK-END
+ * STREAM-END
+ *
+ * 2. Block mappings:
+ *
+ * a simple key: a value # The KEY token is produced here.
+ * ? a complex key
+ * : another value
+ * a mapping:
+ * key 1: value 1
+ * key 2: value 2
+ * a sequence:
+ * - item 1
+ * - item 2
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("a simple key",plain)
+ * VALUE
+ * SCALAR("a value",plain)
+ * KEY
+ * SCALAR("a complex key",plain)
+ * VALUE
+ * SCALAR("another value",plain)
+ * KEY
+ * SCALAR("a mapping",plain)
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("key 1",plain)
+ * VALUE
+ * SCALAR("value 1",plain)
+ * KEY
+ * SCALAR("key 2",plain)
+ * VALUE
+ * SCALAR("value 2",plain)
+ * BLOCK-END
+ * KEY
+ * SCALAR("a sequence",plain)
+ * VALUE
+ * BLOCK-SEQUENCE-START
+ * BLOCK-ENTRY
+ * SCALAR("item 1",plain)
+ * BLOCK-ENTRY
+ * SCALAR("item 2",plain)
+ * BLOCK-END
+ * BLOCK-END
+ * STREAM-END
+ *
+ * YAML does not always require to start a new block collection from a new
+ * line. If the current line contains only '-', '?', and ':' indicators, a new
+ * block collection may start at the current line. The following examples
+ * illustrate this case:
+ *
+ * 1. Collections in a sequence:
+ *
+ * - - item 1
+ * - item 2
+ * - key 1: value 1
+ * key 2: value 2
+ * - ? complex key
+ * : complex value
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * BLOCK-SEQUENCE-START
+ * BLOCK-ENTRY
+ * BLOCK-SEQUENCE-START
+ * BLOCK-ENTRY
+ * SCALAR("item 1",plain)
+ * BLOCK-ENTRY
+ * SCALAR("item 2",plain)
+ * BLOCK-END
+ * BLOCK-ENTRY
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("key 1",plain)
+ * VALUE
+ * SCALAR("value 1",plain)
+ * KEY
+ * SCALAR("key 2",plain)
+ * VALUE
+ * SCALAR("value 2",plain)
+ * BLOCK-END
+ * BLOCK-ENTRY
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("complex key")
+ * VALUE
+ * SCALAR("complex value")
+ * BLOCK-END
+ * BLOCK-END
+ * STREAM-END
+ *
+ * 2. Collections in a mapping:
+ *
+ * ? a sequence
+ * : - item 1
+ * - item 2
+ * ? a mapping
+ * : key 1: value 1
+ * key 2: value 2
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("a sequence",plain)
+ * VALUE
+ * BLOCK-SEQUENCE-START
+ * BLOCK-ENTRY
+ * SCALAR("item 1",plain)
+ * BLOCK-ENTRY
+ * SCALAR("item 2",plain)
+ * BLOCK-END
+ * KEY
+ * SCALAR("a mapping",plain)
+ * VALUE
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("key 1",plain)
+ * VALUE
+ * SCALAR("value 1",plain)
+ * KEY
+ * SCALAR("key 2",plain)
+ * VALUE
+ * SCALAR("value 2",plain)
+ * BLOCK-END
+ * BLOCK-END
+ * STREAM-END
+ *
+ * YAML also permits non-indented sequences if they are included into a block
+ * mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
+ *
+ * key:
+ * - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
+ * - item 2
+ *
+ * Tokens:
+ *
+ * STREAM-START(utf-8)
+ * BLOCK-MAPPING-START
+ * KEY
+ * SCALAR("key",plain)
+ * VALUE
+ * BLOCK-ENTRY
+ * SCALAR("item 1",plain)
+ * BLOCK-ENTRY
+ * SCALAR("item 2",plain)
+ * BLOCK-END
+ */
+
+/*
+ * Ensure that the buffer contains the required number of characters.
+ * Return 1 on success, 0 on failure (reader error or memory error).
+ */
+func cache(parser *yaml_parser_t, length int) bool {
+ if parser.unread >= length {
+ return true
+ }
+
+ return yaml_parser_update_buffer(parser, length)
+}
+
+/*
+ * Advance the buffer pointer.
+ */
+func skip(parser *yaml_parser_t) {
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+}
+
+func skip_line(parser *yaml_parser_t) {
+ if is_crlf_at(parser.buffer, parser.buffer_pos) {
+ parser.mark.index += 2
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread -= 2
+ parser.buffer_pos += 2
+ } else if is_break_at(parser.buffer, parser.buffer_pos) {
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
+ }
+}
+
+/*
+ * Copy a character to a string buffer and advance pointers.
+ */
+
+func read(parser *yaml_parser_t, s []byte) []byte {
+ w := width(parser.buffer[parser.buffer_pos])
+ if w == 0 {
+ panic("invalid character sequence")
+ }
+ if len(s) == 0 {
+ s = make([]byte, 0, 32)
+ }
+ if w == 1 && len(s)+w <= cap(s) {
+ s = s[:len(s)+1]
+ s[len(s)-1] = parser.buffer[parser.buffer_pos]
+ parser.buffer_pos++
+ } else {
+ s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
+ parser.buffer_pos += w
+ }
+ parser.mark.index++
+ parser.mark.column++
+ parser.unread--
+ return s
+}
+
+/*
+ * Copy a line break character to a string buffer and advance pointers.
+ */
+func read_line(parser *yaml_parser_t, s []byte) []byte {
+ buf := parser.buffer
+ pos := parser.buffer_pos
+ if buf[pos] == '\r' && buf[pos+1] == '\n' {
+ /* CR LF . LF */
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ parser.mark.index++
+ parser.unread--
+ } else if buf[pos] == '\r' || buf[pos] == '\n' {
+ /* CR|LF . LF */
+ s = append(s, '\n')
+ parser.buffer_pos += 1
+ } else if buf[pos] == '\xC2' && buf[pos+1] == '\x85' {
+ /* NEL . LF */
+ s = append(s, '\n')
+ parser.buffer_pos += 2
+ } else if buf[pos] == '\xE2' && buf[pos+1] == '\x80' &&
+ (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9') {
+ // LS|PS . LS|PS
+ s = append(s, buf[parser.buffer_pos:pos+3]...)
+ parser.buffer_pos += 3
+ } else {
+ return s
+ }
+
+ parser.mark.index++
+ parser.mark.column = 0
+ parser.mark.line++
+ parser.unread--
+ return s
+}
+
+/*
+ * Get the next token.
+ */
+
+func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
+ /* Erase the token object. */
+ *token = yaml_token_t{}
+
+ /* No tokens after STREAM-END or error. */
+
+ if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
+ return true
+ }
+
+ /* Ensure that the tokens queue contains enough tokens. */
+
+ if !parser.token_available {
+ if !yaml_parser_fetch_more_tokens(parser) {
+ return false
+ }
+ }
+
+ /* Fetch the next token from the queue. */
+
+ *token = parser.tokens[parser.tokens_head]
+ parser.tokens_head++
+ parser.token_available = false
+ parser.tokens_parsed++
+
+ if token.token_type == yaml_STREAM_END_TOKEN {
+ parser.stream_end_produced = true
+ }
+
+ return true
+}
+
+/*
+ * Set the scanner error and return 0.
+ */
+
+func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string,
+ context_mark YAML_mark_t, problem string) bool {
+ parser.error = yaml_SCANNER_ERROR
+ parser.context = context
+ parser.context_mark = context_mark
+ parser.problem = problem
+ parser.problem_mark = parser.mark
+
+ return false
+}
+
+// Report a scanner error while scanning a tag or a %TAG directive.
+// Fixed vs. upstream candiedyaml: the directive condition was inverted
+// (directive==true must select the %TAG-directive context, matching
+// libyaml/go-yaml), and the caller-supplied problem string was being
+// discarded in favor of a hard-coded "did not find URI escaped octet".
+// Line count of the surrounding diff hunk is preserved.
+func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark YAML_mark_t, problem string) bool {
+
+/*
+ * Ensure that the tokens queue contains at least one token which can be
+ * returned to the Parser.
+ */
+
+func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
+ /* While we need more tokens to fetch, do it. */
+
+ for {
+ /*
+ * Check if we really need to fetch more tokens.
+ */
+
+ need_more_tokens := false
+
+ if parser.tokens_head == len(parser.tokens) {
+ /* Queue is empty. */
+
+ need_more_tokens = true
+ } else {
+
+ /* Check if any potential simple key may occupy the head position. */
+
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ for i := range parser.simple_keys {
+ simple_key := &parser.simple_keys[i]
+
+ if simple_key.possible &&
+ simple_key.token_number == parser.tokens_parsed {
+ need_more_tokens = true
+ break
+ }
+ }
+ }
+ if len(parser.simple_keys) > 0 {
+
+ }
+ /* We are finished. */
+
+ if !need_more_tokens {
+ break
+ }
+
+ /* Fetch the next token. */
+
+ if !yaml_parser_fetch_next_token(parser) {
+ return false
+ }
+
+ }
+
+ parser.token_available = true
+
+ return true
+}
+
+/*
+ * The dispatcher for token fetchers.
+ */
+
+func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
+ /* Ensure that the buffer is initialized. */
+
+ if !cache(parser, 1) {
+ return false
+ }
+
+ /* Check if we just started scanning. Fetch STREAM-START then. */
+
+ if !parser.stream_start_produced {
+ return yaml_parser_fetch_stream_start(parser)
+ }
+
+ /* Eat whitespaces and comments until we reach the next token. */
+
+ if !yaml_parser_scan_to_next_token(parser) {
+ return false
+ }
+
+ /* Remove obsolete potential simple keys. */
+
+ if !yaml_parser_stale_simple_keys(parser) {
+ return false
+ }
+
+ /* Check the indentation level against the current column. */
+
+ if !yaml_parser_unroll_indent(parser, parser.mark.column) {
+ return false
+ }
+
+ /*
+ * Ensure that the buffer contains at least 4 characters. 4 is the length
+ * of the longest indicators ('--- ' and '... ').
+ */
+
+ if !cache(parser, 4) {
+ return false
+ }
+
+ /* Is it the end of the stream? */
+ buf := parser.buffer
+ pos := parser.buffer_pos
+
+ if is_z(buf[pos]) {
+ return yaml_parser_fetch_stream_end(parser)
+ }
+
+ /* Is it a directive? */
+
+ if parser.mark.column == 0 && buf[pos] == '%' {
+ return yaml_parser_fetch_directive(parser)
+ }
+
+ /* Is it the document start indicator? */
+
+ if parser.mark.column == 0 &&
+ buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' &&
+ is_blankz_at(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser,
+ yaml_DOCUMENT_START_TOKEN)
+ }
+
+ /* Is it the document end indicator? */
+
+ if parser.mark.column == 0 &&
+ buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' &&
+ is_blankz_at(buf, pos+3) {
+ return yaml_parser_fetch_document_indicator(parser,
+ yaml_DOCUMENT_END_TOKEN)
+ }
+
+ /* Is it the flow sequence start indicator? */
+
+ if buf[pos] == '[' {
+ return yaml_parser_fetch_flow_collection_start(parser,
+ yaml_FLOW_SEQUENCE_START_TOKEN)
+ }
+
+ /* Is it the flow mapping start indicator? */
+
+ if buf[pos] == '{' {
+ return yaml_parser_fetch_flow_collection_start(parser,
+ yaml_FLOW_MAPPING_START_TOKEN)
+ }
+
+ /* Is it the flow sequence end indicator? */
+
+ if buf[pos] == ']' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_SEQUENCE_END_TOKEN)
+ }
+
+ /* Is it the flow mapping end indicator? */
+
+ if buf[pos] == '}' {
+ return yaml_parser_fetch_flow_collection_end(parser,
+ yaml_FLOW_MAPPING_END_TOKEN)
+ }
+
+ /* Is it the flow entry indicator? */
+
+ if buf[pos] == ',' {
+ return yaml_parser_fetch_flow_entry(parser)
+ }
+
+ /* Is it the block entry indicator? */
+ if buf[pos] == '-' && is_blankz_at(buf, pos+1) {
+ return yaml_parser_fetch_block_entry(parser)
+ }
+
+ /* Is it the key indicator? */
+ if buf[pos] == '?' &&
+ (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) {
+ return yaml_parser_fetch_key(parser)
+ }
+
+ /* Is it the value indicator? */
+ if buf[pos] == ':' &&
+ (parser.flow_level > 0 || is_blankz_at(buf, pos+1)) {
+ return yaml_parser_fetch_value(parser)
+ }
+
+ /* Is it an alias? */
+ if buf[pos] == '*' {
+ return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
+ }
+
+ /* Is it an anchor? */
+
+ if buf[pos] == '&' {
+ return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
+ }
+
+ /* Is it a tag? */
+
+ if buf[pos] == '!' {
+ return yaml_parser_fetch_tag(parser)
+ }
+
+ /* Is it a literal scalar? */
+ if buf[pos] == '|' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, true)
+ }
+
+ /* Is it a folded scalar? */
+ if buf[pos] == '>' && parser.flow_level == 0 {
+ return yaml_parser_fetch_block_scalar(parser, false)
+ }
+
+ /* Is it a single-quoted scalar? */
+
+ if buf[pos] == '\'' {
+ return yaml_parser_fetch_flow_scalar(parser, true)
+ }
+
+ /* Is it a double-quoted scalar? */
+ if buf[pos] == '"' {
+ return yaml_parser_fetch_flow_scalar(parser, false)
+ }
+
+ /*
+ * Is it a plain scalar?
+ *
+ * A plain scalar may start with any non-blank characters except
+ *
+ * '-', '?', ':', ',', '[', ']', '{', '}',
+ * '#', '&', '*', '!', '|', '>', '\'', '\"',
+ * '%', '@', '`'.
+ *
+ * In the block context (and, for the '-' indicator, in the flow context
+ * too), it may also start with the characters
+ *
+ * '-', '?', ':'
+ *
+ * if it is followed by a non-space character.
+ *
+ * The last rule is more restrictive than the specification requires.
+ */
+
+ b := buf[pos]
+ if !(is_blankz_at(buf, pos) || b == '-' ||
+ b == '?' || b == ':' ||
+ b == ',' || b == '[' ||
+ b == ']' || b == '{' ||
+ b == '}' || b == '#' ||
+ b == '&' || b == '*' ||
+ b == '!' || b == '|' ||
+ b == '>' || b == '\'' ||
+ b == '"' || b == '%' ||
+ b == '@' || b == '`') ||
+ (b == '-' && !is_blank(buf[pos+1])) ||
+ (parser.flow_level == 0 &&
+ (buf[pos] == '?' || buf[pos] == ':') &&
+ !is_blank(buf[pos+1])) {
+ return yaml_parser_fetch_plain_scalar(parser)
+ }
+
+ /*
+ * If we don't determine the token type so far, it is an error.
+ */
+
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning for the next token", parser.mark,
+ "found character that cannot start any token")
+}
+
+/*
+ * Check the list of potential simple keys and remove the positions that
+ * cannot contain simple keys anymore.
+ */
+
+func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
+ /* Check for a potential simple key for each flow level. */
+
+ for i := range parser.simple_keys {
+ /*
+ * The specification requires that a simple key
+ *
+ * - is limited to a single line,
+ * - is shorter than 1024 characters.
+ */
+
+ simple_key := &parser.simple_keys[i]
+ if simple_key.possible &&
+ (simple_key.mark.line < parser.mark.line ||
+ simple_key.mark.index+1024 < parser.mark.index) {
+
+ /* Check if the potential simple key to be removed is required. */
+
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+
+ simple_key.possible = false
+ }
+ }
+
+ return true
+}
+
+/*
+ * Check if a simple key may start at the current position and add it if
+ * needed.
+ */
+
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+ /*
+ * A simple key is required at the current position if the scanner is in
+ * the block context and the current column coincides with the indentation
+ * level.
+ */
+
+ required := (parser.flow_level == 0 &&
+ parser.indent == parser.mark.column)
+
+ /*
+ * A simple key is required only when it is the first token in the current
+ * line. Therefore it is always allowed. But we add a check anyway.
+ */
+ if required && !parser.simple_key_allowed {
+ panic("impossible") /* Impossible. */
+ }
+
+ /*
+ * If the current position may start a simple key, save it.
+ */
+
+ if parser.simple_key_allowed {
+ simple_key := yaml_simple_key_t{
+ possible: true,
+ required: required,
+ token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+ }
+ simple_key.mark = parser.mark
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+ }
+
+ return true
+}
+
+/*
+ * Remove a potential simple key at the current flow level.
+ */
+
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+ simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+ if simple_key.possible {
+ /* If the key is required, it is an error. */
+
+ if simple_key.required {
+ return yaml_parser_set_scanner_error(parser,
+ "while scanning a simple key", simple_key.mark,
+ "could not find expected ':'")
+ }
+ }
+
+ /* Remove the key from the stack. */
+
+ simple_key.possible = false
+
+ return true
+}
+
+/*
+ * Increase the flow level and resize the simple key list if needed.
+ */
+
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+ /* Reset the simple key on the next level. */
+
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ /* Increase the flow level. */
+
+ parser.flow_level++
+
+ return true
+}
+
+/*
+ * Decrease the flow level.
+ */
+
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+ if parser.flow_level > 0 {
+ parser.flow_level--
+ parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
+ }
+
+ return true
+}
+
+/*
+ * Push the current indentation level to the stack and set the new level
+ * the current column is greater than the indentation level. In this case,
+ * append or insert the specified token into the token queue.
+ *
+ */
+
+func yaml_parser_roll_indent(parser *yaml_parser_t, column int,
+ number int, token_type yaml_token_type_t, mark YAML_mark_t) bool {
+ /* In the flow context, do nothing. */
+
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ if parser.indent == -1 || parser.indent < column {
+ /*
+ * Push the current indentation level to the stack and set the new
+ * indentation level.
+ */
+
+ parser.indents = append(parser.indents, parser.indent)
+ parser.indent = column
+
+ /* Create a token and insert it into the queue. */
+ token := yaml_token_t{
+ token_type: token_type,
+ start_mark: mark,
+ end_mark: mark,
+ }
+
+ // number == -1 -> enqueue otherwise insert
+ if number > -1 {
+ number -= parser.tokens_parsed
+ }
+ insert_token(parser, number, &token)
+ }
+
+ return true
+}
+
+/*
+ * Pop indentation levels from the indents stack until the current level
+ * becomes less or equal to the column. For each indentation level, append
+ * the BLOCK-END token.
+ */
+
+func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
+ /* In the flow context, do nothing. */
+
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ /*
+ * column is unsigned and parser->indent is signed, so if
+ * parser->indent is less than zero the conditional in the while
+ * loop below is incorrect. Guard against that.
+ */
+
+ if parser.indent < 0 {
+ return true
+ }
+
+ /* Loop through the indentation levels in the stack. */
+
+ for parser.indent > column {
+ /* Create a token and append it to the queue. */
+ token := yaml_token_t{
+ token_type: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ insert_token(parser, -1, &token)
+
+ /* Pop the indentation level. */
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+
+ }
+
+ return true
+}
+
+/*
+ * Pop indentation levels from the indents stack until the current
+ * level resets to -1. For each indentation level, append the
+ * BLOCK-END token.
+ */
+
+func yaml_parser_reset_indent(parser *yaml_parser_t) bool {
+ /* In the flow context, do nothing. */
+
+ if parser.flow_level > 0 {
+ return true
+ }
+
+ /* Loop through the indentation levels in the stack. */
+
+ for parser.indent > -1 {
+ /* Create a token and append it to the queue. */
+
+ token := yaml_token_t{
+ token_type: yaml_BLOCK_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+ insert_token(parser, -1, &token)
+
+ /* Pop the indentation level. */
+ parser.indent = parser.indents[len(parser.indents)-1]
+ parser.indents = parser.indents[:len(parser.indents)-1]
+ }
+
+ return true
+}
+
+/*
+ * Initialize the scanner and produce the STREAM-START token.
+ */
+
+func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
+ /* Set the initial indentation. */
+
+ parser.indent = -1
+
+ /* Initialize the simple key stack. */
+ parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
+
+ /* A simple key is allowed at the beginning of the stream. */
+
+ parser.simple_key_allowed = true
+
+ /* We have started. */
+
+ parser.stream_start_produced = true
+
+ /* Create the STREAM-START token and append it to the queue. */
+ token := yaml_token_t{
+ token_type: yaml_STREAM_START_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ encoding: parser.encoding,
+ }
+ insert_token(parser, -1, &token)
+
+ return true
+}
+
+/*
+ * Produce the STREAM-END token and shut down the scanner.
+ */
+
+func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
+ /* Force new line. */
+
+ if parser.mark.column != 0 {
+ parser.mark.column = 0
+ parser.mark.line++
+ }
+
+ /* Reset the indentation level. */
+
+ if !yaml_parser_reset_indent(parser) {
+ return false
+ }
+
+ /* Reset simple keys. */
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ /* Create the STREAM-END token and append it to the queue. */
+ token := yaml_token_t{
+ token_type: yaml_STREAM_END_TOKEN,
+ start_mark: parser.mark,
+ end_mark: parser.mark,
+ }
+
+ insert_token(parser, -1, &token)
+
+ return true
+}
+
+/*
+ * Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
+ */
+
+func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
+ /* Reset the indentation level. */
+
+ if !yaml_parser_reset_indent(parser) {
+ return false
+ }
+
+ /* Reset simple keys. */
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ /* Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. */
+ var token yaml_token_t
+ if !yaml_parser_scan_directive(parser, &token) {
+ return false
+ }
+
+ /* Append the token to the queue. */
+ insert_token(parser, -1, &token)
+
+ return true
+}
+
+/*
+ * Produce the DOCUMENT-START or DOCUMENT-END token.
+ */
+
+func yaml_parser_fetch_document_indicator(parser *yaml_parser_t,
+ token_type yaml_token_type_t) bool {
+
+ /* Reset the indentation level. */
+
+ if !yaml_parser_reset_indent(parser) {
+ return false
+ }
+
+ /* Reset simple keys. */
+
+ if !yaml_parser_remove_simple_key(parser) {
+ return false
+ }
+
+ parser.simple_key_allowed = false
+
+ /* Consume the token. */
+
+ start_mark := parser.mark
+
+ skip(parser)
+ skip(parser)
+ skip(parser)
+
+ end_mark := parser.mark
+
+ /* Create the DOCUMENT-START or DOCUMENT-END token. */
+
+ token := yaml_token_t{
+ token_type: token_type,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+
+ /* Append the token to the queue. */
+
+ insert_token(parser, -1, &token)
+
+ return true
+}
+
+/*
+ * Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+ */
+
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t,
+ token_type yaml_token_type_t) bool {
+
+ /* The indicators '[' and '{' may start a simple key. */
+
+ if !yaml_parser_save_simple_key(parser) {
+ return false
+ }
+
+ /* Increase the flow level. */
+
+ if !yaml_parser_increase_flow_level(parser) {
+ return false
+ }
+
+ /* A simple key may follow the indicators '[' and '{'. */
+
+ parser.simple_key_allowed = true
+
+ /* Consume the token. */
+
+ start_mark := parser.mark
+ skip(parser)
+ end_mark := parser.mark
+
+ /* Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. */
+
+ token := yaml_token_t{
+ token_type: token_type,
+ start_mark: start_mark,
+ end_mark: end_mark,
+ }
+
+ /* Append the token to the queue. */
+
+ insert_token(parser, -1, &token)
+
+ return true
+}
+
+/*
+ * Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+ */
+
+// Called when the scanner sees ']' or '}'. Drops any pending simple key for
+// the current flow level, leaves that level, and queues the end token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t,
+	token_type yaml_token_type_t) bool {
+
+	/* Reset any potential simple key on the current flow level. */
+
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	/* Decrease the flow level. */
+
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	/* No simple keys after the indicators ']' and '}'. */
+
+	parser.simple_key_allowed = false
+
+	/* Consume the token. */
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	/* Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. */
+
+	token := yaml_token_t{
+		token_type: token_type,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+
+	/* Append the token to the queue. */
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the FLOW-ENTRY token.
+ */
+
+// Called on ',' inside a flow collection; queues a FLOW-ENTRY token covering
+// the comma and re-enables simple keys for the following node.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+
+	/* Reset any potential simple keys on the current flow level. */
+
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	/* Simple keys are allowed after ','. */
+
+	parser.simple_key_allowed = true
+
+	/* Consume the token. */
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	/* Create the FLOW-ENTRY token and append it to the queue. */
+
+	token := yaml_token_t{
+		token_type: yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the BLOCK-ENTRY token.
+ */
+
+// Called on a '-' sequence-entry indicator. In the block context it may also
+// open a BLOCK-SEQUENCE-START via roll_indent; in the flow context the error
+// is deliberately deferred to the parser (see the inline comment below).
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+
+	/* Check if the scanner is in the block context. */
+
+	if parser.flow_level == 0 {
+		/* Check if we are allowed to start a new entry. */
+
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+
+		/* Add the BLOCK-SEQUENCE-START token if needed. */
+
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1,
+			yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		/*
+		 * It is an error for the '-' indicator to occur in the flow context,
+		 * but we let the Parser detect and report about it because the Parser
+		 * is able to point to the context.
+		 */
+	}
+
+	/* Reset any potential simple keys on the current flow level. */
+
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	/* Simple keys are allowed after '-'. */
+
+	parser.simple_key_allowed = true
+
+	/* Consume the token. */
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	/* Create the BLOCK-ENTRY token and append it to the queue. */
+
+	token := yaml_token_t{
+		token_type: yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the KEY token.
+ */
+
+// Called on the explicit '?' key indicator; may first open a
+// BLOCK-MAPPING-START in the block context.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+	/* In the block context, additional checks are required. */
+
+	if parser.flow_level == 0 {
+		/* Check if we are allowed to start a new key (not necessarily simple). */
+
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+
+		/* Add the BLOCK-MAPPING-START token if needed. */
+
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1,
+			yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	/* Reset any potential simple keys on the current flow level. */
+
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	/* Simple keys are allowed after '?' in the block context. */
+
+	parser.simple_key_allowed = (parser.flow_level == 0)
+
+	/* Consume the token. */
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	/* Create the KEY token and append it to the queue. */
+
+	token := yaml_token_t{
+		token_type: yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the VALUE token.
+ */
+
+// Called on the ':' value indicator. If a simple key was pending, a KEY token
+// is retroactively inserted at the saved position before the VALUE token is
+// appended; otherwise the ':' is treated as following a complex key.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	// Pointer into parser.simple_keys: mutations below update parser state.
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	/* Have we found a simple key? */
+
+	if simple_key.possible {
+
+		/* Create the KEY token and insert it into the queue. */
+
+		token := yaml_token_t{
+			token_type: yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+
+		insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		/* In the block context, we may need to add the BLOCK-MAPPING-START token. */
+
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		/* Remove the simple key. */
+
+		simple_key.possible = false
+
+		/* A simple key cannot follow another simple key. */
+
+		parser.simple_key_allowed = false
+	} else {
+		/* The ':' indicator follows a complex key. */
+
+		/* In the block context, extra checks are required. */
+
+		if parser.flow_level == 0 {
+			/* Check if we are allowed to start a complex value. */
+
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			/* Add the BLOCK-MAPPING-START token if needed. */
+
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1,
+				yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		/* Simple keys after ':' are allowed in the block context. */
+
+		parser.simple_key_allowed = (parser.flow_level == 0)
+	}
+
+	/* Consume the token. */
+
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	/* Create the VALUE token and append it to the queue. */
+
+	token := yaml_token_t{
+		token_type: yaml_VALUE_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the ALIAS or ANCHOR token.
+ */
+
+// Called on '&' (anchor) or '*' (alias); token_type selects which token the
+// scan helper builds.
+func yaml_parser_fetch_anchor(parser *yaml_parser_t, token_type yaml_token_type_t) bool {
+
+	/* An anchor or an alias could be a simple key. */
+
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	/* A simple key cannot follow an anchor or an alias. */
+
+	parser.simple_key_allowed = false
+
+	/* Create the ALIAS or ANCHOR token and append it to the queue. */
+	var token yaml_token_t
+	if !yaml_parser_scan_anchor(parser, &token, token_type) {
+		return false
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the TAG token.
+ */
+
+// Called on '!'; delegates the actual lexing to yaml_parser_scan_tag.
+func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
+	/* A tag could be a simple key. */
+
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	/* A simple key cannot follow a tag. */
+
+	parser.simple_key_allowed = false
+
+	/* Create the TAG token and append it to the queue. */
+	var token yaml_token_t
+	if !yaml_parser_scan_tag(parser, &token) {
+		return false
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
+ */
+
+// Called on '|' (literal=true) or '>' (literal=false).
+func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
+	/* Remove any potential simple keys. */
+
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	/* A simple key may follow a block scalar. */
+
+	parser.simple_key_allowed = true
+
+	/* Create the SCALAR token and append it to the queue. */
+	var token yaml_token_t
+	if !yaml_parser_scan_block_scalar(parser, &token, literal) {
+		return false
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
+ */
+
+// Called on an opening quote; single selects ' vs " handling in the scanner.
+func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
+
+	/* A quoted scalar could be a simple key. */
+
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	/* A simple key cannot follow a flow scalar. */
+
+	parser.simple_key_allowed = false
+
+	/* Create the SCALAR token and append it to the queue. */
+	var token yaml_token_t
+	if !yaml_parser_scan_flow_scalar(parser, &token, single) {
+		return false
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Produce the SCALAR(...,plain) token.
+ */
+
+// Called when no other indicator matched; scans an unquoted scalar.
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
+	/* A plain scalar could be a simple key. */
+
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	/* A simple key cannot follow a plain scalar. */
+
+	parser.simple_key_allowed = false
+
+	/* Create the SCALAR token and append it to the queue. */
+	var token yaml_token_t
+	if !yaml_parser_scan_plain_scalar(parser, &token) {
+		return false
+	}
+
+	insert_token(parser, -1, &token)
+
+	return true
+}
+
+/*
+ * Eat whitespaces and comments until the next token is found.
+ */
+
+// Advances the parser past blanks, comments, and line breaks, updating
+// simple_key_allowed at line starts in the block context. Returns false only
+// when the underlying buffer refill (cache) fails.
+func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
+	/* Until the next token is not found. */
+
+	for {
+		/* Allow the BOM mark to start a line. */
+
+		if !cache(parser, 1) {
+			return false
+		}
+
+		if parser.mark.column == 0 && is_bom_at(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+		}
+
+		/*
+		 * Eat whitespaces.
+		 *
+		 * Tabs are allowed:
+		 *
+		 *  - in the flow context;
+		 *  - in the block context, but not at the beginning of the line or
+		 *  after '-', '?', or ':' (complex value).
+		 */
+
+		if !cache(parser, 1) {
+			return false
+		}
+
+		for parser.buffer[parser.buffer_pos] == ' ' ||
+			((parser.flow_level > 0 || !parser.simple_key_allowed) &&
+				parser.buffer[parser.buffer_pos] == '\t') {
+			skip(parser)
+			if !cache(parser, 1) {
+				return false
+			}
+		}
+
+		/* Eat a comment until a line break. */
+
+		if parser.buffer[parser.buffer_pos] == '#' {
+			for !is_breakz_at(parser.buffer, parser.buffer_pos) {
+				skip(parser)
+				if !cache(parser, 1) {
+					return false
+				}
+			}
+		}
+
+		/* If it is a line break, eat it. */
+
+		if is_break_at(parser.buffer, parser.buffer_pos) {
+			if !cache(parser, 2) {
+				return false
+			}
+			skip_line(parser)
+
+			/* In the block context, a new line may start a simple key. */
+
+			if parser.flow_level == 0 {
+				parser.simple_key_allowed = true
+			}
+		} else {
+			/* We have found a token. */
+
+			break
+		}
+	}
+
+	return true
+}
+
+/*
+ * Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
+ *
+ * Scope:
+ *      %YAML    1.1    # a comment \n
+ *      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ *      %TAG    !yaml!  tag:yaml.org,2002:  \n
+ *      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ */
+
+// Scans a '%' directive into *token, then consumes trailing blanks, an
+// optional comment, and the line break. Only %YAML and %TAG are recognized.
+func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
+	/* Eat '%'. */
+
+	start_mark := parser.mark
+
+	skip(parser)
+
+	/* Scan the directive name. */
+	var name []byte
+	if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
+		return false
+	}
+
+	/* Is it a YAML directive? */
+	var major, minor int
+	if bytes.Equal(name, []byte("YAML")) {
+		/* Scan the VERSION directive value. */
+
+		if !yaml_parser_scan_version_directive_value(parser, start_mark,
+			&major, &minor) {
+			return false
+		}
+
+		end_mark := parser.mark
+
+		/* Create a VERSION-DIRECTIVE token. */
+
+		*token = yaml_token_t{
+			token_type: yaml_VERSION_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			major:      major,
+			minor:      minor,
+		}
+	} else if bytes.Equal(name, []byte("TAG")) {
+		/* Is it a TAG directive? */
+		/* Scan the TAG directive value. */
+		var handle, prefix []byte
+		if !yaml_parser_scan_tag_directive_value(parser, start_mark,
+			&handle, &prefix) {
+			return false
+		}
+
+		end_mark := parser.mark
+
+		/* Create a TAG-DIRECTIVE token. */
+
+		*token = yaml_token_t{
+			token_type: yaml_TAG_DIRECTIVE_TOKEN,
+			start_mark: start_mark,
+			end_mark:   end_mark,
+			value:      handle,
+			prefix:     prefix,
+		}
+	} else {
+		/* Unknown directive. */
+		// NOTE(review): "uknown" is a typo inherited from the upstream libyaml
+		// error string; preserved here to stay byte-compatible with upstream.
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found uknown directive name")
+		return false
+	}
+
+	/* Eat the rest of the line including any comments. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer[parser.buffer_pos]) {
+		skip(parser)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz_at(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if !cache(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	/* Check if we are at the end of the line. */
+
+	if !is_breakz_at(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	/* Eat a line break. */
+
+	if is_break_at(parser.buffer, parser.buffer_pos) {
+		if !cache(parser, 2) {
+			return false
+		}
+		skip_line(parser)
+	}
+
+	return true
+}
+
+/*
+ * Scan the directive name.
+ *
+ * Scope:
+ *      %YAML   1.1     # a comment \n
+ *       ^^^^
+ *      %TAG    !yaml!  tag:yaml.org,2002: \n
+ *       ^^^
+ */
+
+// Reads the alphanumeric run after '%' into *name; the name must be
+// non-empty and followed by a blank or break.
+func yaml_parser_scan_directive_name(parser *yaml_parser_t,
+	start_mark YAML_mark_t, name *[]byte) bool {
+
+	/* Consume the directive name. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer[parser.buffer_pos]) {
+		s = read(parser, s)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	/* Check if the name is empty. */
+
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	/* Check for a blank character after the name. */
+
+	if !is_blankz_at(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+
+	*name = s
+
+	return true
+}
+
+/*
+ * Scan the value of VERSION-DIRECTIVE.
+ *
+ * Scope:
+ *      %YAML   1.1     # a comment \n
+ *           ^^^^^^
+ */
+
+// Parses "<major>.<minor>" after the %YAML directive name into *major/*minor.
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t,
+	start_mark YAML_mark_t, major *int, minor *int) bool {
+	/* Eat whitespaces. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer[parser.buffer_pos]) {
+		skip(parser)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	/* Consume the major version number. */
+
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	/* Eat '.'. */
+
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	/* Consume the minor version number. */
+
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+
+	return true
+}
+
+// MAX_NUMBER_LENGTH bounds the digit count accepted for a %YAML version
+// component, preventing overflow of the accumulated int value.
+const MAX_NUMBER_LENGTH = 9
+
+/*
+ * Scan the version number of VERSION-DIRECTIVE.
+ *
+ * Scope:
+ *      %YAML   1.1     # a comment \n
+ *              ^
+ *      %YAML   1.1     # a comment \n
+ *                ^
+ */
+
+// Reads one decimal component into *number; errors on empty or over-long runs.
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t,
+	start_mark YAML_mark_t, number *int) bool {
+
+	/* Repeat while the next character is digit. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	value := 0
+	length := 0
+	for is_digit(parser.buffer[parser.buffer_pos]) {
+		/* Check if the number is too long. */
+
+		length++
+		if length > MAX_NUMBER_LENGTH {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+
+		value = value*10 + as_digit(parser.buffer[parser.buffer_pos])
+
+		skip(parser)
+
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	/* Check if the number was present. */
+
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+
+	*number = value
+
+	return true
+}
+
+/*
+ * Scan the value of a TAG-DIRECTIVE token.
+ *
+ * Scope:
+ *      %TAG    !yaml!  tag:yaml.org,2002:  \n
+ *          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+ */
+
+// Parses "<handle> <prefix>" after %TAG; both outputs are set only on success.
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t,
+	start_mark YAML_mark_t, handle, prefix *[]byte) bool {
+
+	/* Eat whitespaces. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer[parser.buffer_pos]) {
+		skip(parser)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	/* Scan a handle. */
+	var handle_value []byte
+	if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
+		return false
+	}
+
+	/* Expect a whitespace. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	if !is_blank(parser.buffer[parser.buffer_pos]) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace")
+		return false
+	}
+
+	/* Eat whitespaces. */
+
+	for is_blank(parser.buffer[parser.buffer_pos]) {
+		skip(parser)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	/* Scan a prefix. */
+	var prefix_value []byte
+	if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
+		return false
+	}
+
+	/* Expect a whitespace or line break. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	if !is_blankz_at(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	*handle = handle_value
+	*prefix = prefix_value
+
+	return true
+}
+
+// yaml_parser_scan_anchor scans the name after an '&' or '*' indicator into
+// *token. token_type distinguishes anchor vs. alias for error reporting and
+// for the resulting token's type.
+func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t,
+	token_type yaml_token_type_t) bool {
+
+	/* Eat the indicator character. */
+
+	start_mark := parser.mark
+
+	skip(parser)
+
+	/* Consume the value. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer[parser.buffer_pos]) {
+		s = read(parser, s)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	end_mark := parser.mark
+
+	/*
+	 * Check if length of the anchor is greater than 0 and it is followed by
+	 * a whitespace character or one of the indicators:
+	 *
+	 *      '?', ':', ',', ']', '}', '%', '@', '`'.
+	 */
+
+	b := parser.buffer[parser.buffer_pos]
+	if len(s) == 0 || !(is_blankz_at(parser.buffer, parser.buffer_pos) || b == '?' ||
+		b == ':' || b == ',' ||
+		b == ']' || b == '}' ||
+		b == '%' || b == '@' ||
+		b == '`') {
+		context := "while scanning an anchor"
+		if token_type != yaml_ANCHOR_TOKEN {
+			context = "while scanning an alias"
+		}
+		yaml_parser_set_scanner_error(parser, context, start_mark,
+			"did not find expected alphabetic or numeric character")
+		return false
+	}
+
+	/* Create a token. */
+	*token = yaml_token_t{
+		token_type: token_type,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+	}
+
+	return true
+}
+
+/*
+ * Scan a TAG token.
+ */
+
+// Handles the three tag forms: verbatim '!<uri>', a bare '!' followed by a
+// blank (non-specific tag), and '!suffix' / '!handle!suffix'. The resulting
+// token carries the handle in value and the remainder in suffix.
+func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
+	start_mark := parser.mark
+
+	/* Check if the tag is in the canonical form. */
+
+	if !cache(parser, 2) {
+		return false
+	}
+
+	var handle []byte
+	var suffix []byte
+	if parser.buffer[parser.buffer_pos+1] == '<' {
+		/* Set the handle to '' */
+
+		/* Eat '!<' */
+
+		skip(parser)
+		skip(parser)
+
+		/* Consume the tag value. */
+
+		if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+			return false
+		}
+
+		/* Check for '>' and eat it. */
+
+		if parser.buffer[parser.buffer_pos] != '>' {
+			yaml_parser_set_scanner_error(parser, "while scanning a tag",
+				start_mark, "did not find the expected '>'")
+			return false
+		}
+
+		skip(parser)
+	} else if is_blank(parser.buffer[parser.buffer_pos+1]) {
+		// NON-SPECIFIED
+		skip(parser)
+	} else {
+		/* The tag has either the '!suffix' or the '!handle!suffix' form. */
+
+		/* First, try to scan a handle. */
+
+		if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
+			return false
+		}
+
+		/* Check if it is, indeed, handle. */
+
+		if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
+			/* Scan the suffix now. */
+
+			if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
+				return false
+			}
+		} else {
+			/* It wasn't a handle after all.  Scan the rest of the tag. */
+
+			if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
+				return false
+			}
+
+			/* Set the handle to '!'. */
+
+			handle = []byte{'!'}
+
+			/*
+			 * A special case: the '!' tag.  Set the handle to '' and the
+			 * suffix to '!'.
+			 */
+
+			if len(suffix) == 0 {
+				handle, suffix = suffix, handle
+			}
+
+		}
+	}
+
+	/* Check the character which ends the tag. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	if !is_blankz_at(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a tag",
+			start_mark, "did not find expected whitespace or line break")
+		return false
+	}
+
+	end_mark := parser.mark
+
+	/* Create a token. */
+
+	*token = yaml_token_t{
+		token_type: yaml_TAG_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      handle,
+		suffix:     suffix,
+	}
+
+	return true
+}
+
+/*
+ * Scan a tag handle.
+ */
+
+// Reads "!", "!!", or "!name!" into *handle. directive=true means we are in
+// a %TAG directive, where a non-terminated handle is an error rather than
+// the start of a plain '!suffix' tag.
+func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool,
+	start_mark YAML_mark_t, handle *[]byte) bool {
+
+	/* Check the initial '!' character. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	if parser.buffer[parser.buffer_pos] != '!' {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected '!'")
+		return false
+	}
+
+	/* Copy the '!' character. */
+	var s []byte
+	s = read(parser, s)
+
+	/* Copy all subsequent alphabetical and numerical characters. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	for is_alpha(parser.buffer[parser.buffer_pos]) {
+		s = read(parser, s)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	/* Check if the trailing character is '!' and copy it. */
+
+	if parser.buffer[parser.buffer_pos] == '!' {
+		s = read(parser, s)
+	} else {
+		/*
+		 * It's either the '!' tag or not really a tag handle.  If it's a %TAG
+		 * directive, it's an error.  If it's a tag token, it must be a part of
+		 * URI.
+		 */
+
+		if directive && !(s[0] == '!' && len(s) == 1) {
+			yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find expected '!'")
+			return false
+		}
+	}
+
+	*handle = s
+
+	return true
+}
+
+/*
+ * Scan a tag.
+ */
+
+// Scans a tag URI into *uri, optionally seeded with head (minus its leading
+// '!'); '%'-escapes are decoded via yaml_parser_scan_uri_escapes.
+func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool,
+	head []byte, start_mark YAML_mark_t, uri *[]byte) bool {
+
+	var s []byte
+	/*
+	 * Copy the head if needed.
+	 *
+	 * Note that we don't copy the leading '!' character.
+	 */
+	if len(head) > 1 {
+		s = append(s, head[1:]...)
+	}
+
+	/* Scan the tag. */
+	if !cache(parser, 1) {
+		return false
+	}
+
+	/*
+	 * The set of characters that may appear in URI is as follows:
+	 *
+	 *      '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
+	 *      '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
+	 *      '%'.
+	 */
+
+	b := parser.buffer[parser.buffer_pos]
+	for is_alpha(b) || b == ';' ||
+		b == '/' || b == '?' ||
+		b == ':' || b == '@' ||
+		b == '&' || b == '=' ||
+		b == '+' || b == '$' ||
+		b == ',' || b == '.' ||
+		b == '!' || b == '~' ||
+		b == '*' || b == '\'' ||
+		b == '(' || b == ')' ||
+		b == '[' || b == ']' ||
+		b == '%' {
+		/* Check if it is a URI-escape sequence. */
+
+		if b == '%' {
+			if !yaml_parser_scan_uri_escapes(parser,
+				directive, start_mark, &s) {
+				return false
+			}
+		} else {
+			s = read(parser, s)
+		}
+
+		if !cache(parser, 1) {
+			return false
+		}
+		b = parser.buffer[parser.buffer_pos]
+	}
+
+	/* Check if the tag is non-empty. */
+
+	if len(s) == 0 {
+		yaml_parser_set_scanner_tag_error(parser, directive,
+			start_mark, "did not find expected tag URI")
+		return false
+	}
+
+	*uri = s
+
+	return true
+}
+
+/*
+ * Decode an URI-escape sequence corresponding to a single UTF-8 character.
+ */
+
+// Consumes one or more %XX octets forming a single UTF-8 sequence and appends
+// the raw bytes to *s. w starts at the sentinel 10 ("length unknown") until
+// the leading octet fixes the sequence length via width().
+func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool,
+	start_mark YAML_mark_t, s *[]byte) bool {
+
+	/* Decode the required number of characters. */
+	w := 10
+	for w > 0 {
+
+		/* Check for a URI-escaped octet. */
+
+		if !cache(parser, 3) {
+			return false
+		}
+
+		if !(parser.buffer[parser.buffer_pos] == '%' &&
+			is_hex(parser.buffer[parser.buffer_pos+1]) &&
+			is_hex(parser.buffer[parser.buffer_pos+2])) {
+			return yaml_parser_set_scanner_tag_error(parser, directive,
+				start_mark, "did not find URI escaped octet")
+		}
+
+		/* Get the octet. */
+		octet := byte((as_hex(parser.buffer[parser.buffer_pos+1]) << 4) +
+			as_hex(parser.buffer[parser.buffer_pos+2]))
+
+		/* If it is the leading octet, determine the length of the UTF-8 sequence. */
+
+		if w == 10 {
+			w = width(octet)
+			if w == 0 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect leading UTF-8 octet")
+			}
+		} else {
+			/* Check if the trailing octet is correct. */
+
+			if (octet & 0xC0) != 0x80 {
+				return yaml_parser_set_scanner_tag_error(parser, directive,
+					start_mark, "found an incorrect trailing UTF-8 octet")
+			}
+		}
+
+		/* Copy the octet and move the pointers. */
+
+		*s = append(*s, octet)
+		skip(parser)
+		skip(parser)
+		skip(parser)
+		w--
+	}
+
+	return true
+}
+
+/*
+ * Scan a block scalar.
+ */
+
+// Scans a '|' (literal=true) or '>' (folded) block scalar into *token.
+// chomping: -1 strip, 0 clip, +1 keep; increment is the explicit indentation
+// indicator (0 when auto-detected).
+func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t,
+	literal bool) bool {
+
+	/* Eat the indicator '|' or '>'. */
+
+	start_mark := parser.mark
+
+	skip(parser)
+
+	/* Scan the additional block scalar indicators. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	/* Check for a chomping indicator. */
+	chomping := 0
+	increment := 0
+	if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+		/* Set the chomping method and eat the indicator. */
+
+		if parser.buffer[parser.buffer_pos] == '+' {
+			chomping = +1
+		} else {
+			chomping = -1
+		}
+
+		skip(parser)
+
+		/* Check for an indentation indicator. */
+
+		if !cache(parser, 1) {
+			return false
+		}
+
+		if is_digit(parser.buffer[parser.buffer_pos]) {
+			/* Check that the indentation is greater than 0. */
+
+			if parser.buffer[parser.buffer_pos] == '0' {
+				yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+					start_mark, "found an indentation indicator equal to 0")
+				return false
+			}
+
+			/* Get the indentation level and eat the indicator. */
+
+			increment = as_digit(parser.buffer[parser.buffer_pos])
+
+			skip(parser)
+		}
+	} else if is_digit(parser.buffer[parser.buffer_pos]) {
+
+		/* Do the same as above, but in the opposite order. */
+		if parser.buffer[parser.buffer_pos] == '0' {
+			yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found an indentation indicator equal to 0")
+			return false
+		}
+
+		increment = as_digit(parser.buffer[parser.buffer_pos])
+
+		skip(parser)
+
+		if !cache(parser, 1) {
+			return false
+		}
+
+		if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
+			if parser.buffer[parser.buffer_pos] == '+' {
+				chomping = +1
+			} else {
+				chomping = -1
+			}
+
+			skip(parser)
+		}
+	}
+
+	/* Eat whitespaces and comments to the end of the line. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	for is_blank(parser.buffer[parser.buffer_pos]) {
+		skip(parser)
+		if !cache(parser, 1) {
+			return false
+		}
+	}
+
+	if parser.buffer[parser.buffer_pos] == '#' {
+		for !is_breakz_at(parser.buffer, parser.buffer_pos) {
+			skip(parser)
+			if !cache(parser, 1) {
+				return false
+			}
+		}
+	}
+
+	/* Check if we are at the end of the line. */
+
+	if !is_breakz_at(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+			start_mark, "did not find expected comment or line break")
+		return false
+	}
+
+	/* Eat a line break. */
+
+	if is_break_at(parser.buffer, parser.buffer_pos) {
+		if !cache(parser, 2) {
+			return false
+		}
+
+		skip_line(parser)
+	}
+
+	end_mark := parser.mark
+
+	/* Set the indentation level if it was specified. */
+	// indent == 0 means "auto-detect"; scan_block_scalar_breaks fills it in.
+	indent := 0
+	if increment > 0 {
+		if parser.indent >= 0 {
+			indent = parser.indent + increment
+		} else {
+			indent = increment
+		}
+	}
+
+	/* Scan the leading line breaks and determine the indentation level if needed. */
+	var trailing_breaks []byte
+	if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks,
+		start_mark, &end_mark) {
+		return false
+	}
+
+	/* Scan the block scalar content. */
+
+	if !cache(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	var leading_break []byte
+	leading_blank := false
+	trailing_blank := false
+	for parser.mark.column == indent && !is_z(parser.buffer[parser.buffer_pos]) {
+
+		/*
+		 * We are at the beginning of a non-empty line.
+		 */
+
+		/* Is it a trailing whitespace? */
+
+		trailing_blank = is_blank(parser.buffer[parser.buffer_pos])
+
+		/* Check if we need to fold the leading line break. */
+
+		if !literal && len(leading_break) > 0 && leading_break[0] == '\n' &&
+			!leading_blank && !trailing_blank {
+			/* Do we need to join the lines by space? */
+			if len(trailing_breaks) == 0 {
+				s = append(s, ' ')
+			}
+			leading_break = leading_break[:0]
+		} else {
+			s = append(s, leading_break...)
+			leading_break = leading_break[:0]
+		}
+
+		/* Append the remaining line breaks. */
+		s = append(s, trailing_breaks...)
+		trailing_breaks = trailing_breaks[:0]
+
+		/* Is it a leading whitespace? */
+
+		leading_blank = is_blank(parser.buffer[parser.buffer_pos])
+
+		/* Consume the current line. */
+
+		for !is_breakz_at(parser.buffer, parser.buffer_pos) {
+			s = read(parser, s)
+			if !cache(parser, 1) {
+				return false
+			}
+		}
+
+		/* Consume the line break. */
+
+		if !cache(parser, 2) {
+			return false
+		}
+
+		leading_break = read_line(parser, leading_break)
+
+		/* Eat the following indentation spaces and line breaks. */
+
+		if !yaml_parser_scan_block_scalar_breaks(parser,
+			&indent, &trailing_breaks, start_mark, &end_mark) {
+			return false
+		}
+	}
+
+	/* Chomp the tail. */
+
+	if chomping != -1 {
+		s = append(s, leading_break...)
+	}
+	if chomping == 1 {
+		s = append(s, trailing_breaks...)
+	}
+
+	/* Create a token. */
+
+	*token = yaml_token_t{
+		token_type: yaml_SCALAR_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+		value:      s,
+		style:      yaml_LITERAL_SCALAR_STYLE,
+	}
+	if !literal {
+		token.style = yaml_FOLDED_SCALAR_STYLE
+	}
+
+	return true
+}
+
+/*
+ * Scan indentation spaces and line breaks for a block scalar.  Determine the
+ * indentation level if needed.
+ */
+
+// Consumes indentation and empty lines, appending each break to *breaks and
+// updating *end_mark. When *indent is 0 (auto-detect), it is set from the
+// deepest indentation seen, floored at parser.indent+1 and at 1.
+func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t,
+	indent *int, breaks *[]byte,
+	start_mark YAML_mark_t, end_mark *YAML_mark_t) bool {
+
+	*end_mark = parser.mark
+
+	/* Eat the indentation spaces and line breaks. */
+	max_indent := 0
+	for {
+		/* Eat the indentation spaces. */
+
+		if !cache(parser, 1) {
+			return false
+		}
+
+		for (*indent == 0 || parser.mark.column < *indent) &&
+			is_space(parser.buffer[parser.buffer_pos]) {
+			skip(parser)
+			if !cache(parser, 1) {
+				return false
+			}
+		}
+		if parser.mark.column > max_indent {
+			max_indent = parser.mark.column
+		}
+
+		/* Check for a tab character messing the indentation. */
+
+		if (*indent == 0 || parser.mark.column < *indent) &&
+			is_tab(parser.buffer[parser.buffer_pos]) {
+			return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
+				start_mark, "found a tab character where an indentation space is expected")
+		}
+
+		/* Have we found a non-empty line? */
+
+		if !is_break_at(parser.buffer, parser.buffer_pos) {
+			break
+		}
+
+		/* Consume the line break. */
+
+		if !cache(parser, 2) {
+			return false
+		}
+
+		*breaks = read_line(parser, *breaks)
+		*end_mark = parser.mark
+	}
+
+	/* Determine the indentation level if needed. */
+
+	if *indent == 0 {
+		*indent = max_indent
+		if *indent < parser.indent+1 {
+			*indent = parser.indent + 1
+		}
+		if *indent < 1 {
+			*indent = 1
+		}
+	}
+
+	return true
+}
+
/*
 * Scan a quoted scalar.
 *
 * Scans a single-quoted (single == true) or double-quoted (single == false)
 * scalar, starting with parser positioned at the opening quote.  On success
 * fills token with a yaml_SCALAR_TOKEN holding the decoded bytes and returns
 * true; on failure records a scanner error on the parser and returns false.
 */

func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t,
	single bool) bool {

	/* Eat the left quote. */

	start_mark := parser.mark

	skip(parser)

	/* Consume the content of the quoted scalar. */
	var s []byte               /* decoded scalar bytes */
	var leading_break []byte   /* first line break of the current blank run */
	var trailing_breaks []byte /* line breaks after the first one */
	var whitespaces []byte     /* pending spaces/tabs (dropped when folding) */
	for {
		/* Check that there are no document indicators ("---"/"...") at the
		 * beginning of the line. */

		if !cache(parser, 4) {
			return false
		}

		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz_at(parser.buffer, parser.buffer_pos+3) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected document indicator")
			return false
		}

		/* Check for EOF. */

		if is_z(parser.buffer[parser.buffer_pos]) {
			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
				start_mark, "found unexpected end of stream")
			return false
		}

		/* Consume non-blank characters. */

		if !cache(parser, 2) {
			return false
		}

		leading_blanks := false

		for !is_blankz_at(parser.buffer, parser.buffer_pos) {
			/* Check for an escaped single quote. */

			if single && parser.buffer[parser.buffer_pos] == '\'' &&
				parser.buffer[parser.buffer_pos+1] == '\'' {
				// It is an escaped single quote: '' decodes to a single '.
				s = append(s, '\'')
				skip(parser)
				skip(parser)
			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
				/* Check for the right quote. */
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
				/* Check for the right quote. */
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '\\' &&
				is_break_at(parser.buffer, parser.buffer_pos+1) {

				/* Check for an escaped line break. */
				if !cache(parser, 3) {
					return false
				}

				skip(parser)
				skip_line(parser)
				leading_blanks = true
				break
			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {

				/* Check for an escape sequence. */

				code_length := 0 /* > 0 for the numeric \x, \u, \U escapes */

				/* Check the escape character. */

				switch parser.buffer[parser.buffer_pos+1] {
				case '0':
					s = append(s, 0)
				case 'a':
					s = append(s, '\x07')
				case 'b':
					s = append(s, '\x08')
				case 't', '\t':
					s = append(s, '\x09')
				case 'n':
					s = append(s, '\x0A')
				case 'v':
					s = append(s, '\x0B')
				case 'f':
					s = append(s, '\x0C')
				case 'r':
					s = append(s, '\x0D')
				case 'e':
					s = append(s, '\x1B')
				case ' ':
					s = append(s, '\x20')
				case '"':
					s = append(s, '"')
				case '/':
					s = append(s, '/')
				case '\\':
					s = append(s, '\\')
				case 'N': /* NEL (#x85) */
					s = append(s, '\xC2')
					s = append(s, '\x85')
				case '_': /* #xA0 */
					s = append(s, '\xC2')
					s = append(s, '\xA0')
				case 'L': /* LS (#x2028) */
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA8')
				case 'P': /* PS (#x2029) */
					s = append(s, '\xE2')
					s = append(s, '\x80')
					s = append(s, '\xA9')
				case 'x':
					code_length = 2
				case 'u':
					code_length = 4
				case 'U':
					code_length = 8
				default:
					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
						start_mark, "found unknown escape character")
					return false
				}

				/* Skip the backslash and the escape character. */
				skip(parser)
				skip(parser)

				/* Consume an arbitrary escape code. */

				if code_length > 0 {
					value := 0

					/* Scan the character value. */

					if !cache(parser, code_length) {
						return false
					}

					for k := 0; k < code_length; k++ {
						if !is_hex(parser.buffer[parser.buffer_pos+k]) {
							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
								start_mark, "did not find expected hexdecimal number")
							return false
						}
						value = (value << 4) + as_hex(parser.buffer[parser.buffer_pos+k])
					}

					/* Check the value (no surrogates, not above U+10FFFF) and
					 * write the character as UTF-8. */

					if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
						yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
							start_mark, "found invalid Unicode character escape code")
						return false
					}

					if value <= 0x7F {
						s = append(s, byte(value))
					} else if value <= 0x7FF {
						s = append(s, byte(0xC0+(value>>6)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else if value <= 0xFFFF {
						s = append(s, byte(0xE0+(value>>12)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					} else {
						s = append(s, byte(0xF0+(value>>18)))
						s = append(s, byte(0x80+((value>>12)&0x3F)))
						s = append(s, byte(0x80+((value>>6)&0x3F)))
						s = append(s, byte(0x80+(value&0x3F)))
					}

					/* Advance the pointer past the hex digits. */

					for k := 0; k < code_length; k++ {
						skip(parser)
					}
				}
			} else {
				/* It is a non-escaped non-blank character. */

				s = read(parser, s)
			}

			if !cache(parser, 2) {
				return false
			}
		}

		/* Check if we are at the end of the scalar (the closing quote). */
		b := parser.buffer[parser.buffer_pos]
		if single {
			if b == '\'' {
				break
			}
		} else if b == '"' {
			break
		}

		/* Consume blank characters. */

		if !cache(parser, 1) {
			return false
		}

		for is_blank(parser.buffer[parser.buffer_pos]) || is_break_at(parser.buffer, parser.buffer_pos) {
			if is_blank(parser.buffer[parser.buffer_pos]) {
				/* Consume a space or a tab character. */
				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if !cache(parser, 2) {
					return false
				}

				/* Check if it is a first line break. */
				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}

			if !cache(parser, 1) {
				return false
			}
		}

		/* Join the whitespaces or fold line breaks. */

		if leading_blanks {
			/* Do we need to fold line breaks?  A single LF folds to a space;
			 * additional breaks are kept verbatim. */

			if len(leading_break) > 0 && leading_break[0] == '\n' {
				if len(trailing_breaks) == 0 {
					s = append(s, ' ')
				} else {
					s = append(s, trailing_breaks...)
					trailing_breaks = trailing_breaks[:0]
				}

				leading_break = leading_break[:0]
			} else {
				s = append(s, leading_break...)
				s = append(s, trailing_breaks...)
				leading_break = leading_break[:0]
				trailing_breaks = trailing_breaks[:0]
			}
		} else {
			s = append(s, whitespaces...)
			whitespaces = whitespaces[:0]
		}
	}

	/* Eat the right quote. */

	skip(parser)

	end_mark := parser.mark

	/* Create a token. */

	*token = yaml_token_t{
		token_type: yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_SINGLE_QUOTED_SCALAR_STYLE,
	}
	if !single {
		token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
	}

	return true
}
+
/*
 * Scan a plain scalar.
 *
 * Scans an unquoted scalar.  Scanning stops at a comment, a document
 * indicator ("---"/"..."), an indicator character that ends a plain scalar
 * in the current context, or (outside flow context) when the content falls
 * below the required indentation level.  Fills token with a
 * yaml_SCALAR_TOKEN and returns true; returns false on a scanner error.
 */

func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
	var s []byte               /* accumulated scalar bytes */
	var leading_break []byte   /* first line break of the current blank run */
	var trailing_breaks []byte /* line breaks after the first one */
	var whitespaces []byte     /* pending spaces/tabs (dropped when folding) */
	leading_blanks := false
	indent := parser.indent + 1 /* minimum indentation for continuation lines */

	start_mark := parser.mark
	end_mark := parser.mark

	/* Consume the content of the plain scalar. */

	for {
		/* Check for a document indicator. */

		if !cache(parser, 4) {
			return false
		}

		if parser.mark.column == 0 &&
			((parser.buffer[parser.buffer_pos] == '-' &&
				parser.buffer[parser.buffer_pos+1] == '-' &&
				parser.buffer[parser.buffer_pos+2] == '-') ||
				(parser.buffer[parser.buffer_pos] == '.' &&
					parser.buffer[parser.buffer_pos+1] == '.' &&
					parser.buffer[parser.buffer_pos+2] == '.')) &&
			is_blankz_at(parser.buffer, parser.buffer_pos+3) {
			break
		}

		/* Check for a comment. */

		if parser.buffer[parser.buffer_pos] == '#' {
			break
		}

		/* Consume non-blank characters. */

		for !is_blankz_at(parser.buffer, parser.buffer_pos) {
			/* Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13". */

			if parser.flow_level > 0 &&
				parser.buffer[parser.buffer_pos] == ':' &&
				!is_blankz_at(parser.buffer, parser.buffer_pos+1) {
				yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
					start_mark, "found unexpected ':'")
				return false
			}

			/* Check for indicators that may end a plain scalar. */
			b := parser.buffer[parser.buffer_pos]
			if (b == ':' && is_blankz_at(parser.buffer, parser.buffer_pos+1)) ||
				(parser.flow_level > 0 &&
					(b == ',' || b == ':' ||
						b == '?' || b == '[' ||
						b == ']' || b == '{' ||
						b == '}')) {
				break
			}

			/* Check if we need to join whitespaces and breaks. */

			if leading_blanks || len(whitespaces) > 0 {
				if leading_blanks {
					/* Do we need to fold line breaks?  A single LF folds to
					 * a space; additional breaks are kept verbatim. */

					if leading_break[0] == '\n' {
						if len(trailing_breaks) == 0 {
							s = append(s, ' ')
						} else {
							s = append(s, trailing_breaks...)
							trailing_breaks = trailing_breaks[:0]
						}
						leading_break = leading_break[:0]
					} else {
						s = append(s, leading_break...)
						s = append(s, trailing_breaks...)
						leading_break = leading_break[:0]
						trailing_breaks = trailing_breaks[:0]
					}

					leading_blanks = false
				} else {
					s = append(s, whitespaces...)
					whitespaces = whitespaces[:0]
				}
			}

			/* Copy the character. */

			s = read(parser, s)
			end_mark = parser.mark

			if !cache(parser, 2) {
				return false
			}
		}

		/* Is it the end? */

		if !(is_blank(parser.buffer[parser.buffer_pos]) ||
			is_break_at(parser.buffer, parser.buffer_pos)) {
			break
		}

		/* Consume blank characters. */

		if !cache(parser, 1) {
			return false
		}

		for is_blank(parser.buffer[parser.buffer_pos]) ||
			is_break_at(parser.buffer, parser.buffer_pos) {

			if is_blank(parser.buffer[parser.buffer_pos]) {
				/* Check for tab character that abuse indentation. */

				if leading_blanks && parser.mark.column < indent &&
					is_tab(parser.buffer[parser.buffer_pos]) {
					yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
						start_mark, "found a tab character that violate indentation")
					return false
				}

				/* Consume a space or a tab character. */

				if !leading_blanks {
					whitespaces = read(parser, whitespaces)
				} else {
					skip(parser)
				}
			} else {
				if !cache(parser, 2) {
					return false
				}

				/* Check if it is a first line break. */

				if !leading_blanks {
					whitespaces = whitespaces[:0]
					leading_break = read_line(parser, leading_break)
					leading_blanks = true
				} else {
					trailing_breaks = read_line(parser, trailing_breaks)
				}
			}
			if !cache(parser, 1) {
				return false
			}
		}

		/* Check indentation level (block context only). */

		if parser.flow_level == 0 && parser.mark.column < indent {
			break
		}
	}

	/* Create a token. */

	*token = yaml_token_t{
		token_type: yaml_SCALAR_TOKEN,
		start_mark: start_mark,
		end_mark:   end_mark,
		value:      s,
		style:      yaml_PLAIN_SCALAR_STYLE,
	}

	/* Note that we change the 'simple_key_allowed' flag: a plain scalar that
	 * ended on a blank line may be followed by a new simple key. */

	if leading_blanks {
		parser.simple_key_allowed = true
	}

	return true
}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
new file mode 100644
index 00000000..f153aee4
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/tags.go
@@ -0,0 +1,360 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+)
+
// A field represents a single struct field selected for (un)marshaling,
// together with the options parsed from its "yaml" struct tag.
//
// NOTE: keep the field order stable — typeFields constructs this struct
// with a positional composite literal.
type field struct {
	name      string       // effective name: tag name if tagged, else the Go field name
	tag       bool         // true if name came from a yaml tag
	index     []int        // reflect index path to the field (handles embedding)
	typ       reflect.Type // field type (pointer followed for anonymous struct fields)
	omitEmpty bool         // ",omitempty" option present in the tag
	flow      bool         // ",flow" option present in the tag
}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+ if x[i].name != x[j].name {
+ return x[i].name < x[j].name
+ }
+ if len(x[i].index) != len(x[j].index) {
+ return len(x[i].index) < len(x[j].index)
+ }
+ if x[i].tag != x[j].tag {
+ return x[i].tag
+ }
+ return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+ for k, xik := range x[i].index {
+ if k >= len(x[j].index) {
+ return false
+ }
+ if xik != x[j].index[k] {
+ return xik < x[j].index[k]
+ }
+ }
+ return len(x[i].index) < len(x[j].index)
+}
+
// typeFields returns a list of fields that the yaml (un)marshaler should
// recognize for the given type.  The algorithm is breadth-first search over
// the set of structs to include - the top struct and then any reachable
// anonymous (embedded) structs.  It mirrors encoding/json's field resolution,
// but reads the "yaml" struct tag.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next; a count > 1
	// marks a type embedded more than once at the same depth.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level (shallower wins).
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				tag := sf.Tag.Get("yaml")
				if tag == "-" {
					// Explicitly excluded from (un)marshaling.
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				// Extend the parent's index path with this field's index.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft,
						opts.Contains("omitempty"), opts.Contains("flow")})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, field{name: ft.Name(), index: index, typ: ft})
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with yaml tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+ // The fields are sorted in increasing index-length order. The winner
+ // must therefore be one with the shortest index length. Drop all
+ // longer entries, which is easy: just truncate the slice.
+ length := len(fields[0].index)
+ tagged := -1 // Index of first tagged field.
+ for i, f := range fields {
+ if len(f.index) > length {
+ fields = fields[:i]
+ break
+ }
+ if f.tag {
+ if tagged >= 0 {
+ // Multiple tagged fields at the same level: conflict.
+ // Return no field.
+ return field{}, false
+ }
+ tagged = i
+ }
+ }
+ if tagged >= 0 {
+ return fields[tagged], true
+ }
+ // All remaining fields have the same length. If there's more than one,
+ // we have a conflict (two fields named "X" at the same level) and we
+ // return no field.
+ if len(fields) > 1 {
+ return field{}, false
+ }
+ return fields[0], true
+}
+
// fieldCache memoizes typeFields results per struct type so the reflection
// walk runs once per type (same pattern as encoding/json).  Access is
// guarded by the embedded RWMutex.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+ fieldCache.RLock()
+ f := fieldCache.m[t]
+ fieldCache.RUnlock()
+ if f != nil {
+ return f
+ }
+
+ // Compute fields without lock.
+ // Might duplicate effort but won't hold other computations back.
+ f = typeFields(t)
+ if f == nil {
+ f = []field{}
+ }
+
+ fieldCache.Lock()
+ if fieldCache.m == nil {
+ fieldCache.m = map[reflect.Type][]field{}
+ }
+ fieldCache.m[t] = f
+ fieldCache.Unlock()
+ return f
+}
+
// tagOptions is the string following a comma in a struct field's "yaml"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
+
// isValidTag reports whether s is usable as a tag name: non-empty and made
// of letters, digits, and a small set of allowed punctuation.  Backslash,
// quotes, and the comma are deliberately excluded.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	const allowedPunct = "!#$%&()*+-./:<=>?@[]^_{|}~ "
	for _, r := range s {
		if strings.ContainsRune(allowedPunct, r) {
			continue // explicitly permitted punctuation
		}
		if !unicode.IsLetter(r) && !unicode.IsDigit(r) {
			return false
		}
	}
	return true
}
+
+func fieldByIndex(v reflect.Value, index []int) reflect.Value {
+ for _, i := range index {
+ if v.Kind() == reflect.Ptr {
+ if v.IsNil() {
+ return reflect.Value{}
+ }
+ v = v.Elem()
+ }
+ v = v.Field(i)
+ }
+ return v
+}
+
+func typeByIndex(t reflect.Type, index []int) reflect.Type {
+ for _, i := range index {
+ if t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+ t = t.Field(i).Type
+ }
+ return t
+}
+
+// stringValues is a slice of reflect.Value holding *reflect.StringValue.
+// It implements the methods to sort by string.
+type stringValues []reflect.Value
+
+func (sv stringValues) Len() int { return len(sv) }
+func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
+func (sv stringValues) Less(i, j int) bool {
+ av, ak := getElem(sv[i])
+ bv, bk := getElem(sv[j])
+ if ak == reflect.String && bk == reflect.String {
+ return av.String() < bv.String()
+ }
+
+ return ak < bk
+}
+
+func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
+ k := v.Kind()
+ for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
+ v = v.Elem()
+ k = v.Kind()
+ }
+
+ return v, k
+}
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+ if idx := strings.Index(tag, ","); idx != -1 {
+ return tag[:idx], tagOptions(tag[idx+1:])
+ }
+ return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+ if len(o) == 0 {
+ return false
+ }
+ s := string(o)
+ for s != "" {
+ var next string
+ i := strings.Index(s, ",")
+ if i >= 0 {
+ s, next = s[:i], s[i+1:]
+ }
+ if s == optionName {
+ return true
+ }
+ s = next
+ }
+ return false
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
new file mode 100644
index 00000000..a76b6336
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/writer.go
@@ -0,0 +1,128 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+/*
+ * Set the writer error and return 0.
+ */
+
+func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
+ emitter.error = yaml_WRITER_ERROR
+ emitter.problem = problem
+
+ return false
+}
+
+/*
+ * Flush the output buffer.
+ */
+
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
+ if emitter.write_handler == nil {
+ panic("Write handler must be set") /* Write handler must be set. */
+ }
+ if emitter.encoding == yaml_ANY_ENCODING {
+ panic("Encoding must be set") /* Output encoding must be set. */
+ }
+
+ /* Check if the buffer is empty. */
+
+ if emitter.buffer_pos == 0 {
+ return true
+ }
+
+ /* If the output encoding is UTF-8, we don't need to recode the buffer. */
+
+ if emitter.encoding == yaml_UTF8_ENCODING {
+ if err := emitter.write_handler(emitter,
+ emitter.buffer[:emitter.buffer_pos]); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+ emitter.buffer_pos = 0
+ return true
+ }
+
+ /* Recode the buffer into the raw buffer. */
+
+ var low, high int
+ if emitter.encoding == yaml_UTF16LE_ENCODING {
+ low, high = 0, 1
+ } else {
+ high, low = 1, 0
+ }
+
+ pos := 0
+ for pos < emitter.buffer_pos {
+
+ /*
+ * See the "reader.c" code for more details on UTF-8 encoding. Note
+ * that we assume that the buffer contains a valid UTF-8 sequence.
+ */
+
+ /* Read the next UTF-8 character. */
+
+ octet := emitter.buffer[pos]
+
+ var w int
+ var value rune
+ switch {
+ case octet&0x80 == 0x00:
+ w, value = 1, rune(octet&0x7F)
+ case octet&0xE0 == 0xC0:
+ w, value = 2, rune(octet&0x1F)
+ case octet&0xF0 == 0xE0:
+ w, value = 3, rune(octet&0x0F)
+ case octet&0xF8 == 0xF0:
+ w, value = 4, rune(octet&0x07)
+ }
+
+ for k := 1; k < w; k++ {
+ octet = emitter.buffer[pos+k]
+ value = (value << 6) + (rune(octet) & 0x3F)
+ }
+
+ pos += w
+
+ /* Write the character. */
+
+ if value < 0x10000 {
+ var b [2]byte
+ b[high] = byte(value >> 8)
+ b[low] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
+ } else {
+ /* Write the character using a surrogate pair (check "reader.c"). */
+
+ var b [4]byte
+ value -= 0x10000
+ b[high] = byte(0xD8 + (value >> 18))
+ b[low] = byte((value >> 10) & 0xFF)
+ b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
+ b[low+2] = byte(value & 0xFF)
+ emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
+ }
+ }
+
+ /* Write the raw buffer. */
+
+ // Write the raw buffer.
+ if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
+ return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
+ }
+
+ emitter.buffer_pos = 0
+ emitter.raw_buffer = emitter.raw_buffer[:0]
+ return true
+}
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
new file mode 100644
index 00000000..de4c05ad
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_definesh.go
@@ -0,0 +1,22 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
const (
	/* Version of the libyaml implementation this package appears to be
	   ported from.  yaml_VERSION_STRING must stay in sync with the
	   MAJOR.MINOR.PATCH components above it. */
	yaml_VERSION_MAJOR = 0
	yaml_VERSION_MINOR = 1
	yaml_VERSION_PATCH = 6
	yaml_VERSION_STRING = "0.1.6"
)
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
new file mode 100644
index 00000000..2b3b7d74
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yaml_privateh.go
@@ -0,0 +1,891 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
const (
	/*
	 * The size of the input raw (undecoded) buffer.  (The commented-out C
	 * reference below uses 16384; this port uses a smaller buffer.)
	 */
	INPUT_RAW_BUFFER_SIZE = 1024

	/*
	 * The size of the input buffer.
	 *
	 * It should be possible to decode the whole raw buffer.
	 */
	INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)

	/*
	 * The size of the output buffer.
	 */

	OUTPUT_BUFFER_SIZE = 512

	/*
	 * The size of the output raw buffer.
	 *
	 * It should be possible to encode the whole output buffer.
	 */

	OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)

	/* Initial capacities for internal stacks and queues. */
	INITIAL_STACK_SIZE = 16
	INITIAL_QUEUE_SIZE = 16
)
+
// width returns the byte length of the UTF-8 sequence introduced by lead
// byte b, or 0 if b is not a valid lead byte (e.g. a continuation byte).
func width(b byte) int {
	switch {
	case b&0x80 == 0x00:
		return 1
	case b&0xE0 == 0xC0:
		return 2
	case b&0xF0 == 0xE0:
		return 3
	case b&0xF8 == 0xF0:
		return 4
	}
	return 0
}
+
+func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
+ w := width(src[*src_pos])
+ switch w {
+ case 4:
+ dest[*dest_pos+3] = src[*src_pos+3]
+ fallthrough
+ case 3:
+ dest[*dest_pos+2] = src[*src_pos+2]
+ fallthrough
+ case 2:
+ dest[*dest_pos+1] = src[*src_pos+1]
+ fallthrough
+ case 1:
+ dest[*dest_pos] = src[*src_pos]
+ default:
+ panic("invalid width")
+ }
+ *dest_pos += w
+ *src_pos += w
+}
+
// is_alpha reports whether b is an ASCII letter, a digit, '_', or '-'
// (the characters allowed in YAML anchor/tag-handle style names).
func is_alpha(b byte) bool {
	switch {
	case b >= '0' && b <= '9',
		b >= 'A' && b <= 'Z',
		b >= 'a' && b <= 'z':
		return true
	}
	return b == '_' || b == '-'
}
+
// is_digit reports whether b is an ASCII decimal digit.
func is_digit(b byte) bool {
	return '0' <= b && b <= '9'
}
+
// as_digit returns the numeric value of ASCII digit b.  The caller is
// expected to have checked is_digit first.
func as_digit(b byte) int {
	return int(b) - int('0')
}
+
// is_hex reports whether b is an ASCII hexadecimal digit (either case).
func is_hex(b byte) bool {
	switch {
	case b >= '0' && b <= '9',
		b >= 'A' && b <= 'F',
		b >= 'a' && b <= 'f':
		return true
	}
	return false
}
+
// as_hex returns the numeric value of hexadecimal digit b.  The caller is
// expected to have checked is_hex first; for other input the digit branch
// is used as a fallback.
func as_hex(b byte) int {
	switch {
	case b >= 'A' && b <= 'F':
		return int(b-'A') + 10
	case b >= 'a' && b <= 'f':
		return int(b-'a') + 10
	}
	return int(b) - '0'
}
+
+// #define AS_HEX_AT(string,offset) \
+// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
+// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
+// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
+// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
+// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
+// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
+// ((string).pointer[offset] - (yaml_char_t) '0'))
+
+// /*
+// * Check if the character is a line break, space, tab, or NUL.
+// */
+func is_blankz_at(b []byte, i int) bool {
+ return is_blank(b[i]) || is_breakz_at(b, i)
+}
+
// is_break_at reports whether the bytes at b[i] start a YAML line break:
// CR, LF, NEL (#x85), LS (#x2028), or PS (#x2029).  For the multi-byte
// breaks it inspects up to two following bytes, so the buffer must have
// lookahead available at i (the scanner's cache() guarantees this).
func is_break_at(b []byte, i int) bool {
	switch b[i] {
	case '\r', '\n': // CR (#xD), LF (#xA)
		return true
	case 0xC2: // possible NEL (#x85)
		return b[i+1] == 0x85
	case 0xE2: // possible LS (#x2028) or PS (#x2029)
		return b[i+1] == 0x80 && (b[i+2] == 0xA8 || b[i+2] == 0xA9)
	}
	return false
}
+
+func is_breakz_at(b []byte, i int) bool {
+ return is_break_at(b, i) || is_z(b[i])
+}
+
// is_crlf_at reports whether a CR LF pair starts at b[i].  b[i+1] is only
// read when b[i] is CR, matching the original short-circuit behavior.
func is_crlf_at(b []byte, i int) bool {
	if b[i] != '\r' {
		return false
	}
	return b[i+1] == '\n'
}
+
// is_z reports whether b is the NUL byte, used as the end-of-stream marker.
func is_z(b byte) bool {
	return b == 0
}
+
// is_space reports whether b is the ASCII space character (#x20).
func is_space(b byte) bool {
	return b == 0x20
}
+
// is_tab reports whether b is the horizontal tab character (#x09).
func is_tab(b byte) bool {
	return b == 0x09
}
+
// is_blank reports whether b is a blank character: space or tab.
func is_blank(b byte) bool {
	switch b {
	case ' ', '\t':
		return true
	}
	return false
}
+
// is_ascii reports whether b is a 7-bit ASCII byte.
func is_ascii(b byte) bool {
	return b < 0x80
}
+
// is_printable_at reports whether the UTF-8 character starting at b[i] can
// be emitted unescaped: LF, printable ASCII, or a non-ASCII character that
// is not C1, a surrogate, the BOM (#xFEFF), or #xFFFE/#xFFFF.  Multi-byte
// checks read up to two following bytes, so lookahead must be available.
func is_printable_at(b []byte, i int) bool {
	switch c := b[i]; {
	case c == 0x0A: // LF
		return true
	case c >= 0x20 && c <= 0x7E: // printable ASCII
		return true
	case c == 0xC2: // lead byte of #xA0..#xBF
		return b[i+1] >= 0xA0
	case c > 0xC2 && c < 0xED: // up through most of the BMP
		return true
	case c == 0xED: // exclude surrogates #xD800..#xDFFF
		return b[i+1] < 0xA0
	case c == 0xEE:
		return true
	case c == 0xEF: // exclude #xFEFF (BOM), #xFFFE, #xFFFF
		if b[i+1] == 0xBB && b[i+2] == 0xBF {
			return false // BOM
		}
		if b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF) {
			return false // #xFFFE / #xFFFF
		}
		return true
	}
	return false
}
+
// insert_token appends token to the parser's token queue when pos < 0, or
// inserts it at offset pos relative to tokens_head.  When the slice is full
// and there is a consumed prefix, the live region is first shifted down to
// index 0 so the backing array is reused instead of reallocated.
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
	// collapse the slice: reclaim the consumed prefix before append would
	// otherwise have to grow the backing array
	if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
		if parser.tokens_head != len(parser.tokens) {
			// move the tokens down
			copy(parser.tokens, parser.tokens[parser.tokens_head:])
		}
		// readjust the length
		parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
		parser.tokens_head = 0
	}

	// Grow by one (appending a copy of the token), then for an insertion
	// shift the tail right and drop the token into the requested slot.
	parser.tokens = append(parser.tokens, *token)
	if pos < 0 {
		return
	}
	copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
	parser.tokens[parser.tokens_head+pos] = *token
}
+
// is_bom_at reports whether the UTF-8 byte order mark (EF BB BF) starts at
// b[i].  The caller must guarantee at least three bytes of lookahead.
func is_bom_at(b []byte, i int) bool {
	return b[i] == '\xEF' && b[i+1] == '\xBB' && b[i+2] == '\xBF'
}
+
+//
+// #ifdef HAVE_CONFIG_H
+// #include
+// #endif
+//
+// #include "./yaml.h"
+//
+// #include
+// #include
+//
+// /*
+// * Memory management.
+// */
+//
+// yaml_DECLARE(void *)
+// yaml_malloc(size_t size);
+//
+// yaml_DECLARE(void *)
+// yaml_realloc(void *ptr, size_t size);
+//
+// yaml_DECLARE(void)
+// yaml_free(void *ptr);
+//
+// yaml_DECLARE(yaml_char_t *)
+// yaml_strdup(const yaml_char_t *);
+//
+// /*
+// * Reader: Ensure that the buffer contains at least `length` characters.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
+//
+// /*
+// * Scanner: Ensure that the token stack contains at least one token ready.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
+//
+// /*
+// * The size of the input raw buffer.
+// */
+//
+// #define INPUT_RAW_BUFFER_SIZE 16384
+//
+// /*
+// * The size of the input buffer.
+// *
+// * It should be possible to decode the whole raw buffer.
+// */
+//
+// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
+//
+// /*
+// * The size of the output buffer.
+// */
+//
+// #define OUTPUT_BUFFER_SIZE 16384
+//
+// /*
+// * The size of the output raw buffer.
+// *
+// * It should be possible to encode the whole output buffer.
+// */
+//
+// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
+//
+// /*
+// * The size of other stacks and queues.
+// */
+//
+// #define INITIAL_STACK_SIZE 16
+// #define INITIAL_QUEUE_SIZE 16
+// #define INITIAL_STRING_SIZE 16
+//
+// /*
+// * Buffer management.
+// */
+//
+// #define BUFFER_INIT(context,buffer,size) \
+// (((buffer).start = yaml_malloc(size)) ? \
+// ((buffer).last = (buffer).pointer = (buffer).start, \
+// (buffer).end = (buffer).start+(size), \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// #define BUFFER_DEL(context,buffer) \
+// (yaml_free((buffer).start), \
+// (buffer).start = (buffer).pointer = (buffer).end = 0)
+//
+// /*
+// * String management.
+// */
+//
+// typedef struct {
+// yaml_char_t *start;
+// yaml_char_t *end;
+// yaml_char_t *pointer;
+// } yaml_string_t;
+//
+// yaml_DECLARE(int)
+// yaml_string_extend(yaml_char_t **start,
+// yaml_char_t **pointer, yaml_char_t **end);
+//
+// yaml_DECLARE(int)
+// yaml_string_join(
+// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
+// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
+//
+// #define NULL_STRING { NULL, NULL, NULL }
+//
+// #define STRING(string,length) { (string), (string)+(length), (string) }
+//
+// #define STRING_ASSIGN(value,string,length) \
+// ((value).start = (string), \
+// (value).end = (string)+(length), \
+// (value).pointer = (string))
+//
+// #define STRING_INIT(context,string,size) \
+// (((string).start = yaml_malloc(size)) ? \
+// ((string).pointer = (string).start, \
+// (string).end = (string).start+(size), \
+// memset((string).start, 0, (size)), \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// #define STRING_DEL(context,string) \
+// (yaml_free((string).start), \
+// (string).start = (string).pointer = (string).end = 0)
+//
+// #define STRING_EXTEND(context,string) \
+// (((string).pointer+5 < (string).end) \
+// || yaml_string_extend(&(string).start, \
+// &(string).pointer, &(string).end))
+//
+// #define CLEAR(context,string) \
+// ((string).pointer = (string).start, \
+// memset((string).start, 0, (string).end-(string).start))
+//
+// #define JOIN(context,string_a,string_b) \
+// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
+// &(string_a).end, &(string_b).start, \
+// &(string_b).pointer, &(string_b).end)) ? \
+// ((string_b).pointer = (string_b).start, \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// /*
+// * String check operations.
+// */
+//
+// /*
+// * Check the octet at the specified position.
+// */
+//
+// #define CHECK_AT(string,octet,offset) \
+// ((string).pointer[offset] == (yaml_char_t)(octet))
+//
+// /*
+// * Check the current octet in the buffer.
+// */
+//
+// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
+//
+// /*
+// * Check if the character at the specified position is an alphabetical
+// * character, a digit, '_', or '-'.
+// */
+//
+// #define IS_ALPHA_AT(string,offset) \
+// (((string).pointer[offset] >= (yaml_char_t) '0' && \
+// (string).pointer[offset] <= (yaml_char_t) '9') || \
+// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
+// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
+// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
+// (string).pointer[offset] <= (yaml_char_t) 'z') || \
+// (string).pointer[offset] == '_' || \
+// (string).pointer[offset] == '-')
+//
+// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is a digit.
+// */
+//
+// #define IS_DIGIT_AT(string,offset) \
+// (((string).pointer[offset] >= (yaml_char_t) '0' && \
+// (string).pointer[offset] <= (yaml_char_t) '9'))
+//
+// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
+//
+// /*
+// * Get the value of a digit.
+// */
+//
+// #define AS_DIGIT_AT(string,offset) \
+// ((string).pointer[offset] - (yaml_char_t) '0')
+//
+// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is a hex-digit.
+// */
+//
+// #define IS_HEX_AT(string,offset) \
+// (((string).pointer[offset] >= (yaml_char_t) '0' && \
+// (string).pointer[offset] <= (yaml_char_t) '9') || \
+// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
+// (string).pointer[offset] <= (yaml_char_t) 'F') || \
+// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
+// (string).pointer[offset] <= (yaml_char_t) 'f'))
+//
+// #define IS_HEX(string) IS_HEX_AT((string),0)
+//
+// /*
+// * Get the value of a hex-digit.
+// */
+//
+// #define AS_HEX_AT(string,offset) \
+// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
+// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
+// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
+// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
+// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
+// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
+// ((string).pointer[offset] - (yaml_char_t) '0'))
+//
+// #define AS_HEX(string) AS_HEX_AT((string),0)
+//
+// /*
+// * Check if the character is ASCII.
+// */
+//
+// #define IS_ASCII_AT(string,offset) \
+// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
+//
+// #define IS_ASCII(string) IS_ASCII_AT((string),0)
+//
+// /*
+// * Check if the character can be printed unescaped.
+// */
+//
+// #define IS_PRINTABLE_AT(string,offset) \
+// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
+// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
+// && (string).pointer[offset] <= 0x7E) \
+// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
+// && (string).pointer[offset+1] >= 0xA0) \
+// || ((string).pointer[offset] > 0xC2 \
+// && (string).pointer[offset] < 0xED) \
+// || ((string).pointer[offset] == 0xED \
+// && (string).pointer[offset+1] < 0xA0) \
+// || ((string).pointer[offset] == 0xEE) \
+// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
+// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
+// && (string).pointer[offset+2] == 0xBF) \
+// && !((string).pointer[offset+1] == 0xBF \
+// && ((string).pointer[offset+2] == 0xBE \
+// || (string).pointer[offset+2] == 0xBF))))
+//
+// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is NUL.
+// */
+//
+// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
+//
+// #define IS_Z(string) IS_Z_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is BOM.
+// */
+//
+// #define IS_BOM_AT(string,offset) \
+// (CHECK_AT((string),'\xEF',(offset)) \
+// && CHECK_AT((string),'\xBB',(offset)+1) \
+// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
+//
+// #define IS_BOM(string) IS_BOM_AT(string,0)
+//
+// /*
+// * Check if the character at the specified position is space.
+// */
+//
+// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
+//
+// #define IS_SPACE(string) IS_SPACE_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is tab.
+// */
+//
+// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
+//
+// #define IS_TAB(string) IS_TAB_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is blank (space or tab).
+// */
+//
+// #define IS_BLANK_AT(string,offset) \
+// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
+//
+// #define IS_BLANK(string) IS_BLANK_AT((string),0)
+//
+// /*
+// * Check if the character at the specified position is a line break.
+// */
+//
+// #define IS_BREAK_AT(string,offset) \
+// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
+// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
+// || (CHECK_AT((string),'\xC2',(offset)) \
+// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
+// || (CHECK_AT((string),'\xE2',(offset)) \
+// && CHECK_AT((string),'\x80',(offset)+1) \
+// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
+// || (CHECK_AT((string),'\xE2',(offset)) \
+// && CHECK_AT((string),'\x80',(offset)+1) \
+// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
+//
+// #define IS_BREAK(string) IS_BREAK_AT((string),0)
+//
+// #define IS_CRLF_AT(string,offset) \
+// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
+//
+// #define IS_CRLF(string) IS_CRLF_AT((string),0)
+//
+// /*
+// * Check if the character is a line break or NUL.
+// */
+//
+// #define IS_BREAKZ_AT(string,offset) \
+// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
+//
+// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
+//
+// /*
+// * Check if the character is a line break, space, or NUL.
+// */
+//
+// #define IS_SPACEZ_AT(string,offset) \
+// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
+//
+// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
+//
+// /*
+// * Check if the character is a line break, space, tab, or NUL.
+// */
+//
+// #define IS_BLANKZ_AT(string,offset) \
+// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
+//
+// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
+//
+// /*
+// * Determine the width of the character.
+// */
+//
+// #define WIDTH_AT(string,offset) \
+// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
+// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
+// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
+// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
+//
+// #define WIDTH(string) WIDTH_AT((string),0)
+//
+// /*
+// * Move the string pointer to the next character.
+// */
+//
+// #define MOVE(string) ((string).pointer += WIDTH((string)))
+//
+// /*
+// * Copy a character and move the pointers of both strings.
+// */
+//
+// #define COPY(string_a,string_b) \
+// ((*(string_b).pointer & 0x80) == 0x00 ? \
+// (*((string_a).pointer++) = *((string_b).pointer++)) : \
+// (*(string_b).pointer & 0xE0) == 0xC0 ? \
+// (*((string_a).pointer++) = *((string_b).pointer++), \
+// *((string_a).pointer++) = *((string_b).pointer++)) : \
+// (*(string_b).pointer & 0xF0) == 0xE0 ? \
+// (*((string_a).pointer++) = *((string_b).pointer++), \
+// *((string_a).pointer++) = *((string_b).pointer++), \
+// *((string_a).pointer++) = *((string_b).pointer++)) : \
+// (*(string_b).pointer & 0xF8) == 0xF0 ? \
+// (*((string_a).pointer++) = *((string_b).pointer++), \
+// *((string_a).pointer++) = *((string_b).pointer++), \
+// *((string_a).pointer++) = *((string_b).pointer++), \
+// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
+//
+// /*
+// * Stack and queue management.
+// */
+//
+// yaml_DECLARE(int)
+// yaml_stack_extend(void **start, void **top, void **end);
+//
+// yaml_DECLARE(int)
+// yaml_queue_extend(void **start, void **head, void **tail, void **end);
+//
+// #define STACK_INIT(context,stack,size) \
+// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
+// ((stack).top = (stack).start, \
+// (stack).end = (stack).start+(size), \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// #define STACK_DEL(context,stack) \
+// (yaml_free((stack).start), \
+// (stack).start = (stack).top = (stack).end = 0)
+//
+// #define STACK_EMPTY(context,stack) \
+// ((stack).start == (stack).top)
+//
+// #define PUSH(context,stack,value) \
+// (((stack).top != (stack).end \
+// || yaml_stack_extend((void **)&(stack).start, \
+// (void **)&(stack).top, (void **)&(stack).end)) ? \
+// (*((stack).top++) = value, \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// #define POP(context,stack) \
+// (*(--(stack).top))
+//
+// #define QUEUE_INIT(context,queue,size) \
+// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
+// ((queue).head = (queue).tail = (queue).start, \
+// (queue).end = (queue).start+(size), \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// #define QUEUE_DEL(context,queue) \
+// (yaml_free((queue).start), \
+// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
+//
+// #define QUEUE_EMPTY(context,queue) \
+// ((queue).head == (queue).tail)
+//
+// #define ENQUEUE(context,queue,value) \
+// (((queue).tail != (queue).end \
+// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
+// (void **)&(queue).tail, (void **)&(queue).end)) ? \
+// (*((queue).tail++) = value, \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// #define DEQUEUE(context,queue) \
+// (*((queue).head++))
+//
+// #define QUEUE_INSERT(context,queue,index,value) \
+// (((queue).tail != (queue).end \
+// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
+// (void **)&(queue).tail, (void **)&(queue).end)) ? \
+// (memmove((queue).head+(index)+1,(queue).head+(index), \
+// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
+// *((queue).head+(index)) = value, \
+// (queue).tail++, \
+// 1) : \
+// ((context)->error = yaml_MEMORY_ERROR, \
+// 0))
+//
+// /*
+// * Token initializers.
+// */
+//
+// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
+// (memset(&(token), 0, sizeof(yaml_token_t)), \
+// (token).type = (token_type), \
+// (token).start_mark = (token_start_mark), \
+// (token).end_mark = (token_end_mark))
+//
+// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
+// (token).data.stream_start.encoding = (token_encoding))
+//
+// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
+//
+// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
+// (token).data.alias.value = (token_value))
+//
+// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
+// (token).data.anchor.value = (token_value))
+//
+// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
+// (token).data.tag.handle = (token_handle), \
+// (token).data.tag.suffix = (token_suffix))
+//
+// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
+// (token).data.scalar.value = (token_value), \
+// (token).data.scalar.length = (token_length), \
+// (token).data.scalar.style = (token_style))
+//
+// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
+// (token).data.version_directive.major = (token_major), \
+// (token).data.version_directive.minor = (token_minor))
+//
+// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
+// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
+// (token).data.tag_directive.handle = (token_handle), \
+// (token).data.tag_directive.prefix = (token_prefix))
+//
+// /*
+// * Event initializers.
+// */
+//
+// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
+// (memset(&(event), 0, sizeof(yaml_event_t)), \
+// (event).type = (event_type), \
+// (event).start_mark = (event_start_mark), \
+// (event).end_mark = (event_end_mark))
+//
+// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
+// (event).data.stream_start.encoding = (event_encoding))
+//
+// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
+//
+// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
+// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
+// (event).data.document_start.version_directive = (event_version_directive), \
+// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
+// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
+// (event).data.document_start.implicit = (event_implicit))
+//
+// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
+// (event).data.document_end.implicit = (event_implicit))
+//
+// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
+// (event).data.alias.anchor = (event_anchor))
+//
+// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
+// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
+// (event).data.scalar.anchor = (event_anchor), \
+// (event).data.scalar.tag = (event_tag), \
+// (event).data.scalar.value = (event_value), \
+// (event).data.scalar.length = (event_length), \
+// (event).data.scalar.plain_implicit = (event_plain_implicit), \
+// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
+// (event).data.scalar.style = (event_style))
+//
+// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
+// event_implicit,event_style,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
+// (event).data.sequence_start.anchor = (event_anchor), \
+// (event).data.sequence_start.tag = (event_tag), \
+// (event).data.sequence_start.implicit = (event_implicit), \
+// (event).data.sequence_start.style = (event_style))
+//
+// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
+//
+// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
+// event_implicit,event_style,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
+// (event).data.mapping_start.anchor = (event_anchor), \
+// (event).data.mapping_start.tag = (event_tag), \
+// (event).data.mapping_start.implicit = (event_implicit), \
+// (event).data.mapping_start.style = (event_style))
+//
+// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
+// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
+//
+// /*
+// * Document initializer.
+// */
+//
+// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
+// document_version_directive,document_tag_directives_start, \
+// document_tag_directives_end,document_start_implicit, \
+// document_end_implicit,document_start_mark,document_end_mark) \
+// (memset(&(document), 0, sizeof(yaml_document_t)), \
+// (document).nodes.start = (document_nodes_start), \
+// (document).nodes.end = (document_nodes_end), \
+// (document).nodes.top = (document_nodes_start), \
+// (document).version_directive = (document_version_directive), \
+// (document).tag_directives.start = (document_tag_directives_start), \
+// (document).tag_directives.end = (document_tag_directives_end), \
+// (document).start_implicit = (document_start_implicit), \
+// (document).end_implicit = (document_end_implicit), \
+// (document).start_mark = (document_start_mark), \
+// (document).end_mark = (document_end_mark))
+//
+// /*
+// * Node initializers.
+// */
+//
+// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
+// (memset(&(node), 0, sizeof(yaml_node_t)), \
+// (node).type = (node_type), \
+// (node).tag = (node_tag), \
+// (node).start_mark = (node_start_mark), \
+// (node).end_mark = (node_end_mark))
+//
+// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
+// node_style,start_mark,end_mark) \
+// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
+// (node).data.scalar.value = (node_value), \
+// (node).data.scalar.length = (node_length), \
+// (node).data.scalar.style = (node_style))
+//
+// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
+// node_style,start_mark,end_mark) \
+// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
+// (node).data.sequence.items.start = (node_items_start), \
+// (node).data.sequence.items.end = (node_items_end), \
+// (node).data.sequence.items.top = (node_items_start), \
+// (node).data.sequence.style = (node_style))
+//
+// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
+// node_style,start_mark,end_mark) \
+// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
+// (node).data.mapping.pairs.start = (node_pairs_start), \
+// (node).data.mapping.pairs.end = (node_pairs_end), \
+// (node).data.mapping.pairs.top = (node_pairs_start), \
+// (node).data.mapping.style = (node_style))
+//
diff --git a/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
new file mode 100644
index 00000000..d608dbb3
--- /dev/null
+++ b/vendor/github.com/cloudfoundry-incubator/candiedyaml/yamlh.go
@@ -0,0 +1,953 @@
+/*
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package candiedyaml
+
+import (
+ "fmt"
+ "io"
+)
+
+/** The version directive data. */
+type yaml_version_directive_t struct {
+ major int // The major version number
+ minor int // The minor version number
+}
+
+/** The tag directive data. */
+type yaml_tag_directive_t struct {
+ handle []byte // The tag handle
+ prefix []byte // The tag prefix
+}
+
+/** The stream encoding. */
+type yaml_encoding_t int
+
+const (
+ /** Let the parser choose the encoding. */
+ yaml_ANY_ENCODING yaml_encoding_t = iota
+	/** The default UTF-8 encoding. */
+ yaml_UTF8_ENCODING
+ /** The UTF-16-LE encoding with BOM. */
+ yaml_UTF16LE_ENCODING
+ /** The UTF-16-BE encoding with BOM. */
+ yaml_UTF16BE_ENCODING
+)
+
+/** Line break types. */
+type yaml_break_t int
+
+const (
+ yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
+ yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
+ yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
+ yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
+)
+
+/** Many bad things could happen with the parser and emitter. */
+type YAML_error_type_t int
+
+const (
+ /** No error is produced. */
+ yaml_NO_ERROR YAML_error_type_t = iota
+
+ /** Cannot allocate or reallocate a block of memory. */
+ yaml_MEMORY_ERROR
+
+ /** Cannot read or decode the input stream. */
+ yaml_READER_ERROR
+ /** Cannot scan the input stream. */
+ yaml_SCANNER_ERROR
+ /** Cannot parse the input stream. */
+ yaml_PARSER_ERROR
+ /** Cannot compose a YAML document. */
+ yaml_COMPOSER_ERROR
+
+ /** Cannot write to the output stream. */
+ yaml_WRITER_ERROR
+ /** Cannot emit a YAML stream. */
+ yaml_EMITTER_ERROR
+)
+
+/** The pointer position. */
+type YAML_mark_t struct {
+ /** The position index. */
+ index int
+
+ /** The position line. */
+ line int
+
+ /** The position column. */
+ column int
+}
+
+func (m YAML_mark_t) String() string {
+ return fmt.Sprintf("line %d, column %d", m.line, m.column)
+}
+
+/** @} */
+
+/**
+ * @defgroup styles Node Styles
+ * @{
+ */
+
+type yaml_style_t int
+
+/** Scalar styles. */
+type yaml_scalar_style_t yaml_style_t
+
+const (
+ /** Let the emitter choose the style. */
+ yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
+
+ /** The plain scalar style. */
+ yaml_PLAIN_SCALAR_STYLE
+
+ /** The single-quoted scalar style. */
+ yaml_SINGLE_QUOTED_SCALAR_STYLE
+ /** The double-quoted scalar style. */
+ yaml_DOUBLE_QUOTED_SCALAR_STYLE
+
+ /** The literal scalar style. */
+ yaml_LITERAL_SCALAR_STYLE
+ /** The folded scalar style. */
+ yaml_FOLDED_SCALAR_STYLE
+)
+
+/** Sequence styles. */
+type yaml_sequence_style_t yaml_style_t
+
+const (
+ /** Let the emitter choose the style. */
+ yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
+
+ /** The block sequence style. */
+ yaml_BLOCK_SEQUENCE_STYLE
+ /** The flow sequence style. */
+ yaml_FLOW_SEQUENCE_STYLE
+)
+
+/** Mapping styles. */
+type yaml_mapping_style_t yaml_style_t
+
+const (
+ /** Let the emitter choose the style. */
+ yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
+
+ /** The block mapping style. */
+ yaml_BLOCK_MAPPING_STYLE
+ /** The flow mapping style. */
+ yaml_FLOW_MAPPING_STYLE
+
+/* yaml_FLOW_SET_MAPPING_STYLE */
+)
+
+/** @} */
+
+/**
+ * @defgroup tokens Tokens
+ * @{
+ */
+
+/** Token types. */
+type yaml_token_type_t int
+
+const (
+ /** An empty token. */
+ yaml_NO_TOKEN yaml_token_type_t = iota
+
+ /** A STREAM-START token. */
+ yaml_STREAM_START_TOKEN
+ /** A STREAM-END token. */
+ yaml_STREAM_END_TOKEN
+
+ /** A VERSION-DIRECTIVE token. */
+ yaml_VERSION_DIRECTIVE_TOKEN
+ /** A TAG-DIRECTIVE token. */
+ yaml_TAG_DIRECTIVE_TOKEN
+ /** A DOCUMENT-START token. */
+ yaml_DOCUMENT_START_TOKEN
+ /** A DOCUMENT-END token. */
+ yaml_DOCUMENT_END_TOKEN
+
+ /** A BLOCK-SEQUENCE-START token. */
+ yaml_BLOCK_SEQUENCE_START_TOKEN
+ /** A BLOCK-SEQUENCE-END token. */
+ yaml_BLOCK_MAPPING_START_TOKEN
+ /** A BLOCK-END token. */
+ yaml_BLOCK_END_TOKEN
+
+ /** A FLOW-SEQUENCE-START token. */
+ yaml_FLOW_SEQUENCE_START_TOKEN
+ /** A FLOW-SEQUENCE-END token. */
+ yaml_FLOW_SEQUENCE_END_TOKEN
+ /** A FLOW-MAPPING-START token. */
+ yaml_FLOW_MAPPING_START_TOKEN
+ /** A FLOW-MAPPING-END token. */
+ yaml_FLOW_MAPPING_END_TOKEN
+
+ /** A BLOCK-ENTRY token. */
+ yaml_BLOCK_ENTRY_TOKEN
+ /** A FLOW-ENTRY token. */
+ yaml_FLOW_ENTRY_TOKEN
+ /** A KEY token. */
+ yaml_KEY_TOKEN
+ /** A VALUE token. */
+ yaml_VALUE_TOKEN
+
+ /** An ALIAS token. */
+ yaml_ALIAS_TOKEN
+ /** An ANCHOR token. */
+ yaml_ANCHOR_TOKEN
+ /** A TAG token. */
+ yaml_TAG_TOKEN
+ /** A SCALAR token. */
+ yaml_SCALAR_TOKEN
+)
+
+/** The token structure. */
+type yaml_token_t struct {
+
+ /** The token type. */
+ token_type yaml_token_type_t
+
+ /** The token data. */
+ /** The stream start (for @c yaml_STREAM_START_TOKEN). */
+ encoding yaml_encoding_t
+
+ /** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN,yaml_TAG_TOKEN ). */
+ /** The anchor (for @c ). */
+ /** The scalar value (for @c ). */
+ value []byte
+
+ /** The tag suffix. */
+ suffix []byte
+
+ /** The scalar value (for @c yaml_SCALAR_TOKEN). */
+ /** The scalar style. */
+ style yaml_scalar_style_t
+
+ /** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
+ version_directive yaml_version_directive_t
+
+ /** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
+ prefix []byte
+
+ /** The beginning of the token. */
+ start_mark YAML_mark_t
+ /** The end of the token. */
+ end_mark YAML_mark_t
+
+ major, minor int
+}
+
+/**
+ * @defgroup events Events
+ * @{
+ */
+
+/** Event types. */
+type yaml_event_type_t int
+
+const (
+ /** An empty event. */
+ yaml_NO_EVENT yaml_event_type_t = iota
+
+ /** A STREAM-START event. */
+ yaml_STREAM_START_EVENT
+ /** A STREAM-END event. */
+ yaml_STREAM_END_EVENT
+
+ /** A DOCUMENT-START event. */
+ yaml_DOCUMENT_START_EVENT
+ /** A DOCUMENT-END event. */
+ yaml_DOCUMENT_END_EVENT
+
+ /** An ALIAS event. */
+ yaml_ALIAS_EVENT
+ /** A SCALAR event. */
+ yaml_SCALAR_EVENT
+
+ /** A SEQUENCE-START event. */
+ yaml_SEQUENCE_START_EVENT
+ /** A SEQUENCE-END event. */
+ yaml_SEQUENCE_END_EVENT
+
+ /** A MAPPING-START event. */
+ yaml_MAPPING_START_EVENT
+ /** A MAPPING-END event. */
+ yaml_MAPPING_END_EVENT
+)
+
+/** The event structure. */
+type yaml_event_t struct {
+
+ /** The event type. */
+ event_type yaml_event_type_t
+
+ /** The stream parameters (for @c yaml_STREAM_START_EVENT). */
+ encoding yaml_encoding_t
+
+ /** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
+ version_directive *yaml_version_directive_t
+
+ /** The beginning and end of the tag directives list. */
+ tag_directives []yaml_tag_directive_t
+
+ /** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT,yaml_MAPPING_START_EVENT). */
+ /** Is the document indicator implicit? */
+ implicit bool
+
+ /** The alias parameters (for @c yaml_ALIAS_EVENT,yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
+ /** The anchor. */
+ anchor []byte
+
+ /** The scalar parameters (for @c yaml_SCALAR_EVENT,yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
+ /** The tag. */
+ tag []byte
+ /** The scalar value. */
+ value []byte
+
+ /** Is the tag optional for the plain style? */
+ plain_implicit bool
+ /** Is the tag optional for any non-plain style? */
+ quoted_implicit bool
+
+ /** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
+ /** The sequence style. */
+ /** The scalar style. */
+ style yaml_style_t
+
+ /** The beginning of the event. */
+ start_mark, end_mark YAML_mark_t
+}
+
+/**
+ * @defgroup nodes Nodes
+ * @{
+ */
+
+const (
+ /** The tag @c !!null with the only possible value: @c null. */
+ yaml_NULL_TAG = "tag:yaml.org,2002:null"
+	/** The tag @c !!bool with the values: @c true and @c false. */
+ yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
+ /** The tag @c !!str for string values. */
+ yaml_STR_TAG = "tag:yaml.org,2002:str"
+ /** The tag @c !!int for integer values. */
+ yaml_INT_TAG = "tag:yaml.org,2002:int"
+ /** The tag @c !!float for float values. */
+ yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
+ /** The tag @c !!timestamp for date and time values. */
+ yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
+
+ /** The tag @c !!seq is used to denote sequences. */
+ yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
+ /** The tag @c !!map is used to denote mapping. */
+ yaml_MAP_TAG = "tag:yaml.org,2002:map"
+
+ /** The default scalar tag is @c !!str. */
+ yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
+ /** The default sequence tag is @c !!seq. */
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
+ /** The default mapping tag is @c !!map. */
+ yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
+
+ yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
+)
+
+/** Node types. */
+type yaml_node_type_t int
+
+const (
+ /** An empty node. */
+ yaml_NO_NODE yaml_node_type_t = iota
+
+ /** A scalar node. */
+ yaml_SCALAR_NODE
+ /** A sequence node. */
+ yaml_SEQUENCE_NODE
+ /** A mapping node. */
+ yaml_MAPPING_NODE
+)
+
+/** An element of a sequence node. */
+type yaml_node_item_t int
+
+/** An element of a mapping node. */
+type yaml_node_pair_t struct {
+ /** The key of the element. */
+ key int
+ /** The value of the element. */
+ value int
+}
+
+/** The node structure. */
+type yaml_node_t struct {
+
+ /** The node type. */
+ node_type yaml_node_type_t
+
+ /** The node tag. */
+ tag []byte
+
+ /** The scalar parameters (for @c yaml_SCALAR_NODE). */
+ scalar struct {
+ /** The scalar value. */
+ value []byte
+ /** The scalar style. */
+ style yaml_scalar_style_t
+ }
+
+ /** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
+ sequence struct {
+ /** The stack of sequence items. */
+ items []yaml_node_item_t
+ /** The sequence style. */
+ style yaml_sequence_style_t
+ }
+
+ /** The mapping parameters (for @c yaml_MAPPING_NODE). */
+ mapping struct {
+ /** The stack of mapping pairs (key, value). */
+ pairs []yaml_node_pair_t
+ /** The mapping style. */
+ style yaml_mapping_style_t
+ }
+
+ /** The beginning of the node. */
+ start_mark YAML_mark_t
+ /** The end of the node. */
+ end_mark YAML_mark_t
+}
+
+/** The document structure. */
+type yaml_document_t struct {
+
+ /** The document nodes. */
+ nodes []yaml_node_t
+
+ /** The version directive. */
+ version_directive *yaml_version_directive_t
+
+ /** The list of tag directives. */
+ tags []yaml_tag_directive_t
+
+ /** Is the document start indicator implicit? */
+ start_implicit bool
+ /** Is the document end indicator implicit? */
+ end_implicit bool
+
+ /** The beginning of the document. */
+ start_mark YAML_mark_t
+ /** The end of the document. */
+ end_mark YAML_mark_t
+}
+
+/**
+ * The prototype of a read handler.
+ *
+ * The read handler is called when the parser needs to read more bytes from the
+ * source. The handler should write not more than @a size bytes to the @a
+ * buffer. The number of written bytes should be set to the @a length variable.
+ *
+ * @param[in,out] data A pointer to an application data specified by
+ * yaml_parser_set_input().
+ * @param[out] buffer The buffer to write the data from the source.
+ * @param[in] size The size of the buffer.
+ * @param[out] size_read The actual number of bytes read from the source.
+ *
+ * @returns On success, the handler should return @c 1. If the handler failed,
+ * the returned value should be @c 0. On EOF, the handler should set the
+ * @a size_read to @c 0 and return @c 1.
+ */
+
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+/**
+ * This structure holds information about a potential simple key.
+ */
+
+type yaml_simple_key_t struct {
+ /** Is a simple key possible? */
+ possible bool
+
+ /** Is a simple key required? */
+ required bool
+
+ /** The number of the token. */
+ token_number int
+
+ /** The position mark. */
+ mark YAML_mark_t
+}
+
+/**
+ * The states of the parser.
+ */
+type yaml_parser_state_t int
+
+const (
+ /** Expect STREAM-START. */
+ yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+ /** Expect the beginning of an implicit document. */
+ yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
+ /** Expect DOCUMENT-START. */
+ yaml_PARSE_DOCUMENT_START_STATE
+ /** Expect the content of a document. */
+ yaml_PARSE_DOCUMENT_CONTENT_STATE
+ /** Expect DOCUMENT-END. */
+ yaml_PARSE_DOCUMENT_END_STATE
+ /** Expect a block node. */
+ yaml_PARSE_BLOCK_NODE_STATE
+ /** Expect a block node or indentless sequence. */
+ yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
+ /** Expect a flow node. */
+ yaml_PARSE_FLOW_NODE_STATE
+ /** Expect the first entry of a block sequence. */
+ yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
+ /** Expect an entry of a block sequence. */
+ yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
+ /** Expect an entry of an indentless sequence. */
+ yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
+ /** Expect the first key of a block mapping. */
+ yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
+ /** Expect a block mapping key. */
+ yaml_PARSE_BLOCK_MAPPING_KEY_STATE
+ /** Expect a block mapping value. */
+ yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
+ /** Expect the first entry of a flow sequence. */
+ yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
+ /** Expect an entry of a flow sequence. */
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
+ /** Expect a key of an ordered mapping. */
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
+ /** Expect a value of an ordered mapping. */
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
+	/** Expect the end of an ordered mapping entry. */
+ yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
+ /** Expect the first key of a flow mapping. */
+ yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
+ /** Expect a key of a flow mapping. */
+ yaml_PARSE_FLOW_MAPPING_KEY_STATE
+ /** Expect a value of a flow mapping. */
+ yaml_PARSE_FLOW_MAPPING_VALUE_STATE
+ /** Expect an empty value of a flow mapping. */
+ yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
+ /** Expect nothing. */
+ yaml_PARSE_END_STATE
+)
+
+/**
+ * This structure holds aliases data.
+ */
+
+type yaml_alias_data_t struct {
+ /** The anchor. */
+ anchor []byte
+ /** The node id. */
+ index int
+ /** The anchor mark. */
+ mark YAML_mark_t
+}
+
+/**
+ * The parser structure.
+ *
+ * All members are internal. Manage the structure using the @c yaml_parser_
+ * family of functions.
+ */
+
+type yaml_parser_t struct {
+
+ /**
+ * @name Error handling
+ * @{
+ */
+
+ /** Error type. */
+ error YAML_error_type_t
+ /** Error description. */
+ problem string
+	/** The byte about which the problem occurred. */
+ problem_offset int
+ /** The problematic value (@c -1 is none). */
+ problem_value int
+ /** The problem position. */
+ problem_mark YAML_mark_t
+ /** The error context. */
+ context string
+ /** The context position. */
+ context_mark YAML_mark_t
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Reader stuff
+ * @{
+ */
+
+ /** Read handler. */
+ read_handler yaml_read_handler_t
+
+ /** Reader input data. */
+ input_reader io.Reader
+ input []byte
+ input_pos int
+
+ /** EOF flag */
+ eof bool
+
+ /** The working buffer. */
+ buffer []byte
+ buffer_pos int
+
+ /* The number of unread characters in the buffer. */
+ unread int
+
+ /** The raw buffer. */
+ raw_buffer []byte
+ raw_buffer_pos int
+
+ /** The input encoding. */
+ encoding yaml_encoding_t
+
+ /** The offset of the current position (in bytes). */
+ offset int
+
+ /** The mark of the current position. */
+ mark YAML_mark_t
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Scanner stuff
+ * @{
+ */
+
+ /** Have we started to scan the input stream? */
+ stream_start_produced bool
+
+ /** Have we reached the end of the input stream? */
+ stream_end_produced bool
+
+ /** The number of unclosed '[' and '{' indicators. */
+ flow_level int
+
+ /** The tokens queue. */
+ tokens []yaml_token_t
+ tokens_head int
+
+ /** The number of tokens fetched from the queue. */
+ tokens_parsed int
+
+ /* Does the tokens queue contain a token ready for dequeueing. */
+ token_available bool
+
+ /** The indentation levels stack. */
+ indents []int
+
+ /** The current indentation level. */
+ indent int
+
+ /** May a simple key occur at the current position? */
+ simple_key_allowed bool
+
+ /** The stack of simple keys. */
+ simple_keys []yaml_simple_key_t
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Parser stuff
+ * @{
+ */
+
+ /** The parser states stack. */
+ states []yaml_parser_state_t
+
+ /** The current parser state. */
+ state yaml_parser_state_t
+
+ /** The stack of marks. */
+ marks []YAML_mark_t
+
+ /** The list of TAG directives. */
+ tag_directives []yaml_tag_directive_t
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Dumper stuff
+ * @{
+ */
+
+ /** The alias data. */
+ aliases []yaml_alias_data_t
+
+ /** The currently parsed document. */
+ document *yaml_document_t
+
+ /**
+ * @}
+ */
+
+}
+
+/**
+ * The prototype of a write handler.
+ *
+ * The write handler is called when the emitter needs to flush the accumulated
+ * characters to the output. The handler should write @a size bytes of the
+ * @a buffer to the output.
+ *
+ * @param[in,out] data A pointer to an application data specified by
+ * yaml_emitter_set_output().
+ * @param[in] buffer The buffer with bytes to be written.
+ * @param[in] size The size of the buffer.
+ *
+ * @returns On success, the handler should return @c 1. If the handler failed,
+ * the returned value should be @c 0.
+ */
+
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+/** The emitter states. */
+type yaml_emitter_state_t int
+
+const (
+ /** Expect STREAM-START. */
+ yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+ /** Expect the first DOCUMENT-START or STREAM-END. */
+ yaml_EMIT_FIRST_DOCUMENT_START_STATE
+ /** Expect DOCUMENT-START or STREAM-END. */
+ yaml_EMIT_DOCUMENT_START_STATE
+ /** Expect the content of a document. */
+ yaml_EMIT_DOCUMENT_CONTENT_STATE
+ /** Expect DOCUMENT-END. */
+ yaml_EMIT_DOCUMENT_END_STATE
+ /** Expect the first item of a flow sequence. */
+ yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
+ /** Expect an item of a flow sequence. */
+ yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
+ /** Expect the first key of a flow mapping. */
+ yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
+ /** Expect a key of a flow mapping. */
+ yaml_EMIT_FLOW_MAPPING_KEY_STATE
+ /** Expect a value for a simple key of a flow mapping. */
+ yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
+ /** Expect a value of a flow mapping. */
+ yaml_EMIT_FLOW_MAPPING_VALUE_STATE
+ /** Expect the first item of a block sequence. */
+ yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
+ /** Expect an item of a block sequence. */
+ yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
+ /** Expect the first key of a block mapping. */
+ yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
+ /** Expect the key of a block mapping. */
+ yaml_EMIT_BLOCK_MAPPING_KEY_STATE
+ /** Expect a value for a simple key of a block mapping. */
+ yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
+ /** Expect a value of a block mapping. */
+ yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
+ /** Expect nothing. */
+ yaml_EMIT_END_STATE
+)
+
+/**
+ * The emitter structure.
+ *
+ * All members are internal. Manage the structure using the @c yaml_emitter_
+ * family of functions.
+ */
+
+type yaml_emitter_t struct {
+
+ /**
+ * @name Error handling
+ * @{
+ */
+
+ /** Error type. */
+ error YAML_error_type_t
+ /** Error description. */
+ problem string
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Writer stuff
+ * @{
+ */
+
+ /** Write handler. */
+ write_handler yaml_write_handler_t
+
+ /** Standard (string or file) output data. */
+ output_buffer *[]byte
+ output_writer io.Writer
+
+ /** The working buffer. */
+ buffer []byte
+ buffer_pos int
+
+ /** The raw buffer. */
+ raw_buffer []byte
+ raw_buffer_pos int
+
+ /** The stream encoding. */
+ encoding yaml_encoding_t
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Emitter stuff
+ * @{
+ */
+
+ /** If the output is in the canonical style? */
+ canonical bool
+ /** The number of indentation spaces. */
+ best_indent int
+ /** The preferred width of the output lines. */
+ best_width int
+ /** Allow unescaped non-ASCII characters? */
+ unicode bool
+ /** The preferred line break. */
+ line_break yaml_break_t
+
+ /** The stack of states. */
+ states []yaml_emitter_state_t
+
+ /** The current emitter state. */
+ state yaml_emitter_state_t
+
+ /** The event queue. */
+ events []yaml_event_t
+ events_head int
+
+ /** The stack of indentation levels. */
+ indents []int
+
+ /** The list of tag directives. */
+ tag_directives []yaml_tag_directive_t
+
+ /** The current indentation level. */
+ indent int
+
+ /** The current flow level. */
+ flow_level int
+
+ /** Is it the document root context? */
+ root_context bool
+ /** Is it a sequence context? */
+ sequence_context bool
+ /** Is it a mapping context? */
+ mapping_context bool
+ /** Is it a simple mapping key context? */
+ simple_key_context bool
+
+ /** The current line. */
+ line int
+ /** The current column. */
+ column int
+ /** If the last character was a whitespace? */
+ whitespace bool
+ /** If the last character was an indentation character (' ', '-', '?', ':')? */
+ indention bool
+ /** If an explicit document end is required? */
+ open_ended bool
+
+ /** Anchor analysis. */
+ anchor_data struct {
+ /** The anchor value. */
+ anchor []byte
+ /** Is it an alias? */
+ alias bool
+ }
+
+ /** Tag analysis. */
+ tag_data struct {
+ /** The tag handle. */
+ handle []byte
+ /** The tag suffix. */
+ suffix []byte
+ }
+
+ /** Scalar analysis. */
+ scalar_data struct {
+ /** The scalar value. */
+ value []byte
+ /** Does the scalar contain line breaks? */
+ multiline bool
+		/** Can the scalar be expressed in the flow plain style? */
+ flow_plain_allowed bool
+ /** Can the scalar be expressed in the block plain style? */
+ block_plain_allowed bool
+ /** Can the scalar be expressed in the single quoted style? */
+ single_quoted_allowed bool
+ /** Can the scalar be expressed in the literal or folded styles? */
+ block_allowed bool
+ /** The output style. */
+ style yaml_scalar_style_t
+ }
+
+ /**
+ * @}
+ */
+
+ /**
+ * @name Dumper stuff
+ * @{
+ */
+
+ /** If the stream was already opened? */
+ opened bool
+ /** If the stream was already closed? */
+ closed bool
+
+ /** The information associated with the document nodes. */
+ anchors *struct {
+ /** The number of references. */
+ references int
+ /** The anchor id. */
+ anchor int
+ /** If the node has been emitted? */
+ serialized bool
+ }
+
+ /** The last assigned anchor id. */
+ last_anchor_id int
+
+ /** The currently emitted document. */
+ document *yaml_document_t
+
+ /**
+ * @}
+ */
+
+}
diff --git a/vendor/github.com/djherbis/fscache/LICENSE b/vendor/github.com/djherbis/fscache/LICENSE
new file mode 100644
index 00000000..1e7b7cc0
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Dustin H
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/djherbis/fscache/README.md b/vendor/github.com/djherbis/fscache/README.md
new file mode 100644
index 00000000..bae10838
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/README.md
@@ -0,0 +1,93 @@
+fscache
+==========
+
+[![GoDoc](https://godoc.org/github.com/djherbis/fscache?status.svg)](https://godoc.org/github.com/djherbis/fscache)
+[![Release](https://img.shields.io/github/release/djherbis/fscache.svg)](https://github.com/djherbis/fscache/releases/latest)
+[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt)
+[![Build Status](https://travis-ci.org/djherbis/fscache.svg?branch=master)](https://travis-ci.org/djherbis/fscache)
+[![Coverage Status](https://coveralls.io/repos/djherbis/fscache/badge.svg?branch=master)](https://coveralls.io/r/djherbis/fscache?branch=master)
+[![Go Report Card](https://goreportcard.com/badge/github.com/djherbis/fscache)](https://goreportcard.com/report/github.com/djherbis/fscache)
+
+Usage
+------------
+Streaming File Cache for #golang
+
+fscache allows multiple readers to read from a cache while it's being written to. [blog post](https://djherbis.github.io/post/fscache/)
+
+Using the Cache directly:
+
+```go
+package main
+
+import (
+ "io"
+ "log"
+ "os"
+ "time"
+
+ "gopkg.in/djherbis/fscache.v0"
+)
+
+func main() {
+
+ // create the cache, keys expire after 1 hour.
+ c, err := fscache.New("./cache", 0755, time.Hour)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+
+ // wipe the cache when done
+ defer c.Clean()
+
+	// Get() and its streams can be called concurrently but just for example:
+ for i := 0; i < 3; i++ {
+ r, w, err := c.Get("stream")
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+
+ if w != nil { // a new stream, write to it.
+ go func(){
+ w.Write([]byte("hello world\n"))
+ w.Close()
+ }()
+ }
+
+ // the stream has started, read from it
+ io.Copy(os.Stdout, r)
+ r.Close()
+ }
+}
+```
+
+A Caching Middle-ware:
+
+```go
+package main
+
+import(
+ "net/http"
+ "time"
+
+ "gopkg.in/djherbis/fscache.v0"
+)
+
+func main(){
+ c, err := fscache.New("./cache", 0700, 0)
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+
+ handler := func(w http.ResponseWriter, r *http.Request) {
+ fmt.Fprintf(w, "%v: %s", time.Now(), "hello world")
+ }
+
+ http.ListenAndServe(":8080", fscache.Handler(c, http.HandlerFunc(handler)))
+}
+```
+
+Installation
+------------
+```sh
+go get gopkg.in/djherbis/fscache.v0
+```
diff --git a/vendor/github.com/djherbis/fscache/distrib.go b/vendor/github.com/djherbis/fscache/distrib.go
new file mode 100644
index 00000000..60994cc5
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/distrib.go
@@ -0,0 +1,85 @@
+package fscache
+
+import (
+ "bytes"
+ "crypto/sha1"
+ "encoding/binary"
+ "io"
+)
+
+// Distributor provides a way to partition keys into Caches.
+type Distributor interface {
+
+ // GetCache will always return the same Cache for the same key.
+ GetCache(key string) Cache
+
+ // Clean should wipe all the caches this Distributor manages
+ Clean() error
+}
+
+// stdDistribution distributes the keyspace evenly.
+func stdDistribution(key string, n uint64) uint64 {
+ h := sha1.New()
+ io.WriteString(h, key)
+ buf := bytes.NewBuffer(h.Sum(nil)[:8])
+ i, _ := binary.ReadUvarint(buf)
+ return i % n
+}
+
+// NewDistributor returns a Distributor which evenly distributes the keyspace
+// into the passed caches.
+func NewDistributor(caches ...Cache) Distributor {
+ if len(caches) == 0 {
+ return nil
+ }
+ return &distrib{
+ distribution: stdDistribution,
+ caches: caches,
+ size: uint64(len(caches)),
+ }
+}
+
+type distrib struct {
+ distribution func(key string, n uint64) uint64
+ caches []Cache
+ size uint64
+}
+
+func (d *distrib) GetCache(key string) Cache {
+ return d.caches[d.distribution(key, d.size)]
+}
+
+// BUG(djherbis): Return an error if cleaning fails
+func (d *distrib) Clean() error {
+ for _, c := range d.caches {
+ c.Clean()
+ }
+ return nil
+}
+
+// NewPartition returns a Cache which uses the Caches defined by the passed Distributor.
+func NewPartition(d Distributor) Cache {
+ return &partition{
+ distributor: d,
+ }
+}
+
+type partition struct {
+ distributor Distributor
+}
+
+func (p *partition) Get(key string) (ReadAtCloser, io.WriteCloser, error) {
+ return p.distributor.GetCache(key).Get(key)
+}
+
+func (p *partition) Remove(key string) error {
+ return p.distributor.GetCache(key).Remove(key)
+}
+
+func (p *partition) Exists(key string) bool {
+ return p.distributor.GetCache(key).Exists(key)
+}
+
+func (p *partition) Clean() error {
+ return p.distributor.Clean()
+}
diff --git a/vendor/github.com/djherbis/fscache/fs.go b/vendor/github.com/djherbis/fscache/fs.go
new file mode 100644
index 00000000..91aaae34
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/fs.go
@@ -0,0 +1,199 @@
+package fscache
+
+import (
+ "bytes"
+ "crypto/md5"
+ "crypto/rand"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "gopkg.in/djherbis/atime.v1"
+ "gopkg.in/djherbis/stream.v1"
+)
+
+// FileSystem is used as the source for a Cache.
+type FileSystem interface {
+ // Stream FileSystem
+ stream.FileSystem
+
+	// Reload should look through the FileSystem and call the supplied fn
+ // with the key/filename pairs that are found.
+ Reload(func(key, name string)) error
+
+ // RemoveAll should empty the FileSystem of all files.
+ RemoveAll() error
+
+ // AccessTimes takes a File.Name() and returns the last time the file was read,
+ // and the last time it was written to.
+ // It will be used to check expiry of a file, and must be concurrent safe
+ // with modifications to the FileSystem (writes, reads etc.)
+ AccessTimes(name string) (rt, wt time.Time, err error)
+}
+
+type stdFs struct {
+ root string
+}
+
+// NewFs returns a FileSystem rooted at directory dir.
+// Dir is created with perms if it doesn't exist.
+func NewFs(dir string, mode os.FileMode) (FileSystem, error) {
+ return &stdFs{root: dir}, os.MkdirAll(dir, mode)
+}
+
+func (fs *stdFs) Reload(add func(key, name string)) error {
+ files, err := ioutil.ReadDir(fs.root)
+ if err != nil {
+ return err
+ }
+
+ addfiles := make(map[string]struct {
+ os.FileInfo
+ key string
+ })
+
+ for _, f := range files {
+
+ if strings.HasSuffix(f.Name(), ".key") {
+ continue
+ }
+
+ key, err := fs.getKey(f.Name())
+ if err != nil {
+ return err
+ }
+ fi, ok := addfiles[key]
+
+ if !ok || fi.ModTime().Before(f.ModTime()) {
+ if ok {
+ fs.Remove(fi.Name())
+ }
+ addfiles[key] = struct {
+ os.FileInfo
+ key string
+ }{
+ FileInfo: f,
+ key: key,
+ }
+ } else {
+ fs.Remove(f.Name())
+ }
+
+ }
+
+ for _, f := range addfiles {
+ path, err := filepath.Abs(filepath.Join(fs.root, f.Name()))
+ if err != nil {
+ return err
+ }
+ add(f.key, path)
+ }
+
+ return nil
+}
+
+func (fs *stdFs) Create(name string) (stream.File, error) {
+ name, err := fs.makeName(name)
+ if err != nil {
+ return nil, err
+ }
+ return fs.create(name)
+}
+
+func (fs *stdFs) create(name string) (stream.File, error) {
+ return os.OpenFile(filepath.Join(fs.root, name), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
+}
+
+func (fs *stdFs) Open(name string) (stream.File, error) {
+ return os.Open(name)
+}
+
+func (fs *stdFs) Remove(name string) error {
+ os.Remove(fmt.Sprintf("%s.key", name))
+ return os.Remove(name)
+}
+
+func (fs *stdFs) RemoveAll() error {
+ return os.RemoveAll(fs.root)
+}
+
+func (fs *stdFs) AccessTimes(name string) (rt, wt time.Time, err error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return rt, wt, err
+ }
+ return atime.Get(fi), fi.ModTime(), nil
+}
+
+const (
+ saltSize = 8
+ maxShort = 20
+ shortPrefix = "s"
+ longPrefix = "l"
+)
+
+func salt() string {
+ buf := bytes.NewBufferString("")
+ enc := base64.NewEncoder(base64.URLEncoding, buf)
+ io.CopyN(enc, rand.Reader, saltSize)
+ return buf.String()
+}
+
+func tob64(s string) string {
+ buf := bytes.NewBufferString("")
+ enc := base64.NewEncoder(base64.URLEncoding, buf)
+ enc.Write([]byte(s))
+ enc.Close()
+ return buf.String()
+}
+
+func fromb64(s string) string {
+ buf := bytes.NewBufferString(s)
+ dec := base64.NewDecoder(base64.URLEncoding, buf)
+ out := bytes.NewBufferString("")
+ io.Copy(out, dec)
+ return out.String()
+}
+
+func (fs *stdFs) makeName(key string) (string, error) {
+ b64key := tob64(key)
+ // short name
+ if len(b64key) < maxShort {
+ return fmt.Sprintf("%s%s%s", shortPrefix, salt(), b64key), nil
+ }
+
+ // long name
+ hash := md5.Sum([]byte(key))
+ name := fmt.Sprintf("%s%s%x", longPrefix, salt(), hash[:])
+ f, err := fs.create(fmt.Sprintf("%s.key", name))
+ if err != nil {
+ return "", err
+ }
+ _, err = f.Write([]byte(key))
+ f.Close()
+ return name, err
+}
+
+func (fs *stdFs) getKey(name string) (string, error) {
+ // short name
+ if strings.HasPrefix(name, shortPrefix) {
+ return fromb64(strings.TrimPrefix(name, shortPrefix)[saltSize:]), nil
+ }
+
+ // long name
+ f, err := fs.Open(filepath.Join(fs.root, fmt.Sprintf("%s.key", name)))
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ key, err := ioutil.ReadAll(f)
+ if err != nil {
+ return "", err
+ }
+ return string(key), nil
+}
diff --git a/vendor/github.com/djherbis/fscache/fscache.go b/vendor/github.com/djherbis/fscache/fscache.go
new file mode 100644
index 00000000..2d8acb1d
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/fscache.go
@@ -0,0 +1,303 @@
+package fscache
+
+import (
+ "io"
+ "os"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "gopkg.in/djherbis/stream.v1"
+)
+
+// Cache works like a concurrent-safe map for streams.
+type Cache interface {
+
+ // Get manages access to the streams in the cache.
+ // If the key does not exist, w != nil and you can start writing to the stream.
+ // If the key does exist, w == nil.
+ // r will always be non-nil as long as err == nil and you must close r when you're done reading.
+ // Get can be called concurrently, and writing and reading is concurrent safe.
+ Get(key string) (ReadAtCloser, io.WriteCloser, error)
+
+ // Remove deletes the stream from the cache, blocking until the underlying
+ // file can be deleted (all active streams finish with it).
+ // It is safe to call Remove concurrently with Get.
+ Remove(key string) error
+
+ // Exists checks if a key is in the cache.
+ // It is safe to call Exists concurrently with Get.
+ Exists(key string) bool
+
+ // Clean will empty the cache and delete the cache folder.
+ // Clean is not safe to call while streams are being read/written.
+ Clean() error
+}
+
+type cache struct {
+ mu sync.RWMutex
+ files map[string]fileStream
+ grim Reaper
+ fs FileSystem
+}
+
+// ReadAtCloser is an io.ReadCloser, and an io.ReaderAt. It supports both so that Range
+// Requests are possible.
+type ReadAtCloser interface {
+ io.ReadCloser
+ io.ReaderAt
+}
+
+type fileStream interface {
+ next() (ReadAtCloser, error)
+ inUse() bool
+ io.WriteCloser
+ Remove() error
+ Name() string
+}
+
+// New creates a new Cache using NewFs(dir, perms).
+// expiry is the duration after which an un-accessed key will be removed from
+// the cache, a zero value expiry means never expire.
+func New(dir string, perms os.FileMode, expiry time.Duration) (Cache, error) {
+ fs, err := NewFs(dir, perms)
+ if err != nil {
+ return nil, err
+ }
+ var grim Reaper
+ if expiry > 0 {
+ grim = &reaper{
+ expiry: expiry,
+ period: expiry,
+ }
+ }
+ return NewCache(fs, grim)
+}
+
+// NewCache creates a new Cache based on FileSystem fs.
+// fs.Files() are loaded using the name they were created with as a key.
+// Reaper is used to determine when files expire, nil means never expire.
+func NewCache(fs FileSystem, grim Reaper) (Cache, error) {
+ c := &cache{
+ files: make(map[string]fileStream),
+ grim: grim,
+ fs: fs,
+ }
+ err := c.load()
+ if err != nil {
+ return nil, err
+ }
+ if grim != nil {
+ c.haunter()
+ }
+ return c, nil
+}
+
+func (c *cache) haunter() {
+ c.haunt()
+ time.AfterFunc(c.grim.Next(), c.haunter)
+}
+
+func (c *cache) haunt() {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ for key, f := range c.files {
+ if f.inUse() {
+ continue
+ }
+
+ lastRead, lastWrite, err := c.fs.AccessTimes(f.Name())
+ if err != nil {
+ continue
+ }
+
+ if c.grim.Reap(key, lastRead, lastWrite) {
+ delete(c.files, key)
+ c.fs.Remove(f.Name())
+ }
+ }
+ return
+}
+
+func (c *cache) load() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.fs.Reload(func(key, name string) {
+ c.files[key] = c.oldFile(name)
+ })
+}
+
+func (c *cache) Exists(key string) bool {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ _, ok := c.files[key]
+ return ok
+}
+
+func (c *cache) Get(key string) (r ReadAtCloser, w io.WriteCloser, err error) {
+ c.mu.RLock()
+ f, ok := c.files[key]
+ if ok {
+ r, err = f.next()
+ c.mu.RUnlock()
+ return r, nil, err
+ }
+ c.mu.RUnlock()
+
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ f, ok = c.files[key]
+ if ok {
+ r, err = f.next()
+ return r, nil, err
+ }
+
+ f, err = c.newFile(key)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ r, err = f.next()
+ if err != nil {
+ f.Close()
+ c.fs.Remove(f.Name())
+ return nil, nil, err
+ }
+
+ c.files[key] = f
+
+ return r, f, err
+}
+
+func (c *cache) Remove(key string) error {
+ c.mu.Lock()
+ f, ok := c.files[key]
+ delete(c.files, key)
+ c.mu.Unlock()
+
+ if ok {
+ return f.Remove()
+ }
+ return nil
+}
+
+func (c *cache) Clean() error {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ c.files = make(map[string]fileStream)
+ return c.fs.RemoveAll()
+}
+
+type cachedFile struct {
+ stream *stream.Stream
+ handleCounter
+}
+
+func (c *cache) newFile(name string) (fileStream, error) {
+ s, err := stream.NewStream(name, c.fs)
+ if err != nil {
+ return nil, err
+ }
+ cf := &cachedFile{
+ stream: s,
+ }
+ cf.inc()
+ return cf, nil
+}
+
+func (c *cache) oldFile(name string) fileStream {
+ return &reloadedFile{
+ fs: c.fs,
+ name: name,
+ }
+}
+
+type reloadedFile struct {
+ fs FileSystem
+ name string
+ handleCounter
+ io.WriteCloser // nop Write & Close methods. will never be called.
+}
+
+func (f *reloadedFile) Name() string { return f.name }
+
+func (f *reloadedFile) Remove() error {
+ f.waitUntilFree()
+ return f.fs.Remove(f.name)
+}
+
+func (f *reloadedFile) next() (r ReadAtCloser, err error) {
+ r, err = f.fs.Open(f.name)
+ if err == nil {
+ f.inc()
+ }
+ return &cacheReader{r: r, cnt: &f.handleCounter}, err
+}
+
+func (f *cachedFile) Name() string { return f.stream.Name() }
+
+func (f *cachedFile) Remove() error { return f.stream.Remove() }
+
+func (f *cachedFile) next() (r ReadAtCloser, err error) {
+ reader, err := f.stream.NextReader()
+ if err != nil {
+ return nil, err
+ }
+ f.inc()
+ return &cacheReader{
+ r: reader,
+ cnt: &f.handleCounter,
+ }, nil
+}
+
+func (f *cachedFile) Write(p []byte) (int, error) {
+ return f.stream.Write(p)
+}
+
+func (f *cachedFile) Close() error {
+ defer f.dec()
+ return f.stream.Close()
+}
+
+type cacheReader struct {
+ r ReadAtCloser
+ cnt *handleCounter
+}
+
+func (r *cacheReader) ReadAt(p []byte, off int64) (n int, err error) {
+ return r.r.ReadAt(p, off)
+}
+
+func (r *cacheReader) Read(p []byte) (n int, err error) {
+ return r.r.Read(p)
+}
+
+func (r *cacheReader) Close() error {
+ defer r.cnt.dec()
+ return r.r.Close()
+}
+
+type handleCounter struct {
+ cnt int64
+ grp sync.WaitGroup
+}
+
+func (h *handleCounter) inc() {
+ h.grp.Add(1)
+ atomic.AddInt64(&h.cnt, 1)
+}
+
+func (h *handleCounter) dec() {
+ atomic.AddInt64(&h.cnt, -1)
+ h.grp.Done()
+}
+
+func (h *handleCounter) inUse() bool {
+ return atomic.LoadInt64(&h.cnt) > 0
+}
+
+func (h *handleCounter) waitUntilFree() {
+ h.grp.Wait()
+}
diff --git a/vendor/github.com/djherbis/fscache/handler.go b/vendor/github.com/djherbis/fscache/handler.go
new file mode 100644
index 00000000..8df85400
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/handler.go
@@ -0,0 +1,41 @@
+package fscache
+
+import (
+ "io"
+ "net/http"
+)
+
+// Handler is a caching middle-ware for http Handlers.
+// It responds to http requests via the passed http.Handler, and caches the response
+// using the passed cache. The cache key for the request is the req.URL.String().
+// Note: It does not cache http headers. It is more efficient to set them yourself.
+func Handler(c Cache, h http.Handler) http.Handler {
+ return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
+ url := req.URL.String()
+ r, w, err := c.Get(url)
+ if err != nil {
+ h.ServeHTTP(rw, req)
+ return
+ }
+ defer r.Close()
+ if w != nil {
+ go func() {
+ defer w.Close()
+ h.ServeHTTP(&respWrapper{
+ ResponseWriter: rw,
+ Writer: w,
+ }, req)
+ }()
+ }
+ io.Copy(rw, r)
+ })
+}
+
+type respWrapper struct {
+ http.ResponseWriter
+ io.Writer
+}
+
+func (r *respWrapper) Write(p []byte) (int, error) {
+ return r.Writer.Write(p)
+}
diff --git a/vendor/github.com/djherbis/fscache/layers.go b/vendor/github.com/djherbis/fscache/layers.go
new file mode 100644
index 00000000..74a53d07
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/layers.go
@@ -0,0 +1,129 @@
+package fscache
+
+import (
+ "errors"
+ "io"
+ "sync"
+)
+
+type layeredCache struct {
+ layers []Cache
+}
+
+// NewLayered returns a Cache which stores its data in all the passed
+// caches, when a key is requested it is loaded into all the caches above the first hit.
+func NewLayered(caches ...Cache) Cache {
+ return &layeredCache{layers: caches}
+}
+
+func (l *layeredCache) Get(key string) (r ReadAtCloser, w io.WriteCloser, err error) {
+ var last ReadAtCloser
+ var writers []io.WriteCloser
+
+ for i, layer := range l.layers {
+ r, w, err = layer.Get(key)
+ if err != nil {
+ if len(writers) > 0 {
+ last.Close()
+ multiWC(writers...).Close()
+ }
+ return nil, nil, err
+ }
+
+ // hit
+ if w == nil {
+ if len(writers) > 0 {
+ go func(r io.ReadCloser) {
+ wc := multiWC(writers...)
+ defer r.Close()
+ defer wc.Close()
+ io.Copy(wc, r)
+ }(r)
+ return last, nil, nil
+ }
+ return r, nil, nil
+ }
+
+ // miss
+ writers = append(writers, w)
+
+ if i == len(l.layers)-1 {
+ if last != nil {
+ last.Close()
+ }
+ return r, multiWC(writers...), nil
+ }
+
+ if last != nil {
+ last.Close()
+ }
+ last = r
+ }
+
+ return nil, nil, errors.New("no caches")
+}
+
+func (l *layeredCache) Remove(key string) error {
+ var grp sync.WaitGroup
+ // walk upwards so that lower layers don't
+ // restore upper layers on Get()
+ for i := len(l.layers) - 1; i >= 0; i-- {
+ grp.Add(1)
+ go func(layer Cache) {
+ defer grp.Done()
+ layer.Remove(key)
+ }(l.layers[i])
+ }
+ grp.Wait()
+ return nil
+}
+
+func (l *layeredCache) Exists(key string) bool {
+ for _, layer := range l.layers {
+ if layer.Exists(key) {
+ return true
+ }
+ }
+ return false
+}
+
+func (l *layeredCache) Clean() (err error) {
+ for _, layer := range l.layers {
+ er := layer.Clean()
+ if er != nil {
+ err = er
+ }
+ }
+ return nil
+}
+
+func multiWC(wc ...io.WriteCloser) io.WriteCloser {
+ if len(wc) == 0 {
+ return nil
+ }
+
+ return &multiWriteCloser{
+ writers: wc,
+ }
+}
+
+type multiWriteCloser struct {
+ writers []io.WriteCloser
+}
+
+func (t *multiWriteCloser) Write(p []byte) (n int, err error) {
+ for _, w := range t.writers {
+ n, err = w.Write(p)
+ if err != nil {
+ return
+ }
+ }
+ return len(p), nil
+}
+
+func (t *multiWriteCloser) Close() error {
+ for _, w := range t.writers {
+ w.Close()
+ }
+ return nil
+}
diff --git a/vendor/github.com/djherbis/fscache/memfs.go b/vendor/github.com/djherbis/fscache/memfs.go
new file mode 100644
index 00000000..cfe7e0de
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/memfs.go
@@ -0,0 +1,133 @@
+package fscache
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+ "time"
+
+ "gopkg.in/djherbis/stream.v1"
+)
+
// memFS is an in-memory FileSystem implementation backed by a map of
// key -> *memFile. All map access is guarded by mu.
type memFS struct {
	mu    sync.RWMutex // guards files
	files map[string]*memFile
}

// NewMemFs creates an in-memory FileSystem.
// It does not support persistence (Reload is a nop).
func NewMemFs() FileSystem {
	return &memFS{
		files: make(map[string]*memFile),
	}
}

// Reload is a no-op: an in-memory filesystem has nothing surviving a
// restart, so there are never any entries to re-add.
func (fs *memFS) Reload(add func(key, name string)) error {
	return nil
}
+
+func (fs *memFS) AccessTimes(name string) (rt, wt time.Time, err error) {
+ fs.mu.RLock()
+ defer fs.mu.RUnlock()
+ f, ok := fs.files[name]
+ if ok {
+ return f.rt, f.wt, nil
+ }
+ return rt, wt, errors.New("file has not been read")
+}
+
+func (fs *memFS) Create(key string) (stream.File, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if _, ok := fs.files[key]; ok {
+ return nil, errors.New("file exists")
+ }
+ file := &memFile{
+ name: key,
+ r: bytes.NewBuffer(nil),
+ wt: time.Now(),
+ }
+ file.memReader.memFile = file
+ fs.files[key] = file
+ return file, nil
+}
+
+func (fs *memFS) Open(name string) (stream.File, error) {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ if f, ok := fs.files[name]; ok {
+ f.rt = time.Now()
+ return &memReader{memFile: f}, nil
+ }
+ return nil, errors.New("file does not exist")
+}
+
+func (fs *memFS) Remove(key string) error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ delete(fs.files, key)
+ return nil
+}
+
+func (fs *memFS) RemoveAll() error {
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ fs.files = make(map[string]*memFile)
+ return nil
+}
+
// memFile is an in-memory file: a growable byte buffer plus the
// access times the cache's reaper inspects. The embedded memReader
// lets the file itself be read back (it is wired up in Create).
type memFile struct {
	mu   sync.RWMutex
	name string
	r    *bytes.Buffer
	memReader
	rt, wt time.Time
}

// Name returns the key the file was created under.
func (f *memFile) Name() string {
	return f.name
}

// Write appends p to the buffer under the write lock. Empty writes
// are accepted without touching the lock.
func (f *memFile) Write(p []byte) (int, error) {
	if len(p) == 0 {
		return 0, nil
	}
	f.mu.Lock()
	defer f.mu.Unlock()
	return f.r.Write(p)
}

// Bytes returns the current contents under the read lock. The
// returned slice aliases the buffer's backing array.
func (f *memFile) Bytes() []byte {
	f.mu.RLock()
	defer f.mu.RUnlock()
	return f.r.Bytes()
}

// Close is a no-op; the data stays available to readers.
func (f *memFile) Close() error {
	return nil
}

// memReader is a read cursor over a memFile. Each Open gets its own
// cursor, so independent readers don't disturb each other's offsets.
type memReader struct {
	*memFile
	n int // current offset used by Read
}

// ReadAt reads from a snapshot of the file at absolute offset off,
// independent of the cursor used by Read.
func (r *memReader) ReadAt(p []byte, off int64) (int, error) {
	data := r.Bytes()
	if off > int64(len(data)) {
		return 0, io.EOF
	}
	return bytes.NewReader(data[off:]).ReadAt(p, 0)
}

// Read reads from the current cursor position and advances it by the
// number of bytes consumed.
func (r *memReader) Read(p []byte) (int, error) {
	n, err := bytes.NewReader(r.Bytes()[r.n:]).Read(p)
	r.n += n
	return n, err
}

// Close is a no-op; closing a reader does not affect the file.
func (r *memReader) Close() error {
	return nil
}
diff --git a/vendor/github.com/djherbis/fscache/reaper.go b/vendor/github.com/djherbis/fscache/reaper.go
new file mode 100644
index 00000000..601e02cb
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/reaper.go
@@ -0,0 +1,37 @@
+package fscache
+
+import "time"
+
// Reaper is used to control when streams expire from the cache.
// It is called once right after loading, and then it is run
// again after every Next() period of time.
type Reaper interface {
	// Returns the amount of time to wait before the next scheduled Reaping.
	Next() time.Duration

	// Given a key and the last r/w times of a file, return true
	// to remove the file from the cache, false to keep it.
	Reap(key string, lastRead, lastWrite time.Time) bool
}

// NewReaper returns a simple reaper which runs every "period"
// and reaps files which are older than "expiry".
func NewReaper(expiry, period time.Duration) Reaper {
	return &reaper{expiry: expiry, period: period}
}

// reaper expires entries a fixed duration after their last read.
type reaper struct {
	period time.Duration // interval between reap passes
	expiry time.Duration // age since last read at which entries die
}

// Next reports the interval between reap passes.
func (g *reaper) Next() time.Duration {
	return g.period
}

// Reap expires an entry once its last read is older than expiry.
// lastWrite is ignored: only read recency matters to this policy.
func (g *reaper) Reap(key string, lastRead, lastWrite time.Time) bool {
	cutoff := time.Now().Add(-g.expiry)
	return lastRead.Before(cutoff)
}
diff --git a/vendor/github.com/djherbis/fscache/server.go b/vendor/github.com/djherbis/fscache/server.go
new file mode 100644
index 00000000..dba74aad
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/server.go
@@ -0,0 +1,206 @@
+package fscache
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io"
+ "net"
+)
+
// ListenAndServe exposes the given Cache over TCP at addr, serving
// the line-prefixed, JSON-packet protocol understood by NewRemote.
// It blocks until the listener fails.
func ListenAndServe(c Cache, addr string) error {
	return (&server{c: c}).ListenAndServe(addr)
}

// NewRemote returns a Cache client that proxies every operation to a
// server started with ListenAndServe at raddr. Each operation dials
// its own TCP connection.
func NewRemote(raddr string) Cache {
	return &remote{raddr: raddr}
}
+
// server wraps a Cache with the TCP request handlers.
type server struct {
	c Cache
}

// ListenAndServe accepts connections on addr forever, handling each
// one on its own goroutine. It returns only when Accept fails; the
// listener itself is never closed from here.
func (s *server) ListenAndServe(addr string) error {
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return err
	}

	for {
		c, err := l.Accept()
		if err != nil {
			return err
		}

		// One goroutine per connection; Serve handles exactly one request.
		go s.Serve(c)
	}
}
+
// Wire-protocol opcodes: a client sends one of these as a decimal
// digit followed by a newline, then any arguments.
const (
	actionGet = iota
	actionRemove
	actionExists
	actionClean
)
+
+func getKey(r io.Reader) string {
+ dec := newDecoder(r)
+ buf := bytes.NewBufferString("")
+ io.Copy(buf, dec)
+ return buf.String()
+}
+
+func sendKey(w io.Writer, key string) {
+ enc := newEncoder(w)
+ enc.Write([]byte(key))
+ enc.Close()
+}
+
// Serve handles a single client request: it reads one opcode line,
// dispatches it, and returns. Unknown opcodes are silently ignored.
//
// NOTE(review): the connection is not closed here on any path — the
// protocol appears to rely on the client closing its end; confirm
// there is no connection leak with misbehaving clients.
func (s *server) Serve(c net.Conn) {
	var action int
	fmt.Fscanf(c, "%d\n", &action)

	switch action {
	case actionGet:
		s.get(c, getKey(c))
	case actionRemove:
		s.c.Remove(getKey(c))
	case actionExists:
		s.exists(c, getKey(c))
	case actionClean:
		s.c.Clean()
	}
}
+
+func (s *server) exists(c net.Conn, key string) {
+ if s.c.Exists(key) {
+ fmt.Fprintf(c, "%d\n", 1)
+ } else {
+ fmt.Fprintf(c, "%d\n", 0)
+ }
+}
+
// get answers an actionGet request. The reply begins with "1\n" on a
// cache miss (w != nil; the client is expected to stream the fill
// back to us) or "0\n" on a hit, followed by the cached contents in
// packet framing.
//
// NOTE(review): errors from Get are swallowed with no reply, leaving
// the client blocked on the status line — see the inline TODO.
func (s *server) get(c net.Conn, key string) {
	r, w, err := s.c.Get(key)
	if err != nil {
		return // handle this better
	}
	defer r.Close()

	if w != nil {
		// Miss: concurrently pull the client's upload into the cache
		// while the flow below streams the (growing) entry back out.
		go func() {
			fmt.Fprintf(c, "%d\n", 1)
			io.Copy(w, newDecoder(c))
			w.Close()
		}()
	} else {
		fmt.Fprintf(c, "%d\n", 0)
	}

	// Stream the cached contents to the client, ending with the EOF packet.
	enc := newEncoder(c)
	io.Copy(enc, r)
	enc.Close()
}
+
+type remote struct {
+ raddr string
+}
+
+func (rmt *remote) Get(key string) (r ReadAtCloser, w io.WriteCloser, err error) {
+ c, err := net.Dial("tcp", rmt.raddr)
+ if err != nil {
+ return nil, nil, err
+ }
+ fmt.Fprintf(c, "%d\n", actionGet)
+ sendKey(c, key)
+
+ var i int
+ fmt.Fscanf(c, "%d\n", &i)
+
+ var ch chan struct{}
+
+ switch i {
+ case 0:
+ ch = make(chan struct{}) // close net.Conn on reader close
+ case 1:
+ ch = make(chan struct{}, 1) // two closes before net.Conn close
+
+ w = &safeCloser{
+ c: c,
+ ch: ch,
+ w: newEncoder(c),
+ }
+ default:
+ return nil, nil, errors.New("bad bad bad")
+ }
+
+ r = &safeCloser{
+ c: c,
+ ch: ch,
+ r: newDecoder(c),
+ }
+
+ return r, w, nil
+}
+
// safeCloser wraps one direction (read or write) of a shared
// net.Conn. The channel ch encodes how many Closes must happen before
// the conn itself is torn down: with a buffered channel of capacity
// one, the first Close parks a token and only the second actually
// closes the conn; with an unbuffered channel the first Close does.
type safeCloser struct {
	c  net.Conn
	ch chan<- struct{}
	r  ReadAtCloser   // set for the read half
	w  io.WriteCloser // set for the write half
}

func (s *safeCloser) ReadAt(p []byte, off int64) (int, error) {
	return s.r.ReadAt(p, off)
}
func (s *safeCloser) Read(p []byte) (int, error)  { return s.r.Read(p) }
func (s *safeCloser) Write(p []byte) (int, error) { return s.w.Write(p) }

// Close only closes the underlying connection when ch is full.
func (s *safeCloser) Close() (err error) {
	// Close whichever half this wrapper owns.
	if s.r != nil {
		err = s.r.Close()
	} else if s.w != nil {
		err = s.w.Close()
	}

	// If ch still has room this is not the final Close: record the
	// token and leave the conn open. Otherwise tear the conn down.
	select {
	case s.ch <- struct{}{}:
		return err
	default:
		return s.c.Close()
	}
}
+
+func (rmt *remote) Exists(key string) bool {
+ c, err := net.Dial("tcp", rmt.raddr)
+ if err != nil {
+ return false
+ }
+ fmt.Fprintf(c, "%d\n", actionExists)
+ sendKey(c, key)
+ var i int
+ fmt.Fscanf(c, "%d\n", &i)
+ return i == 1
+}
+
+func (rmt *remote) Remove(key string) error {
+ c, err := net.Dial("tcp", rmt.raddr)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(c, "%d\n", actionRemove)
+ sendKey(c, key)
+ return nil
+}
+
+func (rmt *remote) Clean() error {
+ c, err := net.Dial("tcp", rmt.raddr)
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(c, "%d\n", actionClean)
+ return nil
+}
diff --git a/vendor/github.com/djherbis/fscache/stream.go b/vendor/github.com/djherbis/fscache/stream.go
new file mode 100644
index 00000000..9cccb248
--- /dev/null
+++ b/vendor/github.com/djherbis/fscache/stream.go
@@ -0,0 +1,72 @@
+package fscache
+
+import (
+ "encoding/json"
+ "errors"
+ "io"
+)
+
// decoder is the subset of *json.Decoder used by pktReader.
type decoder interface {
	Decode(interface{}) error
}

// encoder is the subset of *json.Encoder used by pktWriter.
type encoder interface {
	Encode(interface{}) error
}

// pktReader turns a stream of JSON-encoded packets back into a byte
// stream, terminating at the EOF marker packet.
type pktReader struct {
	dec decoder
}

// pktWriter frames a byte stream as JSON-encoded packets; Close
// appends the EOF marker packet.
type pktWriter struct {
	enc encoder
}

// packet is the wire unit: a chunk of data, or an end-of-stream
// marker when Err == eof.
type packet struct {
	Err  int
	Data []byte
}

// eof is the packet.Err value that marks end of stream.
const eof = 1
+
// ReadAt is required by the ReadAtCloser interface but is not
// supported on a sequential packet stream.
func (t *pktReader) ReadAt(p []byte, off int64) (n int, err error) {
	// TODO not implemented
	return 0, errors.New("not implemented")
}

// Read decodes the next packet into p, returning io.EOF when the EOF
// marker packet arrives.
//
// NOTE(review): if len(p) is smaller than the packet's Data, copy
// truncates and the remainder of that packet is lost — confirm all
// callers supply buffers at least as large as the peer's writes.
func (t *pktReader) Read(p []byte) (int, error) {
	var pkt packet
	err := t.dec.Decode(&pkt)
	if err != nil {
		return 0, err
	}
	if pkt.Err == eof {
		return 0, io.EOF
	}
	return copy(p, pkt.Data), nil
}

// Close is a no-op; the underlying stream is owned elsewhere.
func (t *pktReader) Close() error {
	return nil
}
+
+func (t *pktWriter) Write(p []byte) (int, error) {
+ pkt := packet{Data: p}
+ err := t.enc.Encode(pkt)
+ if err != nil {
+ return 0, err
+ }
+ return len(p), nil
+}
+
+func (t *pktWriter) Close() error {
+ return t.enc.Encode(packet{Err: eof})
+}
+
// newEncoder wraps w so that byte writes are framed as JSON packets.
func newEncoder(w io.Writer) io.WriteCloser {
	return &pktWriter{enc: json.NewEncoder(w)}
}

// newDecoder wraps r so that framed JSON packets read back as a plain
// byte stream.
func newDecoder(r io.Reader) ReadAtCloser {
	return &pktReader{dec: json.NewDecoder(r)}
}
diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE
new file mode 100644
index 00000000..7805d36d
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/LICENSE
@@ -0,0 +1,50 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Sam Ghods
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md
new file mode 100644
index 00000000..c27d1fee
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/README.md
@@ -0,0 +1,116 @@
+# YAML marshaling and unmarshaling support for Go
+
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml)
+
+## Introduction
+
+A wrapper around [candiedyaml](https://github.com/cloudfoundry-incubator/candiedyaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+
+In short, this library first converts YAML to JSON using candiedyaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike candiedyaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
+
+## Compatibility
+
+This package uses [candiedyaml](https://github.com/cloudfoundry-incubator/candiedyaml) and therefore supports [everything candiedyaml supports](https://github.com/cloudfoundry-incubator/candiedyaml#candiedyaml).
+
+## Caveats
+
+**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, candiedyaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example:
+
+```
+BAD:
+ exampleKey: !!binary gIGC
+
+GOOD:
+ exampleKey: gIGC
+... and decode the base64 data in your code.
+```
+
+**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys.
+
+## Installation and usage
+
+To install, run:
+
+```
+$ go get github.com/ghodss/yaml
+```
+
+And import using:
+
+```
+import "github.com/ghodss/yaml"
+```
+
+Usage is very similar to the JSON library:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+
+type Person struct {
+ Name string `json:"name"` // Affects YAML field names too.
+	Age  int    `json:"age"`
+}
+
+func main() {
+ // Marshal a Person struct to YAML.
+ p := Person{"John", 30}
+ y, err := yaml.Marshal(p)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+
+ // Unmarshal the YAML back into a Person struct.
+ var p2 Person
+	err = yaml.Unmarshal(y, &p2)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(p2)
+ /* Output:
+ {John 30}
+ */
+}
+```
+
+`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
+
+```go
+import (
+ "fmt"
+
+ "github.com/ghodss/yaml"
+)
+func main() {
+ j := []byte(`{"name": "John", "age": 30}`)
+ y, err := yaml.JSONToYAML(j)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(y))
+ /* Output:
+ name: John
+ age: 30
+ */
+ j2, err := yaml.YAMLToJSON(y)
+ if err != nil {
+ fmt.Printf("err: %v\n", err)
+ return
+ }
+ fmt.Println(string(j2))
+ /* Output:
+ {"age":30,"name":"John"}
+ */
+}
+```
diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go
new file mode 100644
index 00000000..0bd3c2b4
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/fields.go
@@ -0,0 +1,497 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package yaml
+
+import (
+ "bytes"
+ "encoding"
+ "encoding/json"
+ "reflect"
+ "sort"
+ "strings"
+ "sync"
+ "unicode"
+ "unicode/utf8"
+)
+
// indirect walks down v allocating pointers as needed,
// until it gets to a non-pointer.
// if it encounters an Unmarshaler, indirect stops and returns that.
// if decodingNull is true, indirect stops at the last pointer so it can be set to nil.
func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) {
	// If v is a named type and is addressable,
	// start with its address, so that if the type has pointer methods,
	// we find them.
	if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
		v = v.Addr()
	}
	for {
		// Load value from interface, but only if the result will be
		// usefully addressable.
		if v.Kind() == reflect.Interface && !v.IsNil() {
			e := v.Elem()
			if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
				v = e
				continue
			}
		}

		// Reached a non-pointer: nothing further to dereference.
		if v.Kind() != reflect.Ptr {
			break
		}

		// When decoding null, stop at the last pointer so the caller
		// can set it to nil.
		if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
			break
		}
		if v.IsNil() {
			v.Set(reflect.New(v.Type().Elem()))
		}
		// Prefer a custom unmarshaler if this pointer implements one.
		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(json.Unmarshaler); ok {
				return u, nil, reflect.Value{}
			}
			if u, ok := v.Interface().(encoding.TextUnmarshaler); ok {
				return nil, u, reflect.Value{}
			}
		}
		v = v.Elem()
	}
	return nil, nil, v
}
+
// A field represents a single field found in a struct.
type field struct {
	name      string
	nameBytes []byte                 // []byte(name)
	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent

	tag       bool  // whether the name came from a json tag
	index     []int // index sequence for reflect.Type.FieldByIndex
	typ       reflect.Type
	omitEmpty bool
	quoted    bool
}

// fillField computes the derived, cached members of f: the byte form
// of its name and the matching case-folding comparator.
func fillField(f field) field {
	f.nameBytes = []byte(f.name)
	f.equalFold = foldFunc(f.nameBytes)
	return f
}
+
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	// Shallower fields sort first (they dominate in Go embedding rules).
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	// Tagged fields sort before untagged ones at the same depth.
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	// Lexicographic comparison of the index paths.
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}
+
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	// BFS over the embedding graph, one level of anonymous structs per pass.
	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" { // unexported
					continue
				}
				// This yaml package deliberately reuses "json" struct tags.
				tag := sf.Tag.Get("json")
				if tag == "-" {
					continue
				}
				name, opts := parseTag(tag)
				if !isValidTag(name) {
					name = ""
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, fillField(field{
						name:      name,
						tag:       tagged,
						index:     index,
						typ:       ft,
						omitEmpty: opts.Contains("omitempty"),
						quoted:    opts.Contains("string"),
					}))
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with JSON tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
+
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	// A single tagged field at the shallowest depth wins outright.
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}
+
// fieldCache memoizes typeFields results per reflect.Type.
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		// Store a non-nil sentinel so the fast path above can distinguish
		// "computed, empty" from "never computed".
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}
+
// isValidTag reports whether s is usable as a struct-tag field name:
// non-empty, consisting of letters, digits, and a restricted set of
// punctuation. Backslash, quote, and comma are reserved and rejected.
func isValidTag(s string) bool {
	if s == "" {
		return false
	}
	for _, c := range s {
		// Backslash and quote chars are reserved, but otherwise any
		// punctuation chars from this set are allowed in a tag name.
		if strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c) {
			continue
		}
		if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
			return false
		}
	}
	return true
}
+
const (
	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
	kelvin       = '\u212a'    // 'K' Kelvin sign, case-folds with k/K.
	smallLongEss = '\u017f'    // 'ſ' Latin small long s, folds with s/S.
)

// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
//  * S maps to s and to U+017F 'ſ' Latin small letter long s
//  * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
	nonLetter := false
	special := false // special letter
	for _, b := range s {
		if b >= utf8.RuneSelf {
			// Non-ASCII byte: only the general Unicode fold is safe.
			return bytes.EqualFold
		}
		upper := b & caseMask
		if upper < 'A' || upper > 'Z' {
			nonLetter = true
		} else if upper == 'K' || upper == 'S' {
			// See above for why these letters are special.
			special = true
		}
	}
	if special {
		return equalFoldRight
	}
	if nonLetter {
		return asciiEqualFold
	}
	return simpleLetterEqualFold
}
+
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
	for _, sb := range s {
		if len(t) == 0 {
			return false
		}
		tb := t[0]
		if tb < utf8.RuneSelf {
			// ASCII vs ASCII: plain case-insensitive byte compare.
			if sb != tb {
				sbUpper := sb & caseMask
				if 'A' <= sbUpper && sbUpper <= 'Z' {
					if sbUpper != tb&caseMask {
						return false
					}
				} else {
					return false
				}
			}
			t = t[1:]
			continue
		}
		// sb is ASCII and t is not. t must be either kelvin
		// sign or long s; sb must be s, S, k, or K.
		tr, size := utf8.DecodeRune(t)
		switch sb {
		case 's', 'S':
			if tr != smallLongEss {
				return false
			}
		case 'k', 'K':
			if tr != kelvin {
				return false
			}
		default:
			return false
		}
		t = t[size:]

	}
	// Any bytes left over in t mean the strings differ in length.
	if len(t) > 0 {
		return false
	}
	return true
}
+
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
	const mask = ^byte(0x20) // clears the ASCII case bit
	if len(s) != len(t) {
		return false
	}
	for i := range s {
		a, b := s[i], t[i]
		if a == b {
			continue
		}
		// Unequal bytes only fold together when both are the same
		// ASCII letter modulo case; non-letters must match exactly.
		isLetter := ('a' <= a && a <= 'z') || ('A' <= a && a <= 'Z')
		if !isLetter || a&mask != b&mask {
			return false
		}
	}
	return true
}
+
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
	const mask = ^byte(0x20) // clears the ASCII case bit
	if len(s) != len(t) {
		return false
	}
	for i := range s {
		if s[i]&mask != t[i]&mask {
			return false
		}
	}
	return true
}
+
// tagOptions is the string following a comma in a struct field's
// "json" tag, or the empty string. It does not include the leading
// comma.
type tagOptions string

// parseTag splits a struct field's json tag into its name and the
// comma-separated options that follow it.
func parseTag(tag string) (string, tagOptions) {
	if i := strings.IndexByte(tag, ','); i >= 0 {
		return tag[:i], tagOptions(tag[i+1:])
	}
	return tag, tagOptions("")
}

// Contains reports whether the comma-separated option list includes
// optionName as an exact element (substrings do not match).
func (o tagOptions) Contains(optionName string) bool {
	rest := string(o)
	for rest != "" {
		opt := rest
		if i := strings.IndexByte(rest, ','); i >= 0 {
			opt, rest = rest[:i], rest[i+1:]
		} else {
			rest = ""
		}
		if opt == optionName {
			return true
		}
	}
	return false
}
diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go
new file mode 100644
index 00000000..b4fe6abc
--- /dev/null
+++ b/vendor/github.com/ghodss/yaml/yaml.go
@@ -0,0 +1,277 @@
+package yaml
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "reflect"
+ "strconv"
+
+ yaml "github.com/cloudfoundry-incubator/candiedyaml"
+)
+
+// Marshals the object into JSON then converts JSON to YAML and returns the
+// YAML.
+func Marshal(o interface{}) ([]byte, error) {
+ j, err := json.Marshal(o)
+ if err != nil {
+ return nil, fmt.Errorf("error marshaling into JSON: ", err)
+ }
+
+ y, err := JSONToYAML(j)
+ if err != nil {
+ return nil, fmt.Errorf("error converting JSON to YAML: ", err)
+ }
+
+ return y, nil
+}
+
// Unmarshal converts YAML to JSON then uses JSON to unmarshal into the
// object o. The reflect.Value of o is passed down so YAML scalars can
// be coerced toward the target's field types during conversion.
func Unmarshal(y []byte, o interface{}) error {
	vo := reflect.ValueOf(o)
	j, err := yamlToJSON(y, &vo)
	if err != nil {
		return fmt.Errorf("error converting YAML to JSON: %v", err)
	}

	err = json.Unmarshal(j, o)
	if err != nil {
		return fmt.Errorf("error unmarshaling JSON: %v", err)
	}

	return nil
}
+
// JSONToYAML converts JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
	// Convert the JSON to an object.
	var jsonObj interface{}
	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
	// Go JSON library doesn't try to pick the right number type (int, float,
	// etc.) when unmarshaling to interface{}, it just picks float64
	// universally. go-yaml does go through the effort of picking the right
	// number type, so we can preserve number type throughout this process.
	err := yaml.Unmarshal(j, &jsonObj)
	if err != nil {
		return nil, err
	}

	// Marshal this object into YAML.
	return yaml.Marshal(jsonObj)
}
+
// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// * Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
func YAMLToJSON(y []byte) ([]byte, error) {
	// nil target: no type-driven scalar coercion is performed.
	return yamlToJSON(y, nil)
}
+
// yamlToJSON converts YAML bytes to JSON bytes. If jsonTarget is
// non-nil it is used to coerce YAML scalars toward the Go types the
// caller will ultimately decode into (see convertToJSONableObject).
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
	// Convert the YAML to an object.
	var yamlObj interface{}
	err := yaml.Unmarshal(y, &yamlObj)
	if err != nil {
		return nil, err
	}

	// YAML objects are not completely compatible with JSON objects (e.g. you
	// can have non-string keys in YAML). So, convert the YAML-compatible object
	// to a JSON-compatible object, failing with an error if irrecoverable
	// incompatibilities happen along the way.
	jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget)
	if err != nil {
		return nil, err
	}

	// Convert this object to JSON and return the data.
	return json.Marshal(jsonObj)
}
+
// convertToJSONableObject recursively rewrites a value decoded from
// YAML into one that encoding/json can marshal: map keys become
// strings, nested maps/slices are converted in place, and scalars are
// coerced to strings when the JSON target (if provided) is a
// string-typed destination.
func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) {
	var err error

	// Resolve jsonTarget to a concrete value (i.e. not a pointer or an
	// interface). We pass decodingNull as false because we're not actually
	// decoding into the value, we're just checking if the ultimate target is a
	// string.
	if jsonTarget != nil {
		ju, tu, pv := indirect(*jsonTarget, false)
		// We have a JSON or Text Unmarshaler at this level, so we can't be trying
		// to decode into a string.
		if ju != nil || tu != nil {
			jsonTarget = nil
		} else {
			jsonTarget = &pv
		}
	}

	// If yamlObj is a number or a boolean, check if jsonTarget is a string -
	// if so, coerce. Else return normal.
	// If yamlObj is a map or array, find the field that each key is
	// unmarshaling to, and when you recurse pass the reflect.Value for that
	// field back into this function.
	switch typedYAMLObj := yamlObj.(type) {
	case map[interface{}]interface{}:
		// JSON does not support arbitrary keys in a map, so we must convert
		// these keys to strings.
		//
		// From my reading of go-yaml v2 (specifically the resolve function),
		// keys can only have the types string, int, int64, float64, binary
		// (unsupported), or null (unsupported).
		strMap := make(map[string]interface{})
		for k, v := range typedYAMLObj {
			// Resolve the key to a string first.
			var keyString string
			switch typedKey := k.(type) {
			case string:
				keyString = typedKey
			case int:
				keyString = strconv.Itoa(typedKey)
			case int64:
				// go-yaml will only return an int64 as a key if the system
				// architecture is 32-bit and the key's value is between 32-bit
				// and 64-bit. Otherwise the key type will simply be int.
				keyString = strconv.FormatInt(typedKey, 10)
			case float64:
				// Stolen from go-yaml to use the same conversion to string as
				// the go-yaml library uses to convert float to string when
				// Marshaling.
				s := strconv.FormatFloat(typedKey, 'g', -1, 32)
				switch s {
				case "+Inf":
					s = ".inf"
				case "-Inf":
					s = "-.inf"
				case "NaN":
					s = ".nan"
				}
				keyString = s
			case bool:
				if typedKey {
					keyString = "true"
				} else {
					keyString = "false"
				}
			default:
				return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v",
					reflect.TypeOf(k), k, v)
			}

			// jsonTarget should be a struct or a map. If it's a struct, find
			// the field it's going to map to and pass its reflect.Value. If
			// it's a map, find the element type of the map and pass the
			// reflect.Value created from that type. If it's neither, just pass
			// nil - JSON conversion will error for us if it's a real issue.
			if jsonTarget != nil {
				t := *jsonTarget
				if t.Kind() == reflect.Struct {
					keyBytes := []byte(keyString)
					// Find the field that the JSON library would use.
					var f *field
					fields := cachedTypeFields(t.Type())
					for i := range fields {
						ff := &fields[i]
						if bytes.Equal(ff.nameBytes, keyBytes) {
							f = ff
							break
						}
						// Do case-insensitive comparison.
						if f == nil && ff.equalFold(ff.nameBytes, keyBytes) {
							f = ff
						}
					}
					if f != nil {
						// Find the reflect.Value of the most preferential
						// struct field.
						jtf := t.Field(f.index[0])
						strMap[keyString], err = convertToJSONableObject(v, &jtf)
						if err != nil {
							return nil, err
						}
						continue
					}
				} else if t.Kind() == reflect.Map {
					// Create a zero value of the map's element type to use as
					// the JSON target.
					jtv := reflect.Zero(t.Type().Elem())
					strMap[keyString], err = convertToJSONableObject(v, &jtv)
					if err != nil {
						return nil, err
					}
					continue
				}
			}
			strMap[keyString], err = convertToJSONableObject(v, nil)
			if err != nil {
				return nil, err
			}
		}
		return strMap, nil
	case []interface{}:
		// We need to recurse into arrays in case there are any
		// map[interface{}]interface{}'s inside and to convert any
		// numbers to strings.

		// If jsonTarget is a slice (which it really should be), find the
		// thing it's going to map to. If it's not a slice, just pass nil
		// - JSON conversion will error for us if it's a real issue.
		var jsonSliceElemValue *reflect.Value
		if jsonTarget != nil {
			t := *jsonTarget
			if t.Kind() == reflect.Slice {
				// By default slices point to nil, but we need a reflect.Value
				// pointing to a value of the slice type, so we create one here.
				ev := reflect.Indirect(reflect.New(t.Type().Elem()))
				jsonSliceElemValue = &ev
			}
		}

		// Make and use a new array.
		arr := make([]interface{}, len(typedYAMLObj))
		for i, v := range typedYAMLObj {
			arr[i], err = convertToJSONableObject(v, jsonSliceElemValue)
			if err != nil {
				return nil, err
			}
		}
		return arr, nil
	default:
		// If the target type is a string and the YAML type is a number,
		// convert the YAML type to a string.
		if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String {
			// Based on my reading of go-yaml, it may return int, int64,
			// float64, or uint64.
			var s string
			switch typedVal := typedYAMLObj.(type) {
			case int:
				s = strconv.FormatInt(int64(typedVal), 10)
			case int64:
				s = strconv.FormatInt(typedVal, 10)
			case float64:
				s = strconv.FormatFloat(typedVal, 'g', -1, 32)
			case uint64:
				s = strconv.FormatUint(typedVal, 10)
			case bool:
				if typedVal {
					s = "true"
				} else {
					s = "false"
				}
			}
			if len(s) > 0 {
				yamlObj = interface{}(s)
			}
		}
		return yamlObj, nil
	}

	// NOTE(review): unreachable — every switch case above returns — but
	// kept to satisfy older compilers' terminating-statement analysis.
	return nil, nil
}
diff --git a/vendor/github.com/gin-gonic/contrib/ginrus/ginrus.go b/vendor/github.com/gin-gonic/contrib/ginrus/ginrus.go
new file mode 100644
index 00000000..464efecd
--- /dev/null
+++ b/vendor/github.com/gin-gonic/contrib/ginrus/ginrus.go
@@ -0,0 +1,51 @@
+// Package ginrus provides log handling using logrus package.
+//
+// Based on github.com/stephenmuss/ginerus but adds more options.
+package ginrus
+
+import (
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/gin-gonic/gin"
+)
+
// Ginrus returns a gin.HandlerFunc (middleware) that logs requests using logrus.
//
// Requests with errors are logged using logrus.Error().
// Requests without errors are logged using logrus.Info().
//
// It receives:
// 1. A time package format string (e.g. time.RFC3339).
// 2. A boolean stating whether to use UTC time zone or local.
func Ginrus(logger *logrus.Logger, timeFormat string, utc bool) gin.HandlerFunc {
	return func(c *gin.Context) {
		start := time.Now()
		// Capture the path up front: some evil middlewares modify these
		// values before the handler chain returns.
		path := c.Request.URL.Path
		c.Next()

		// Latency is measured across the whole downstream chain (c.Next()).
		end := time.Now()
		latency := end.Sub(start)
		if utc {
			end = end.UTC()
		}

		entry := logger.WithFields(logrus.Fields{
			"status":     c.Writer.Status(),
			"method":     c.Request.Method,
			"path":       path,
			"ip":         c.ClientIP(),
			"latency":    latency,
			"user-agent": c.Request.UserAgent(),
			"time":       end.Format(timeFormat),
		})

		if len(c.Errors) > 0 {
			// Append error field if this is an erroneous request.
			entry.Error(c.Errors.String())
		} else {
			entry.Info()
		}
	}
}
diff --git a/vendor/github.com/ianschenck/envflag/LICENSE b/vendor/github.com/ianschenck/envflag/LICENSE
new file mode 100644
index 00000000..dfdbd2ad
--- /dev/null
+++ b/vendor/github.com/ianschenck/envflag/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2013, Ian Schenck
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+ Neither the name of Ian Schenck nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/github.com/ianschenck/envflag/README.md b/vendor/github.com/ianschenck/envflag/README.md
new file mode 100644
index 00000000..360daff3
--- /dev/null
+++ b/vendor/github.com/ianschenck/envflag/README.md
@@ -0,0 +1,37 @@
+envflag
+=======
+
+Golang flags, but bolted onto the environment rather than the command-line.
+
+Read the [godocs](http://godoc.org/github.com/ianschenck/envflag).
+
+Motivation
+==========
+
+Some like the distinction that command-line flags control behavior
+while environment variables configure. Also
+[12-factor](http://12factor.net/) recommends the use of environment
+variables for configuration. The interface of the golang flag package
+is well designed and easy to use, and allows for other lists
+(os.Environ() vs os.Args) to be parsed as flags. It makes sense then
+to use the same interface, the same types, and the same parsing
+(caveat: there is some ugly string hacking to make environment
+variables look like flags) to the same ends.
+
+Differences
+===========
+
+Calling `flag.Parse()` will not parse environment flags. Calling
+`envflag.Parse()` will not parse command-line flags. There is no good
+reason to combine these two when the net savings is a single line in a
+`func main()`. Furthermore, doing so would require users to accept a
+precedence order of my choosing.
+
+The presence of an environment variable named `h` or `help` will
+probably cause problems (print Usage and os.Exit(0)). Work around this
+by defining those flags somewhere (and ignoring them).
+
+Before calling `Flagset.Parse` on `EnvironmentFlags`, the environment
+variables being passed to `Parse` are trimmed down using
+`Lookup`. This behavior is different from `flag.Parse` in that extra
+environment variables are ignored (and won't crash `envflag.Parse`).
diff --git a/vendor/github.com/ianschenck/envflag/envflag.go b/vendor/github.com/ianschenck/envflag/envflag.go
new file mode 100644
index 00000000..128244cf
--- /dev/null
+++ b/vendor/github.com/ianschenck/envflag/envflag.go
@@ -0,0 +1,192 @@
+// Copyright 2013 Ian Schenck. Use of this source code is governed by
+// a license that can be found in the LICENSE file.
+
+/*
+ Package envflag adds environment variable flags to the flag package.
+
+ Usage:
+
+ Define flags using envflag.String(), Bool(), Int(), etc. This package
+ works nearly the same as the stdlib flag package. Parsing the
+ Environment flags is done by calling envflag.Parse()
+
+ It will *not* attempt to parse any normally-defined command-line
+ flags. Command-line flags are explicitly left alone and separate.
+*/
+package envflag
+
+import (
+ "flag"
+ "fmt"
+ "os"
+ "strings"
+ "time"
+)
+
+// VisitAll visits the environment flags in lexicographical order,
+// calling fn for each. It visits all flags, even those not set.
+func VisitAll(fn func(*flag.Flag)) {
+ EnvironmentFlags.VisitAll(fn)
+}
+
+// Visit visits the environment flags in lexicographical order,
+// calling fn for each. It visits only those flags that have been
+// set.
+func Visit(fn func(*flag.Flag)) {
+ EnvironmentFlags.Visit(fn)
+}
+
+// Lookup returns the Flag structure of the named environment flag,
+// returning nil if none exists.
+func Lookup(name string) *flag.Flag {
+ return EnvironmentFlags.Lookup(name)
+}
+
+// Set sets the value of the named environment flag.
+func Set(name, value string) error {
+ return EnvironmentFlags.Set(name, value)
+}
+
+// BoolVar defines a bool flag with specified name, default value, and
+// usage string. The argument p points to a bool variable in which to
+// store the value of the flag.
+func BoolVar(p *bool, name string, value bool, usage string) {
+ EnvironmentFlags.BoolVar(p, name, value, usage)
+}
+
+// Bool defines a bool flag with specified name, default value, and
+// usage string. The return value is the address of a bool variable
+// that stores the value of the flag.
+func Bool(name string, value bool, usage string) *bool {
+ return EnvironmentFlags.Bool(name, value, usage)
+}
+
+// IntVar defines an int flag with specified name, default value, and
+// usage string. The argument p points to an int variable in which to
+// store the value of the flag.
+func IntVar(p *int, name string, value int, usage string) {
+ EnvironmentFlags.IntVar(p, name, value, usage)
+}
+
+// Int defines an int flag with specified name, default value, and
+// usage string. The return value is the address of an int variable
+// that stores the value of the flag.
+func Int(name string, value int, usage string) *int {
+ return EnvironmentFlags.Int(name, value, usage)
+}
+
+// Int64Var defines an int64 flag with specified name, default value,
+// and usage string. The argument p points to an int64 variable in
+// which to store the value of the flag.
+func Int64Var(p *int64, name string, value int64, usage string) {
+ EnvironmentFlags.Int64Var(p, name, value, usage)
+}
+
+// Int64 defines an int64 flag with specified name, default value, and
+// usage string. The return value is the address of an int64 variable
+// that stores the value of the flag.
+func Int64(name string, value int64, usage string) *int64 {
+ return EnvironmentFlags.Int64(name, value, usage)
+}
+
+// UintVar defines a uint flag with specified name, default value, and
+// usage string. The argument p points to a uint variable in which to
+// store the value of the flag.
+func UintVar(p *uint, name string, value uint, usage string) {
+ EnvironmentFlags.UintVar(p, name, value, usage)
+}
+
+// Uint defines a uint flag with specified name, default value, and
+// usage string. The return value is the address of a uint variable
+// that stores the value of the flag.
+func Uint(name string, value uint, usage string) *uint {
+ return EnvironmentFlags.Uint(name, value, usage)
+}
+
+// Uint64Var defines a uint64 flag with specified name, default value,
+// and usage string. The argument p points to a uint64 variable in
+// which to store the value of the flag.
+func Uint64Var(p *uint64, name string, value uint64, usage string) {
+ EnvironmentFlags.Uint64Var(p, name, value, usage)
+}
+
+// Uint64 defines a uint64 flag with specified name, default value,
+// and usage string. The return value is the address of a uint64
+// variable that stores the value of the flag.
+func Uint64(name string, value uint64, usage string) *uint64 {
+ return EnvironmentFlags.Uint64(name, value, usage)
+}
+
+// StringVar defines a string flag with specified name, default value,
+// and usage string. The argument p points to a string variable in
+// which to store the value of the flag.
+func StringVar(p *string, name string, value string, usage string) {
+ EnvironmentFlags.StringVar(p, name, value, usage)
+}
+
+// String defines a string flag with specified name, default value,
+// and usage string. The return value is the address of a string
+// variable that stores the value of the flag.
+func String(name string, value string, usage string) *string {
+ return EnvironmentFlags.String(name, value, usage)
+}
+
+// Float64Var defines a float64 flag with specified name, default
+// value, and usage string. The argument p points to a float64
+// variable in which to store the value of the flag.
+func Float64Var(p *float64, name string, value float64, usage string) {
+ EnvironmentFlags.Float64Var(p, name, value, usage)
+}
+
+// Float64 defines a float64 flag with specified name, default value,
+// and usage string. The return value is the address of a float64
+// variable that stores the value of the flag.
+func Float64(name string, value float64, usage string) *float64 {
+ return EnvironmentFlags.Float64(name, value, usage)
+}
+
+// DurationVar defines a time.Duration flag with specified name,
+// default value, and usage string. The argument p points to a
+// time.Duration variable in which to store the value of the flag.
+func DurationVar(p *time.Duration, name string, value time.Duration, usage string) {
+ EnvironmentFlags.DurationVar(p, name, value, usage)
+}
+
+// Duration defines a time.Duration flag with specified name, default
+// value, and usage string. The return value is the address of a
+// time.Duration variable that stores the value of the flag.
+func Duration(name string, value time.Duration, usage string) *time.Duration {
+ return EnvironmentFlags.Duration(name, value, usage)
+}
+
+// PrintDefaults prints to standard error the default values of all
+// defined environment flags.
+func PrintDefaults() {
+ EnvironmentFlags.PrintDefaults()
+}
+
+// Parse parses the environment flags from os.Environ. Must be called
+// after all flags are defined and before flags are accessed by the
+// program.
+func Parse() {
+ env := os.Environ()
+ // Clean up and "fake" some flag k/v pairs.
+ args := make([]string, 0, len(env))
+ for _, value := range env {
+ if Lookup(value[:strings.Index(value, "=")]) == nil {
+ continue
+ }
+ args = append(args, fmt.Sprintf("-%s", value))
+ }
+ EnvironmentFlags.Parse(args)
+}
+
+// Parsed returns true if the environment flags have been parsed.
+func Parsed() bool {
+ return EnvironmentFlags.Parsed()
+}
+
// EnvironmentFlags is the default set of environment flags, parsed
// from os.Environ(). The top-level functions such as BoolVar, Arg,
// and so on are wrappers for the methods of EnvironmentFlags.
// It is created with flag.ExitOnError, so a parse failure prints usage
// and terminates the process.
var EnvironmentFlags = flag.NewFlagSet("environment", flag.ExitOnError)
diff --git a/vendor/github.com/joho/godotenv/LICENCE b/vendor/github.com/joho/godotenv/LICENCE
new file mode 100644
index 00000000..e7ddd51b
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/LICENCE
@@ -0,0 +1,23 @@
+Copyright (c) 2013 John Barton
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
diff --git a/vendor/github.com/joho/godotenv/README.md b/vendor/github.com/joho/godotenv/README.md
new file mode 100644
index 00000000..05c47e6f
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/README.md
@@ -0,0 +1,127 @@
+# GoDotEnv [![wercker status](https://app.wercker.com/status/507594c2ec7e60f19403a568dfea0f78 "wercker status")](https://app.wercker.com/project/bykey/507594c2ec7e60f19403a568dfea0f78)
+
+A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file)
+
+From the original Library:
+
+> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables.
+>
+> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped.
+
+It can be used as a library (for loading in env for your own daemons etc) or as a bin command.
+
+There is test coverage and CI for both linuxish and windows environments, but I make no guarantees about the bin version working on windows.
+
+## Installation
+
+As a library
+
+```shell
+go get github.com/joho/godotenv
+```
+
+or if you want to use it as a bin command
+```shell
+go get github.com/joho/godotenv/cmd/godotenv
+```
+
+## Usage
+
+Add your application configuration to your `.env` file in the root of your project:
+
+```shell
+S3_BUCKET=YOURS3BUCKET
+SECRET_KEY=YOURSECRETKEYGOESHERE
+```
+
+Then in your Go app you can do something like
+
+```go
+package main
+
+import (
+ "github.com/joho/godotenv"
+ "log"
+ "os"
+)
+
+func main() {
+ err := godotenv.Load()
+ if err != nil {
+ log.Fatal("Error loading .env file")
+ }
+
+ s3Bucket := os.Getenv("S3_BUCKET")
+ secretKey := os.Getenv("SECRET_KEY")
+
+ // now do something with s3 or whatever
+}
+```
+
+If you're even lazier than that, you can just take advantage of the autoload package which will read in `.env` on import
+
+```go
+import _ "github.com/joho/godotenv/autoload"
+```
+
+While `.env` in the project root is the default, you don't have to be constrained, both examples below are 100% legit
+
+```go
+_ = godotenv.Load("somerandomfile")
+_ = godotenv.Load("filenumberone.env", "filenumbertwo.env")
+```
+
+If you want to be really fancy with your env file you can do comments and exports (below is a valid env file)
+
+```shell
+# I am a comment and that is OK
+SOME_VAR=someval
+FOO=BAR # comments at line end are OK too
+export BAR=BAZ
+```
+
+Or finally you can do YAML(ish) style
+
+```yaml
+FOO: bar
+BAR: baz
+```
+
+as a final aside, if you don't want godotenv munging your env you can just get a map back instead
+
+```go
+var myEnv map[string]string
+myEnv, err := godotenv.Read()
+
+s3Bucket := myEnv["S3_BUCKET"]
+```
+
+### Command Mode
+
+Assuming you've installed the command as above and you've got `$GOPATH/bin` in your `$PATH`
+
+```
+godotenv -f /some/path/to/.env some_command with some args
+```
+
+If you don't specify `-f` it will fall back on the default of loading `.env` in `PWD`
+
+## Contributing
+
+Contributions are most welcome! The parser itself is pretty stupidly naive and I wouldn't be surprised if it breaks with edge cases.
+
+*code changes without tests will not be accepted*
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Added some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## CI
+
+Linux: [![wercker status](https://app.wercker.com/status/507594c2ec7e60f19403a568dfea0f78/m "wercker status")](https://app.wercker.com/project/bykey/507594c2ec7e60f19403a568dfea0f78) Windows: [![Build status](https://ci.appveyor.com/api/projects/status/9v40vnfvvgde64u4)](https://ci.appveyor.com/project/joho/godotenv)
+
+## Who?
+
+The original library [dotenv](https://github.com/bkeepers/dotenv) was written by [Brandon Keepers](http://opensoul.org/), and this port was done by [John Barton](http://whoisjohnbarton.com) based off the tests/fixtures in the original library.
diff --git a/vendor/github.com/joho/godotenv/autoload/autoload.go b/vendor/github.com/joho/godotenv/autoload/autoload.go
new file mode 100644
index 00000000..fbcd2bdf
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/autoload/autoload.go
@@ -0,0 +1,15 @@
+package autoload
+
+/*
+ You can just read the .env file on import just by doing
+
+ import _ "github.com/joho/godotenv/autoload"
+
+ And bob's your mother's brother
+*/
+
+import "github.com/joho/godotenv"
+
+func init() {
+ godotenv.Load()
+}
diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go
new file mode 100644
index 00000000..94b2676b
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/godotenv.go
@@ -0,0 +1,229 @@
+// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
+//
+// Examples/readme can be found on the github page at https://github.com/joho/godotenv
+//
+// The TL;DR is that you make a .env file that looks something like
+//
+// SOME_ENV_VAR=somevalue
+//
+// and then in your go code you can call
+//
+// godotenv.Load()
+//
+// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
+package godotenv
+
+import (
+ "bufio"
+ "errors"
+ "os"
+ "os/exec"
+ "strings"
+)
+
// Load will read your env file(s) and load them into ENV for this process.
//
// Call this function as close as possible to the start of your program (ideally in main)
//
// If you call Load without any args it will default to loading .env in the current path
//
// You can otherwise tell it which files to load (there can be more than one) like
//
//		godotenv.Load("fileone", "filetwo")
//
// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults
func Load(filenames ...string) (err error) {
	filenames = filenamesOrDefault(filenames)

	for _, filename := range filenames {
		// overload=false: existing environment variables win.
		err = loadFile(filename, false)
		if err != nil {
			return // return early on a spazout
		}
	}
	return
}
+
// Overload will read your env file(s) and load them into ENV for this process.
//
// Call this function as close as possible to the start of your program (ideally in main)
//
// If you call Overload without any args it will default to loading .env in the current path
//
// You can otherwise tell it which files to load (there can be more than one) like
//
//		godotenv.Overload("fileone", "filetwo")
//
// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
func Overload(filenames ...string) (err error) {
	filenames = filenamesOrDefault(filenames)

	for _, filename := range filenames {
		// overload=true: values from the file replace existing variables.
		err = loadFile(filename, true)
		if err != nil {
			return // return early on a spazout
		}
	}
	return
}
+
// Read all env (with same file loading semantics as Load) but return values as
// a map rather than automatically writing values into env.
func Read(filenames ...string) (envMap map[string]string, err error) {
	filenames = filenamesOrDefault(filenames)
	envMap = make(map[string]string)

	for _, filename := range filenames {
		individualEnvMap, individualErr := readFile(filename)

		if individualErr != nil {
			err = individualErr
			return // return early on a spazout
		}

		// Merge into the combined map; later files win on duplicate keys.
		for key, value := range individualEnvMap {
			envMap[key] = value
		}
	}

	return
}
+
+// Exec loads env vars from the specified filenames (empty map falls back to default)
+// then executes the cmd specified.
+//
+// Simply hooks up os.Stdin/err/out to the command and calls Run()
+//
+// If you want more fine grained control over your command it's recommended
+// that you use `Load()` or `Read()` and the `os/exec` package yourself.
+func Exec(filenames []string, cmd string, cmdArgs []string) error {
+ Load(filenames...)
+
+ command := exec.Command(cmd, cmdArgs...)
+ command.Stdin = os.Stdin
+ command.Stdout = os.Stdout
+ command.Stderr = os.Stderr
+ return command.Run()
+}
+
// filenamesOrDefault returns the given filenames unchanged, falling
// back to the single default file ".env" when none were supplied.
func filenamesOrDefault(filenames []string) []string {
	if len(filenames) > 0 {
		return filenames
	}
	return []string{".env"}
}
+
// loadFile reads a single env file and applies each key/value to the
// process environment. Existing variables are only replaced when
// overload is true.
func loadFile(filename string, overload bool) error {
	envMap, err := readFile(filename)
	if err != nil {
		return err
	}

	for key, value := range envMap {
		// Only set when the variable is unset/empty, unless overloading.
		// NOTE(review): a variable explicitly set to "" is treated as unset.
		if os.Getenv(key) == "" || overload {
			os.Setenv(key, value)
		}
	}

	return nil
}
+
+func readFile(filename string) (envMap map[string]string, err error) {
+ file, err := os.Open(filename)
+ if err != nil {
+ return
+ }
+ defer file.Close()
+
+ envMap = make(map[string]string)
+
+ var lines []string
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+ lines = append(lines, scanner.Text())
+ }
+
+ for _, fullLine := range lines {
+ if !isIgnoredLine(fullLine) {
+ key, value, err := parseLine(fullLine)
+
+ if err == nil {
+ envMap[key] = value
+ }
+ }
+ }
+ return
+}
+
// parseLine parses one line of an env file into a key and a value.
// It strips unquoted trailing comments, an optional "export" prefix,
// and surrounding spaces; it accepts both "KEY=value" and YAML-style
// "KEY: value" forms; and it unquotes values wrapped in matching
// quotes, expanding escaped quotes and "\n" sequences inside them.
// An error is returned for empty or unsplittable lines.
func parseLine(line string) (key string, value string, err error) {
	if len(line) == 0 {
		err = errors.New("zero length string")
		return
	}

	// ditch the comments (but keep quoted hashes)
	if strings.Contains(line, "#") {
		segmentsBetweenHashes := strings.Split(line, "#")
		quotesAreOpen := false
		var segmentsToKeep []string
		for _, segment := range segmentsBetweenHashes {
			// A segment containing a single quote character toggles whether
			// we are inside a quoted value that happens to contain '#'.
			if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 {
				if quotesAreOpen {
					quotesAreOpen = false
					segmentsToKeep = append(segmentsToKeep, segment)
				} else {
					quotesAreOpen = true
				}
			}

			// Keep the first segment always; later segments only while a
			// quote is still open (everything else is comment text).
			if len(segmentsToKeep) == 0 || quotesAreOpen {
				segmentsToKeep = append(segmentsToKeep, segment)
			}
		}

		line = strings.Join(segmentsToKeep, "#")
	}

	// now split key from value
	splitString := strings.SplitN(line, "=", 2)

	if len(splitString) != 2 {
		// try yaml mode!
		splitString = strings.SplitN(line, ":", 2)
	}

	if len(splitString) != 2 {
		err = errors.New("Can't separate key from value")
		return
	}

	// Parse the key
	key = splitString[0]
	if strings.HasPrefix(key, "export") {
		key = strings.TrimPrefix(key, "export")
	}
	key = strings.Trim(key, " ")

	// Parse the value
	value = splitString[1]
	// trim
	value = strings.Trim(value, " ")

	// check if we've got quoted values
	if strings.Count(value, "\"") == 2 || strings.Count(value, "'") == 2 {
		// pull the quotes off the edges
		value = strings.Trim(value, "\"'")

		// expand quotes
		value = strings.Replace(value, "\\\"", "\"", -1)
		// expand newlines
		value = strings.Replace(value, "\\n", "\n", -1)
	}

	return
}
+
// isIgnoredLine reports whether the line carries no data for the
// parser: it is blank (only spaces, tabs, or newlines) or it is a
// full-line comment starting with '#'.
func isIgnoredLine(line string) bool {
	trimmed := strings.Trim(line, " \n\t")
	if trimmed == "" {
		return true
	}
	return strings.HasPrefix(trimmed, "#")
}
diff --git a/vendor/github.com/joho/godotenv/wercker.yml b/vendor/github.com/joho/godotenv/wercker.yml
new file mode 100644
index 00000000..c716ac92
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/wercker.yml
@@ -0,0 +1 @@
+box: pjvds/golang
diff --git a/vendor/github.com/samalba/dockerclient/types.go b/vendor/github.com/samalba/dockerclient/types.go
index 7ba79915..193c72c5 100644
--- a/vendor/github.com/samalba/dockerclient/types.go
+++ b/vendor/github.com/samalba/dockerclient/types.go
@@ -91,7 +91,7 @@ type HostConfig struct {
VolumeDriver string
OomScoreAdj int
Tmpfs map[string]string
- ShmSize int64
+ ShmSize int64 `json:"omitempty"`
BlkioWeightDevice []WeightDevice
BlkioDeviceReadBps []ThrottleDevice
BlkioDeviceWriteBps []ThrottleDevice
@@ -195,6 +195,10 @@ func (s *State) String() string {
return "Dead"
}
+ if s.StartedAt.IsZero() {
+ return "Created"
+ }
+
if s.FinishedAt.IsZero() {
return ""
}
@@ -219,6 +223,10 @@ func (s *State) StateString() string {
return "dead"
}
+ if s.StartedAt.IsZero() {
+ return "created"
+ }
+
return "exited"
}
@@ -536,12 +544,14 @@ type BuildImage struct {
CpuSetMems string
CgroupParent string
BuildArgs map[string]string
+ Labels map[string]string // Labels hold metadata about the image
}
type Volume struct {
- Name string // Name is the name of the volume
- Driver string // Driver is the Driver name used to create the volume
- Mountpoint string // Mountpoint is the location on disk of the volume
+ Name string // Name is the name of the volume
+ Driver string // Driver is the Driver name used to create the volume
+ Mountpoint string // Mountpoint is the location on disk of the volume
+ Labels map[string]string // Labels hold metadata about the volume
}
type VolumesListResponse struct {
@@ -552,6 +562,7 @@ type VolumeCreateRequest struct {
Name string // Name is the requested name of the volume
Driver string // Driver is the name of the driver that should be used to create the volume
DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume.
+ Labels map[string]string // Labels hold metadata about the volume
}
// IPAM represents IP Address Management
@@ -585,6 +596,7 @@ type NetworkResource struct {
//Internal bool
Containers map[string]EndpointResource
Options map[string]string
+ Labels map[string]string // Labels hold metadata about the network
}
// EndpointResource contains network resources allocated and used for a container in a network
@@ -604,6 +616,7 @@ type NetworkCreate struct {
IPAM IPAM
Internal bool
Options map[string]string
+ Labels map[string]string // Labels hold metadata about the network
}
// NetworkCreateResponse is the response message sent by the server for network create call
diff --git a/vendor/gopkg.in/djherbis/atime.v1/LICENSE b/vendor/gopkg.in/djherbis/atime.v1/LICENSE
new file mode 100644
index 00000000..1e7b7cc0
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Dustin H
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/gopkg.in/djherbis/atime.v1/README.md b/vendor/gopkg.in/djherbis/atime.v1/README.md
new file mode 100644
index 00000000..a96873ca
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/README.md
@@ -0,0 +1,39 @@
+atime
+==========
+
+[![GoDoc](https://godoc.org/github.com/djherbis/atime?status.svg)](https://godoc.org/github.com/djherbis/atime)
+[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt)
+[![Build Status](https://travis-ci.org/djherbis/atime.svg?branch=master)](https://travis-ci.org/djherbis/atime)
+[![Coverage Status](https://coveralls.io/repos/djherbis/atime/badge.svg?branch=master)](https://coveralls.io/r/djherbis/atime?branch=master)
+
+Usage
+------------
+File Access Times for #golang
+
+Looking for ctime or btime? Checkout https://github.com/djherbis/times
+
+Go has a hidden atime function for most platforms, this repo makes it accessible.
+
+```go
+package main
+
+import (
+ "log"
+
+ "github.com/djherbis/atime"
+)
+
+func main() {
+ at, err := atime.Stat("myfile")
+ if err != nil {
+ log.Fatal(err.Error())
+ }
+ log.Println(at)
+}
+```
+
+Installation
+------------
+```sh
+go get github.com/djherbis/atime
+```
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_darwin.go b/vendor/gopkg.in/djherbis/atime.v1/atime_darwin.go
new file mode 100644
index 00000000..ccf7ebc3
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_darwin.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_darwin.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // darwin keeps the access time in Stat_t.Atimespec
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_dragonfly.go b/vendor/gopkg.in/djherbis/atime.v1/atime_dragonfly.go
new file mode 100644
index 00000000..cd7619e6
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_dragonfly.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_dragonfly.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // dragonfly keeps the access time in Stat_t.Atim
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_freebsd.go b/vendor/gopkg.in/djherbis/atime.v1/atime_freebsd.go
new file mode 100644
index 00000000..ec7bb8b5
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_freebsd.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_freebsd.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // freebsd keeps the access time in Stat_t.Atimespec
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_linux.go b/vendor/gopkg.in/djherbis/atime.v1/atime_linux.go
new file mode 100644
index 00000000..b8827bb3
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_linux.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_linux.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // linux keeps the access time in Stat_t.Atim
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_nacl.go b/vendor/gopkg.in/djherbis/atime.v1/atime_nacl.go
new file mode 100644
index 00000000..ed257513
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_nacl.go
@@ -0,0 +1,22 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_nacl.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(sec, nsec int64) time.Time { // nacl supplies seconds and nanoseconds as plain int64 fields
+ return time.Unix(sec, nsec)
+}
+
+func atime(fi os.FileInfo) time.Time {
+ st := fi.Sys().(*syscall.Stat_t)
+ return timespecToTime(st.Atime, st.AtimeNsec) // access time is split across two fields on nacl
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_netbsd.go b/vendor/gopkg.in/djherbis/atime.v1/atime_netbsd.go
new file mode 100644
index 00000000..6919d05a
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_netbsd.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_netbsd.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // netbsd keeps the access time in Stat_t.Atimespec
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atimespec)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_openbsd.go b/vendor/gopkg.in/djherbis/atime.v1/atime_openbsd.go
new file mode 100644
index 00000000..3188a073
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_openbsd.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_openbsd.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // openbsd keeps the access time in Stat_t.Atim
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_plan9.go b/vendor/gopkg.in/djherbis/atime.v1/atime_plan9.go
new file mode 100644
index 00000000..1b3bb972
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_plan9.go
@@ -0,0 +1,17 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_plan9.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func atime(fi os.FileInfo) time.Time { // plan9 Dir.Atime holds whole seconds only, so nanoseconds are 0
+ return time.Unix(int64(fi.Sys().(*syscall.Dir).Atime), 0)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_solaris.go b/vendor/gopkg.in/djherbis/atime.v1/atime_solaris.go
new file mode 100644
index 00000000..28175a7d
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_solaris.go
@@ -0,0 +1,21 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_solaris.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func timespecToTime(ts syscall.Timespec) time.Time { // converts a syscall.Timespec into a time.Time
+ return time.Unix(int64(ts.Sec), int64(ts.Nsec))
+}
+
+func atime(fi os.FileInfo) time.Time { // solaris keeps the access time in Stat_t.Atim
+ return timespecToTime(fi.Sys().(*syscall.Stat_t).Atim)
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/atime_windows.go b/vendor/gopkg.in/djherbis/atime.v1/atime_windows.go
new file mode 100644
index 00000000..8a15146f
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/atime_windows.go
@@ -0,0 +1,17 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// http://golang.org/src/os/stat_windows.go
+
+package atime
+
+import (
+ "os"
+ "syscall"
+ "time"
+)
+
+func atime(fi os.FileInfo) time.Time { // LastAccessTime is a FILETIME; Nanoseconds() converts it to the Unix epoch
+ return time.Unix(0, fi.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
+}
diff --git a/vendor/gopkg.in/djherbis/atime.v1/stat.go b/vendor/gopkg.in/djherbis/atime.v1/stat.go
new file mode 100644
index 00000000..eb658e14
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/atime.v1/stat.go
@@ -0,0 +1,21 @@
+// Package atime provides a platform-independent way to get atimes for files.
+package atime
+
+import (
+ "os"
+ "time"
+)
+
+// Get returns the Last Access Time for the given FileInfo
+func Get(fi os.FileInfo) time.Time {
+ return atime(fi) // delegates to the platform-specific implementation selected at build time
+}
+
+// Stat returns the Last Access Time for the given filename
+func Stat(name string) (time.Time, error) {
+ fi, err := os.Stat(name)
+ if err != nil {
+ return time.Time{}, err // zero time when the file cannot be stat'ed
+ }
+ return atime(fi), nil
+}
diff --git a/vendor/gopkg.in/djherbis/stream.v1/LICENSE b/vendor/gopkg.in/djherbis/stream.v1/LICENSE
new file mode 100644
index 00000000..1e7b7cc0
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2015 Dustin H
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/gopkg.in/djherbis/stream.v1/README.md b/vendor/gopkg.in/djherbis/stream.v1/README.md
new file mode 100644
index 00000000..d6034d8e
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/README.md
@@ -0,0 +1,80 @@
+stream
+==========
+
+[![GoDoc](https://godoc.org/github.com/djherbis/stream?status.svg)](https://godoc.org/github.com/djherbis/stream)
+[![Release](https://img.shields.io/github/release/djherbis/stream.svg)](https://github.com/djherbis/stream/releases/latest)
+[![Software License](https://img.shields.io/badge/license-MIT-brightgreen.svg)](LICENSE.txt)
+[![Build Status](https://travis-ci.org/djherbis/stream.svg?branch=master)](https://travis-ci.org/djherbis/stream)
+[![Coverage Status](https://coveralls.io/repos/djherbis/stream/badge.svg?branch=master)](https://coveralls.io/r/djherbis/stream?branch=master)
+
+Usage
+------------
+
+Write and Read concurrently, and independently.
+
+To explain further, if you need to write to multiple places you can use io.MultiWriter,
+if you need multiple Readers on something you can use io.TeeReader. If you want concurrency you can use io.Pipe().
+
+However all of these methods "tie" each Read/Write together, your readers can't read from different places in the stream, each write must be distributed to all readers in sequence.
+
+This package provides a way for multiple Readers to read off the same Writer, without waiting for the others. This is done by writing to a "File" interface which buffers the input so it can be read at any time from many independent readers. Readers can even be created while writing or after the stream is closed. They will all see a consistent view of the stream and will block until the section of the stream they request is written, all while being unaffected by the actions of the other readers.
+
+The use case for this stems from my other project djherbis/fscache. I needed a byte caching mechanism which allowed many independent clients to have access to the data while it was being written, rather than re-generating the byte stream for each of them or waiting for a complete copy of the stream which could be stored and then re-used.
+
+```go
+import(
+ "io"
+ "log"
+ "os"
+ "time"
+
+ "github.com/djherbis/stream"
+)
+
+func main(){
+ w, err := stream.New("mystream")
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ go func(){
+ io.WriteString(w, "Hello World!")
+ <-time.After(time.Second)
+ io.WriteString(w, "Streaming updates...")
+ w.Close()
+ }()
+
+ waitForReader := make(chan struct{})
+ go func(){
+ // Read from the stream
+ r, err := w.NextReader()
+ if err != nil {
+ log.Fatal(err)
+ }
+ io.Copy(os.Stdout, r) // Hello World! (1 second) Streaming updates...
+ r.Close()
+ close(waitForReader)
+ }()
+
+ // Full copy of the stream!
+ r, err := w.NextReader()
+ if err != nil {
+ log.Fatal(err)
+ }
+ io.Copy(os.Stdout, r) // Hello World! (1 second) Streaming updates...
+
+ // r supports io.ReaderAt too.
+ p := make([]byte, 4)
+ r.ReadAt(p, 1) // Read "ello" into p
+
+ r.Close()
+
+ <-waitForReader // don't leave main before go-routine finishes
+}
+```
+
+Installation
+------------
+```sh
+go get github.com/djherbis/stream
+```
diff --git a/vendor/gopkg.in/djherbis/stream.v1/fs.go b/vendor/gopkg.in/djherbis/stream.v1/fs.go
new file mode 100644
index 00000000..fe808bf0
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/fs.go
@@ -0,0 +1,39 @@
+package stream
+
+import (
+ "io"
+ "os"
+)
+
+// File is a backing data-source for a Stream.
+type File interface {
+ Name() string // The name used to Create/Open the File
+ io.Reader // Reader must continue reading after EOF on subsequent calls after more Writes.
+ io.ReaderAt // Similarly to Reader
+ io.Writer // Concurrent reading/writing must be supported.
+ io.Closer // Close should do any cleanup when done with the File.
+}
+
+// FileSystem is used to manage Files
+type FileSystem interface {
+ Create(name string) (File, error) // Create must return a new File for Writing
+ Open(name string) (File, error) // Open must return an existing File for Reading
+ Remove(name string) error // Remove deletes an existing File
+}
+
+// StdFileSystem is backed by the os package.
+var StdFileSystem FileSystem = stdFS{}
+
+type stdFS struct{}
+
+func (fs stdFS) Create(name string) (File, error) {
+ return os.Create(name) // *os.File satisfies the File interface directly
+}
+
+func (fs stdFS) Open(name string) (File, error) {
+ return os.Open(name) // opened read-only, as required for Readers
+}
+
+func (fs stdFS) Remove(name string) error {
+ return os.Remove(name)
+}
diff --git a/vendor/gopkg.in/djherbis/stream.v1/memfs.go b/vendor/gopkg.in/djherbis/stream.v1/memfs.go
new file mode 100644
index 00000000..b4432ae3
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/memfs.go
@@ -0,0 +1,107 @@
+package stream
+
+import (
+ "bytes"
+ "errors"
+ "io"
+ "sync"
+)
+
+// ErrNotFoundInMem is returned when an in-memory FileSystem cannot find a file.
+var ErrNotFoundInMem = errors.New("not found")
+
+type memfs struct {
+ mu sync.RWMutex // guards files
+ files map[string]*memFile
+}
+
+// NewMemFS returns a New in-memory FileSystem
+func NewMemFS() FileSystem {
+ return &memfs{
+ files: make(map[string]*memFile),
+ }
+}
+
+func (fs *memfs) Create(key string) (File, error) { // Create makes (or replaces) the in-memory file stored under key
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+
+ file := &memFile{
+ name: key,
+ r: bytes.NewBuffer(nil),
+ }
+ file.memReader.memFile = file // wire the embedded reader back to its owning file
+ fs.files[key] = file
+ return file, nil
+}
+
+func (fs *memfs) Open(key string) (File, error) { // Open returns a new, independent reader over an existing file
+ fs.mu.RLock()
+ defer fs.mu.RUnlock()
+
+ if f, ok := fs.files[key]; ok {
+ return &memReader{memFile: f}, nil
+ }
+ return nil, ErrNotFoundInMem
+}
+
+func (fs *memfs) Remove(key string) error { // Remove forgets the file; already-open readers keep their reference
+ fs.mu.Lock()
+ defer fs.mu.Unlock()
+ delete(fs.files, key)
+ return nil
+}
+
+type memFile struct {
+ mu sync.RWMutex // guards r
+ name string
+ r *bytes.Buffer
+ memReader // embedded so the file value itself is also readable
+}
+
+func (f *memFile) Name() string {
+ return f.name
+}
+
+func (f *memFile) Write(p []byte) (int, error) {
+ if len(p) > 0 { // skip locking entirely for empty writes
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ return f.r.Write(p)
+ }
+ return len(p), nil
+}
+
+func (f *memFile) Bytes() []byte {
+ f.mu.RLock()
+ defer f.mu.RUnlock()
+ return f.r.Bytes() // NOTE(review): exposes the buffer's backing array; callers must treat it as read-only
+}
+
+func (f *memFile) Close() error {
+ return nil // nothing to release for an in-memory file
+}
+
+type memReader struct {
+ *memFile
+ n int // read cursor: bytes consumed by Read so far
+}
+
+func (r *memReader) ReadAt(p []byte, off int64) (n int, err error) {
+ data := r.Bytes()
+ if int64(len(data)) < off { // offset beyond what has been written so far
+ return 0, io.EOF
+ }
+ n, err = bytes.NewReader(data[off:]).ReadAt(p, 0)
+ return n, err
+}
+
+func (r *memReader) Read(p []byte) (n int, err error) {
+ n, err = bytes.NewReader(r.Bytes()[r.n:]).Read(p)
+ r.n += n // advance the cursor for the next Read
+ return n, err
+}
+
+func (r *memReader) Close() error {
+ return nil
+}
diff --git a/vendor/gopkg.in/djherbis/stream.v1/reader.go b/vendor/gopkg.in/djherbis/stream.v1/reader.go
new file mode 100644
index 00000000..83212708
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/reader.go
@@ -0,0 +1,82 @@
+package stream
+
+import "io"
+
+// Reader is a concurrent-safe Stream Reader.
+type Reader struct {
+ s *Stream // owning Stream, used for broadcast waits and reference counting
+ file File
+}
+
+// Name returns the name of the underlying File in the FileSystem.
+func (r *Reader) Name() string { return r.file.Name() }
+
+// ReadAt lets you Read from specific offsets in the Stream.
+// ReadAt blocks while waiting for the requested section of the Stream to be written,
+// unless the Stream is closed in which case it will always return immediately.
+func (r *Reader) ReadAt(p []byte, off int64) (n int, err error) {
+ r.s.b.RLock()
+ defer r.s.b.RUnlock()
+
+ var m int
+
+ for {
+
+ m, err = r.file.ReadAt(p[n:], off+int64(n))
+ n += m // accumulate bytes across retries
+
+ if r.s.b.IsOpen() {
+
+ switch {
+ case n != 0 && err == nil:
+ return n, err
+ case err == io.EOF:
+ r.s.b.Wait() // requested section not written yet; wait for the next Write broadcast
+ case err != nil:
+ return n, err
+ }
+
+ } else {
+ return n, err // stream closed: return whatever is available, including io.EOF
+ }
+
+ }
+}
+
+// Read reads from the Stream. If the end of an open Stream is reached, Read
+// blocks until more data is written or the Stream is Closed.
+func (r *Reader) Read(p []byte) (n int, err error) {
+ r.s.b.RLock()
+ defer r.s.b.RUnlock()
+
+ var m int
+
+ for {
+
+ m, err = r.file.Read(p[n:])
+ n += m // accumulate bytes across retries
+
+ if r.s.b.IsOpen() {
+
+ switch {
+ case n != 0 && err == nil:
+ return n, err
+ case err == io.EOF:
+ r.s.b.Wait() // at the current end of an open stream; wait for more writes
+ case err != nil:
+ return n, err
+ }
+
+ } else {
+ return n, err // stream closed: no further data will arrive
+ }
+
+ }
+}
+
+// Close closes this Reader on the Stream. This must be called when done with the
+// Reader or else the Stream cannot be Removed.
+func (r *Reader) Close() error {
+ defer r.s.dec() // release this reader's hold on the Stream even if the file Close fails
+ return r.file.Close()
+}
diff --git a/vendor/gopkg.in/djherbis/stream.v1/stream.go b/vendor/gopkg.in/djherbis/stream.v1/stream.go
new file mode 100644
index 00000000..a0b3e1a7
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/stream.go
@@ -0,0 +1,92 @@
+// Package stream provides a way to read and write to a synchronous buffered pipe, with multiple reader support.
+package stream
+
+import (
+ "errors"
+ "sync"
+)
+
+// ErrRemoving is returned when requesting a Reader on a Stream which is being Removed.
+var ErrRemoving = errors.New("cannot open a new reader while removing file")
+
+// Stream is used to concurrently Write and Read from a File.
+type Stream struct {
+ grp sync.WaitGroup // counts the writer plus every open Reader
+ b *broadcaster
+ file File
+ fs FileSystem
+ removing chan struct{} // closed by Remove to reject new readers
+}
+
+// New creates a new Stream from the StdFileSystem with Name "name".
+func New(name string) (*Stream, error) {
+ return NewStream(name, StdFileSystem)
+}
+
+// NewStream creates a new Stream with Name "name" in FileSystem fs.
+func NewStream(name string, fs FileSystem) (*Stream, error) {
+ f, err := fs.Create(name)
+ sf := &Stream{
+ file: f,
+ fs: fs,
+ b: newBroadcaster(),
+ removing: make(chan struct{}),
+ }
+ sf.inc() // the writer itself holds one reference until Close
+ return sf, err
+}
+
+// Name returns the name of the underlying File in the FileSystem.
+func (s *Stream) Name() string { return s.file.Name() }
+
+// Write writes p to the Stream. It's concurrent safe to be called with Stream's other methods.
+func (s *Stream) Write(p []byte) (int, error) {
+ defer s.b.Broadcast() // wake blocked readers after the write completes and the lock is released
+ s.b.Lock()
+ defer s.b.Unlock()
+ return s.file.Write(p)
+}
+
+// Close will close the active stream. This will cause Readers to return EOF once they have
+// read the entire stream.
+func (s *Stream) Close() error {
+ defer s.dec() // drop the writer's reference so Remove can proceed
+ defer s.b.Close()
+ s.b.Lock()
+ defer s.b.Unlock()
+ return s.file.Close()
+}
+
+// Remove will block until the Stream and all its Readers have been Closed,
+// at which point it will delete the underlying file. NextReader() will return
+// ErrRemoving if called after Remove.
+func (s *Stream) Remove() error {
+ close(s.removing) // NOTE(review): a second Remove would panic on double close — confirm callers invoke it once
+ s.grp.Wait()
+ return s.fs.Remove(s.file.Name())
+}
+
+// NextReader will return a concurrent-safe Reader for this stream. Each Reader will
+// see a complete and independent view of the stream, and can Read will the stream
+// is written to.
+func (s *Stream) NextReader() (*Reader, error) {
+ s.inc() // take the reference before checking removal so Remove cannot race past us
+
+ select {
+ case <-s.removing:
+ s.dec() // roll back the reference; the stream is being removed
+ return nil, ErrRemoving
+ default:
+ }
+
+ file, err := s.fs.Open(s.file.Name())
+ if err != nil {
+ s.dec() // roll back the reference on open failure
+ return nil, err
+ }
+
+ return &Reader{file: file, s: s}, nil
+}
+
+func (s *Stream) inc() { s.grp.Add(1) }
+func (s *Stream) dec() { s.grp.Done() }
diff --git a/vendor/gopkg.in/djherbis/stream.v1/sync.go b/vendor/gopkg.in/djherbis/stream.v1/sync.go
new file mode 100644
index 00000000..26096ed9
--- /dev/null
+++ b/vendor/gopkg.in/djherbis/stream.v1/sync.go
@@ -0,0 +1,34 @@
+package stream
+
+import (
+ "sync"
+ "sync/atomic"
+)
+
+type broadcaster struct {
+ sync.RWMutex // writers take Lock; readers take RLock (also backs the condition variable)
+ closed uint32 // set to 1 by Close; always read atomically
+ *sync.Cond
+}
+
+func newBroadcaster() *broadcaster {
+ var b broadcaster
+ b.Cond = sync.NewCond(b.RWMutex.RLocker()) // Wait releases and reacquires the read lock
+ return &b
+}
+
+func (b *broadcaster) Wait() {
+ if b.IsOpen() { // never block once the broadcaster has been closed
+ b.Cond.Wait()
+ }
+}
+
+func (b *broadcaster) IsOpen() bool {
+ return atomic.LoadUint32(&b.closed) == 0
+}
+
+func (b *broadcaster) Close() error {
+ atomic.StoreUint32(&b.closed, 1)
+ b.Cond.Broadcast() // wake every waiter so it can observe the closed state
+ return nil
+}
diff --git a/vendor/vendor.json b/vendor/vendor.json
index 6b229542..54157d0e 100644
--- a/vendor/vendor.json
+++ b/vendor/vendor.json
@@ -9,8 +9,13 @@
},
{
"path": "github.com/Sirupsen/logrus",
- "revision": "273bd5984cd7deae8d4b71b0ba9bfc5767f7284b",
- "revisionTime": "2015-02-17T12:42:44-05:00"
+ "revision": "4b6ea7319e214d98c938f12692336f7ca9348d6b",
+ "revisionTime": "2016-03-17T14:11:10Z"
+ },
+ {
+ "path": "github.com/cloudfoundry-incubator/candiedyaml",
+ "revision": "5cef21e2e4f0fd147973b558d4db7395176bcd95",
+ "revisionTime": "2016-03-22T13:50:45-07:00"
},
{
"origin": "github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew",
@@ -23,6 +28,11 @@
"revision": "c1da56349675b292d3200463e2c88b9aa5e02391",
"revisionTime": "2015-09-04T14:24:56-07:00"
},
+ {
+ "path": "github.com/djherbis/fscache",
+ "revision": "ffc728270b01f3906c396bbe796232b87750f24e",
+ "revisionTime": "2016-03-05T10:30:05-08:00"
+ },
{
"path": "github.com/docker/docker/pkg/stdcopy",
"revision": "9356c76d9f6e285e71f04df33ef7870455a42775",
@@ -53,6 +63,16 @@
"revision": "889391d730237f8aca06ce3e62975112983f96b4",
"revisionTime": "2016-01-23T18:11:54-03:00"
},
+ {
+ "path": "github.com/ghodss/yaml",
+ "revision": "1a6f069841556a7bcaff4a397ca6e8328d266c2f",
+ "revisionTime": "2016-03-07T13:52:05-08:00"
+ },
+ {
+ "path": "github.com/gin-gonic/contrib/ginrus",
+ "revision": "14f66d54cdb96059bafca98665bcc6d9df4951f2",
+ "revisionTime": "2015-08-15T19:25:43+02:00"
+ },
{
"path": "github.com/gin-gonic/gin",
"revision": "3d002e382355cafc15d706b92899b1961d5b79e9",
@@ -98,6 +118,21 @@
"revision": "1b0c7f6e9ab3d7f500fd7d50c7ad835ff428139b",
"revisionTime": "2014-04-09T13:11:00+02:00"
},
+ {
+ "path": "github.com/ianschenck/envflag",
+ "revision": "9111d830d133f952887a936367fb0211c3134f0d",
+ "revisionTime": "2014-07-20T15:03:42-06:00"
+ },
+ {
+ "path": "github.com/joho/godotenv",
+ "revision": "4ed13390c0acd2ff4e371e64d8b97c8954138243",
+ "revisionTime": "2015-09-07T11:02:28+10:00"
+ },
+ {
+ "path": "github.com/joho/godotenv/autoload",
+ "revision": "4ed13390c0acd2ff4e371e64d8b97c8954138243",
+ "revisionTime": "2015-09-07T11:02:28+10:00"
+ },
{
"path": "github.com/koding/cache",
"revision": "487fc0ca06f9aa1a02d796f5510784b47d5afae2",
@@ -146,9 +181,11 @@
"revisionTime": "2013-09-19T15:23:15-06:00"
},
{
+ "checksumSHA1": "+HvW+k8YkDaPKwF0Lwcz+Tf2A+E=",
+ "origin": "github.com/drone/drone/vendor/github.com/samalba/dockerclient",
"path": "github.com/samalba/dockerclient",
- "revision": "f274bbd0e2eb35ad1444dc6e6660214f9fbbc08c",
- "revisionTime": "2016-02-22T16:23:45-08:00"
+ "revision": "91d7393ff85980ba3a8966405871a3d446ca28f2",
+ "revisionTime": "2016-04-14T17:47:13Z"
},
{
"path": "github.com/square/go-jose",
@@ -196,6 +233,16 @@
"revision": "8a57ed94ffd43444c0879fe75701732a38afc985",
"revisionTime": "2015-12-29T21:02:54-07:00"
},
+ {
+ "path": "gopkg.in/djherbis/atime.v1",
+ "revision": "8e47e0e01d08df8b9f840d74299c8ab70a024a30",
+ "revisionTime": "2015-08-29T00:19:25-07:00"
+ },
+ {
+ "path": "gopkg.in/djherbis/stream.v1",
+ "revision": "26a761059928627ca84837000dfb33447c66a146",
+ "revisionTime": "2016-02-03T22:24:40-08:00"
+ },
{
"path": "gopkg.in/go-playground/validator.v8",
"revision": "014792cf3e266caff1e916876be12282b33059e0",
@@ -211,5 +258,6 @@
"revision": "5d6f7e02b7cdad63b06ab3877915532cd30073b4",
"revisionTime": "2015-01-19T16:55:52-02:00"
}
- ]
+ ],
+ "rootPath": "github.com/drone/drone"
}
diff --git a/version/version.go b/version/version.go
index 4a15c723..e63f3e02 100644
--- a/version/version.go
+++ b/version/version.go
@@ -2,7 +2,7 @@ package version
import "fmt"
-const (
+var (
// VersionMajor is for an API incompatible changes
VersionMajor = 0
// VersionMinor is for functionality in a backwards-compatible manner
diff --git a/web/hook.go b/web/hook.go
index 87cac44c..a476e5f1 100644
--- a/web/hook.go
+++ b/web/hook.go
@@ -11,14 +11,12 @@ import (
log "github.com/Sirupsen/logrus"
"github.com/drone/drone/engine"
+ "github.com/drone/drone/engine/parser"
"github.com/drone/drone/model"
"github.com/drone/drone/remote"
- "github.com/drone/drone/router/middleware/context"
"github.com/drone/drone/shared/httputil"
"github.com/drone/drone/shared/token"
"github.com/drone/drone/store"
- "github.com/drone/drone/yaml"
- "github.com/drone/drone/yaml/matrix"
)
var (
@@ -149,41 +147,27 @@ func PostHook(c *gin.Context) {
// NOTE we don't exit on failure. The sec file is optional
}
- axes, err := matrix.Parse(string(raw))
+ axes, err := parser.ParseMatrix(raw)
if err != nil {
- log.Errorf("failure to calculate matrix for %s. %s", repo.FullName, err)
- c.AbortWithError(400, err)
+ c.String(500, "Failed to parse yaml file or calculate matrix. %s", err)
return
}
if len(axes) == 0 {
- axes = append(axes, matrix.Axis{})
+ axes = append(axes, parser.Axis{})
}
netrc, err := remote_.Netrc(user, repo)
if err != nil {
- log.Errorf("failure to generate netrc for %s. %s", repo.FullName, err)
- c.AbortWithError(500, err)
+ c.String(500, "Failed to generate netrc file. %s", err)
return
}
key, _ := store.GetKey(c, repo)
// verify the branches can be built vs skipped
- yconfig, _ := yaml.Parse(string(raw))
- var match = false
- for _, branch := range yconfig.Branches {
- if branch == build.Branch {
- match = true
- break
- }
- match, _ = filepath.Match(branch, build.Branch)
- if match {
- break
- }
- }
- if !match && len(yconfig.Branches) != 0 {
- log.Infof("ignoring hook. yaml file excludes repo and branch %s %s", repo.FullName, build.Branch)
- c.AbortWithStatus(200)
+ branches := parser.ParseBranch(raw)
+ if !branches.Matches(build.Branch) {
+ c.String(200, "Branch does not match restrictions defined in yaml")
return
}
@@ -220,7 +204,7 @@ func PostHook(c *gin.Context) {
// on status change notifications
last, _ := store.GetBuildLastBefore(c, repo, build.Branch, build.ID)
- engine_ := context.Engine(c)
+ engine_ := engine.FromContext(c)
go engine_.Schedule(c.Copy(), &engine.Task{
User: user,
Repo: repo,
diff --git a/web/slack.go b/web/slack.go
new file mode 100644
index 00000000..bb03422c
--- /dev/null
+++ b/web/slack.go
@@ -0,0 +1,113 @@
+package web
+
+import (
+ "strings"
+
+ "github.com/drone/drone/store"
+
+ "github.com/gin-gonic/gin"
+)
+
+const (
+ slashDeploy = "deploy"
+ slashRestart = "restart"
+ slashStatus = "status"
+)
+
+// Slack is a handler function that processes Slack slash commands.
+func Slack(c *gin.Context) {
+ command := c.Param("command")
+ text := c.PostForm("text")
+ args := strings.Split(text, " ")
+
+ if command == "" {
+ command = args[0]
+ args = args[1:]
+ }
+
+ switch command {
+ case slashStatus:
+ slackStatus(c, args)
+
+ case slashRestart:
+ slackRestart(c, args)
+
+ case slashDeploy:
+ slackDeploy(c, args)
+
+ default:
+ c.String(200, "sorry, I didn't understand [%s]", text)
+ }
+}
+
+func slackDeploy(c *gin.Context, args []string) {
+ if len(args) != 3 {
+ c.String(200, "Invalid command. Please provide [repo] [build number] [environment]")
+ return
+ }
+ var (
+ repo = args[0]
+ num = args[1]
+ env = args[2]
+ )
+ owner, name, _ := parseRepoBranch(repo)
+
+ c.String(200, "deploying build %s/%s#%s to %s", owner, name, num, env)
+}
+
+func slackRestart(c *gin.Context, args []string) {
+ var (
+ repo = args[0]
+ num = args[1]
+ )
+ owner, name, _ := parseRepoBranch(repo)
+
+ c.String(200, "restarting build %s/%s#%s", owner, name, num)
+}
+
+func slackStatus(c *gin.Context, args []string) {
+ var (
+ owner string
+ name string
+ branch string
+ )
+ if len(args) > 0 {
+ owner, name, branch = parseRepoBranch(args[0])
+ }
+
+ repo, err := store.GetRepoOwnerName(c, owner, name)
+ if err != nil {
+ c.String(200, "cannot find repository %s/%s", owner, name)
+ return
+ }
+ if branch == "" {
+ branch = repo.Branch
+ }
+ build, err := store.GetBuildLast(c, repo, branch)
+ if err != nil {
+ c.String(200, "cannot find status for %s/%s@%s", owner, name, branch)
+ return
+ }
+ c.String(200, "%s@%s build number %d finished with status %s",
+ repo.FullName,
+ build.Branch,
+ build.Number,
+ build.Status,
+ )
+}
+
+func parseRepoBranch(repo string) (owner, name, branch string) {
+
+ parts := strings.Split(repo, "@")
+ if len(parts) == 2 {
+ branch = parts[1]
+ repo = parts[0]
+ }
+
+ parts = strings.Split(repo, "/")
+ if len(parts) == 2 {
+ owner = parts[0]
+ name = parts[1]
+ }
+ return owner, name, branch
+}
diff --git a/web/stream.go b/web/stream.go
index e06b74ca..623beb82 100644
--- a/web/stream.go
+++ b/web/stream.go
@@ -8,7 +8,6 @@ import (
"github.com/docker/docker/pkg/stdcopy"
"github.com/drone/drone/engine"
- "github.com/drone/drone/router/middleware/context"
"github.com/drone/drone/router/middleware/session"
"github.com/drone/drone/store"
@@ -20,7 +19,7 @@ import (
// GetRepoEvents will upgrade the connection to a Websocket and will stream
// event updates to the browser.
func GetRepoEvents(c *gin.Context) {
- engine_ := context.Engine(c)
+ engine_ := engine.FromContext(c)
repo := session.Repo(c)
c.Writer.Header().Set("Content-Type", "text/event-stream")
@@ -55,7 +54,7 @@ func GetRepoEvents(c *gin.Context) {
func GetStream(c *gin.Context) {
- engine_ := context.Engine(c)
+ engine_ := engine.FromContext(c)
repo := session.Repo(c)
buildn, _ := strconv.Atoi(c.Param("build"))
jobn, _ := strconv.Atoi(c.Param("number"))
diff --git a/yaml/matrix/matrix.go b/yaml/matrix/matrix.go
deleted file mode 100644
index f22a4ada..00000000
--- a/yaml/matrix/matrix.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package matrix
-
-import (
- "strings"
-
- "gopkg.in/yaml.v2"
-)
-
-const (
- limitTags = 10
- limitAxis = 25
-)
-
-// Matrix represents the build matrix.
-type Matrix map[string][]string
-
-// Axis represents a single permutation of entries
-// from the build matrix.
-type Axis map[string]string
-
-// String returns a string representation of an Axis as
-// a comma-separated list of environment variables.
-func (a Axis) String() string {
- var envs []string
- for k, v := range a {
- envs = append(envs, k+"="+v)
- }
- return strings.Join(envs, " ")
-}
-
-// Parse parses the Matrix section of the yaml file and
-// returns a list of axis.
-func Parse(raw string) ([]Axis, error) {
- matrix, err := parseMatrix(raw)
- if err != nil {
- return nil, err
- }
-
- // if not a matrix build return an array
- // with just the single axis.
- if len(matrix) == 0 {
- return nil, nil
- }
-
- return Calc(matrix), nil
-}
-
-// Calc calculates the permutations for th build matrix.
-//
-// Note that this method will cap the number of permutations
-// to 25 to prevent an overly expensive calculation.
-func Calc(matrix Matrix) []Axis {
- // calculate number of permutations and
- // extract the list of tags
- // (ie go_version, redis_version, etc)
- var perm int
- var tags []string
- for k, v := range matrix {
- perm *= len(v)
- if perm == 0 {
- perm = len(v)
- }
- tags = append(tags, k)
- }
-
- // structure to hold the transformed
- // result set
- axisList := []Axis{}
-
- // for each axis calculate the uniqe
- // set of values that should be used.
- for p := 0; p < perm; p++ {
- axis := map[string]string{}
- decr := perm
- for i, tag := range tags {
- elems := matrix[tag]
- decr = decr / len(elems)
- elem := p / decr % len(elems)
- axis[tag] = elems[elem]
-
- // enforce a maximum number of tags
- // in the build matrix.
- if i > limitTags {
- break
- }
- }
-
- // append to the list of axis.
- axisList = append(axisList, axis)
-
- // enforce a maximum number of axis
- // that should be calculated.
- if p > limitAxis {
- break
- }
- }
-
- return axisList
-}
-
-// helper function to parse the Matrix data from
-// the raw yaml file.
-func parseMatrix(raw string) (Matrix, error) {
- data := struct {
- Matrix map[string][]string
- }{}
- err := yaml.Unmarshal([]byte(raw), &data)
- return data.Matrix, err
-}
diff --git a/yaml/yaml.go b/yaml/yaml.go
deleted file mode 100644
index 6767ba21..00000000
--- a/yaml/yaml.go
+++ /dev/null
@@ -1,16 +0,0 @@
-package yaml
-
-import (
- "gopkg.in/yaml.v2"
-)
-
-type Config struct {
- Debug bool `yaml:"debug"`
- Branches []string `yaml:"branches"`
-}
-
-func Parse(raw string) (*Config, error) {
- c := &Config{}
- err := yaml.Unmarshal([]byte(raw), c)
- return c, err
-}