harness-drone/drone/agent/agent.go

package agent

import (
	"context"
	"encoding/json"
	"io"
	"io/ioutil"
	"log"
	"math"
	"net/url"
	"strconv"
	"sync"
	"time"

	"github.com/cncd/pipeline/pipeline"
	"github.com/cncd/pipeline/pipeline/backend"
	"github.com/cncd/pipeline/pipeline/backend/docker"
	"github.com/cncd/pipeline/pipeline/interrupt"
	"github.com/cncd/pipeline/pipeline/multipart"
	"github.com/cncd/pipeline/pipeline/rpc"

	"github.com/drone/drone/version"

	"github.com/tevino/abool"
	"github.com/urfave/cli"
)
// Command exports the agent command.
var Command = cli.Command{
	Name:   "agent",
	Usage:  "starts the drone agent",
	Action: loop,
	Flags: []cli.Flag{
		cli.StringFlag{
			EnvVar: "DRONE_SERVER,DRONE_ENDPOINT",
			Name:   "drone-server",
			Usage:  "drone server address",
			Value:  "ws://localhost:8000/ws/broker",
		},
		cli.StringFlag{
			EnvVar: "DRONE_SECRET,DRONE_AGENT_SECRET",
			Name:   "drone-secret",
			Usage:  "drone agent secret",
		},
		cli.DurationFlag{
			EnvVar: "DRONE_BACKOFF",
			Name:   "backoff",
			Usage:  "drone server backoff interval",
			Value:  time.Second * 15,
		},
		cli.IntFlag{
			Name:   "retry-limit",
			EnvVar: "DRONE_RETRY_LIMIT",
			Value:  math.MaxInt32,
		},
		cli.BoolFlag{
			EnvVar: "DRONE_DEBUG",
			Name:   "debug",
			Usage:  "start the agent in debug mode",
		},
		cli.StringFlag{
			EnvVar: "DRONE_FILTER",
			Name:   "filter",
			Usage:  "filter jobs processed by this agent",
		},
		cli.IntFlag{
			Name:   "max-procs",
			EnvVar: "DRONE_MAX_PROCS",
			Value:  1,
		},
		cli.StringFlag{
			Name:   "platform",
			EnvVar: "DRONE_PLATFORM",
			Value:  "linux/amd64",
		},
	},
}
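// loop parses the server endpoint, opens the RPC client connection, and
// starts "max-procs" worker goroutines. Each worker pulls and executes
// builds until the process is interrupted or a runner returns an error.
//
// Example invocation (a sketch; the exact binary name, endpoint, and secret
// are placeholders that depend on your deployment):
//
//	drone agent \
//		--drone-server=ws://drone.example.com/ws/broker \
//		--drone-secret=<agent-secret> \
//		--max-procs=2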
func loop(c *cli.Context) error {
	endpoint, err := url.Parse(
		c.String("drone-server"),
	)
	if err != nil {
		return err
	}
	filter := rpc.Filter{
		Labels: map[string]string{
			"platform": c.String("platform"),
		},
	}

	client, err := rpc.NewClient(
		endpoint.String(),
		rpc.WithRetryLimit(
			c.Int("retry-limit"),
		),
		rpc.WithBackoff(
			c.Duration("backoff"),
		),
		rpc.WithToken(
			c.String("drone-secret"),
		),
		rpc.WithHeader(
			"X-Drone-Version",
			version.Version.String(),
		),
	)
	if err != nil {
		return err
	}
	defer client.Close()

	sigterm := abool.New()
	ctx := context.Background()
	ctx = interrupt.WithContextFunc(ctx, func() {
		println("ctrl+c received, terminating process")
		sigterm.Set()
	})

	var wg sync.WaitGroup
	parallel := c.Int("max-procs")
	wg.Add(parallel)

	for i := 0; i < parallel; i++ {
		go func() {
			defer wg.Done()
			for {
				if sigterm.IsSet() {
					return
				}
				if err := run(ctx, client, filter); err != nil {
					log.Printf("build runner encountered error: exiting: %s", err)
					return
				}
			}
		}()
	}

	wg.Wait()
	return nil
}
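// Per-part upload limits, in bytes, enforced with io.LimitReader when
// streaming step logs and file artifacts back to the server.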
const (
	maxFileUpload = 5000000
	maxLogsUpload = 5000000
)
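// run requests the next build from the queue, executes it against the local
// Docker engine, streams logs and artifacts back to the server, and reports
// the final state. A nil return keeps the calling worker loop polling for
// more work.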
func run(ctx context.Context, client rpc.Peer, filter rpc.Filter) error {
	log.Println("pipeline: request next execution")

	// get the next job from the queue
	work, err := client.Next(ctx, filter)
	if err != nil {
		return err
	}
	if work == nil {
		return nil
	}
	log.Printf("pipeline: received next execution: %s", work.ID)

	// new docker engine
	engine, err := docker.NewEnv()
	if err != nil {
		return err
	}
	timeout := time.Hour
	if minutes := work.Timeout; minutes != 0 {
		timeout = time.Duration(minutes) * time.Minute
	}

	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
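	// Watch for a cancellation signal from the server. client.Wait blocks
	// until the build is cancelled (returning an error) or the watch channel
	// is closed; on cancellation the pipeline context is torn down.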
	cancelled := abool.New()
	go func() {
		if werr := client.Wait(ctx, work.ID); werr != nil {
			cancelled.SetTo(true)
			log.Printf("pipeline: cancel signal received: %s: %s", work.ID, werr)
			cancel()
		} else {
			log.Printf("pipeline: cancel channel closed: %s", work.ID)
		}
	}()
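	// Ping the queue once a minute while the build runs so the server keeps
	// the job assigned to this agent (see client.Extend below).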
	go func() {
		for {
			select {
			case <-ctx.Done():
				log.Printf("pipeline: cancel ping loop: %s", work.ID)
				return
			case <-time.After(time.Minute):
				log.Printf("pipeline: ping queue: %s", work.ID)
				client.Extend(ctx, work.ID)
			}
		}
	}()
	state := rpc.State{}
	state.Started = time.Now().Unix()
	err = client.Init(context.Background(), work.ID, state)
	if err != nil {
		log.Printf("pipeline: error signaling pipeline init: %s: %s", work.ID, err)
	}
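	// uploads tracks in-flight log and artifact uploads. defaultLogger streams
	// each step's output back to the server, masking secrets flagged for
	// masking, then uploads the collected log lines and, if present, a file
	// artifact emitted by the step.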
	var uploads sync.WaitGroup
	defaultLogger := pipeline.LogFunc(func(proc *backend.Step, rc multipart.Reader) error {
		part, rerr := rc.NextPart()
		if rerr != nil {
			return rerr
		}
		uploads.Add(1)

		var secrets []string
		for _, secret := range work.Config.Secrets {
			if secret.Mask {
				secrets = append(secrets, secret.Value)
			}
		}

		limitedPart := io.LimitReader(part, maxLogsUpload)
		logstream := rpc.NewLineWriter(client, work.ID, proc.Alias, secrets...)
		io.Copy(logstream, limitedPart)

		file := &rpc.File{}
		file.Mime = "application/json+logs"
		file.Proc = proc.Alias
		file.Name = "logs.json"
		file.Data, _ = json.Marshal(logstream.Lines())
		file.Size = len(file.Data)
		file.Time = time.Now().Unix()

		if serr := client.Upload(context.Background(), work.ID, file); serr != nil {
			log.Printf("pipeline: cannot upload logs: %s: %s: %s", work.ID, file.Mime, serr)
		} else {
			log.Printf("pipeline: finish uploading logs: %s: step %s: %s", file.Mime, work.ID, proc.Alias)
		}

		defer func() {
			log.Printf("pipeline: finish uploading logs: %s: step %s", work.ID, proc.Alias)
			uploads.Done()
		}()

		part, rerr = rc.NextPart()
		if rerr != nil {
			return nil
		}

		// TODO should be configurable
		limitedPart = io.LimitReader(part, maxFileUpload)
		file = &rpc.File{}
		file.Mime = part.Header().Get("Content-Type")
		file.Proc = proc.Alias
		file.Name = part.FileName()
		file.Data, _ = ioutil.ReadAll(limitedPart)
		file.Size = len(file.Data)
		file.Time = time.Now().Unix()

		if serr := client.Upload(context.Background(), work.ID, file); serr != nil {
			log.Printf("pipeline: cannot upload artifact: %s: %s: %s", work.ID, file.Mime, serr)
		} else {
			log.Printf("pipeline: finish uploading artifact: %s: step %s: %s", file.Mime, work.ID, proc.Alias)
		}
		return nil
	})
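	// defaultTracer reports each step's state to the server and, before the
	// step runs, injects CI_* and DRONE_* status/started/finished variables
	// into the step environment.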
	defaultTracer := pipeline.TraceFunc(func(state *pipeline.State) error {
		procState := rpc.State{
			Proc:     state.Pipeline.Step.Alias,
			Exited:   state.Process.Exited,
			ExitCode: state.Process.ExitCode,
			Started:  time.Now().Unix(), // TODO do not do this
			Finished: time.Now().Unix(),
		}
		defer func() {
			if uerr := client.Update(context.Background(), work.ID, procState); uerr != nil {
				log.Printf("pipeline: error updating pipeline step status: %s: %s: %s", work.ID, procState.Proc, uerr)
			}
		}()
		if state.Process.Exited {
			return nil
		}
		if state.Pipeline.Step.Environment == nil {
			state.Pipeline.Step.Environment = map[string]string{}
		}

		state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "success"
		state.Pipeline.Step.Environment["CI_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["CI_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)

		state.Pipeline.Step.Environment["DRONE_BUILD_STATUS"] = "success"
		state.Pipeline.Step.Environment["DRONE_BUILD_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["DRONE_BUILD_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)

		state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "success"
		state.Pipeline.Step.Environment["CI_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["CI_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)

		state.Pipeline.Step.Environment["DRONE_JOB_STATUS"] = "success"
		state.Pipeline.Step.Environment["DRONE_JOB_STARTED"] = strconv.FormatInt(state.Pipeline.Time, 10)
		state.Pipeline.Step.Environment["DRONE_JOB_FINISHED"] = strconv.FormatInt(time.Now().Unix(), 10)

		if state.Pipeline.Error != nil {
			state.Pipeline.Step.Environment["CI_BUILD_STATUS"] = "failure"
			state.Pipeline.Step.Environment["CI_JOB_STATUS"] = "failure"
			state.Pipeline.Step.Environment["DRONE_BUILD_STATUS"] = "failure"
			state.Pipeline.Step.Environment["DRONE_JOB_STATUS"] = "failure"
		}
		return nil
	})
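	// Assemble and run the pipeline against the Docker engine, wiring in the
	// cancellable context, the log uploader, and the tracer defined above.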
	err = pipeline.New(work.Config,
		pipeline.WithContext(ctx),
		pipeline.WithLogger(defaultLogger),
		pipeline.WithTracer(defaultTracer),
		pipeline.WithEngine(engine),
	).Run()

	state.Finished = time.Now().Unix()
	state.Exited = true
	if err != nil {
		switch xerr := err.(type) {
		case *pipeline.ExitError:
			state.ExitCode = xerr.Code
		default:
			state.ExitCode = 1
			state.Error = err.Error()
		}
		// report cancelled builds with exit code 137, the conventional exit
		// status of a process killed with SIGKILL.
		if cancelled.IsSet() {
			state.ExitCode = 137
		}
	}
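	// Wait for any in-flight uploads to finish before reporting the final
	// build state to the server.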
	log.Printf("pipeline: execution complete: %s", work.ID)

	uploads.Wait()

	err = client.Done(context.Background(), work.ID, state)
	if err != nil {
		log.Printf("pipeline: error signaling pipeline done: %s: %s", work.ID, err)
	}

	return nil
}