Improve tty error logging when buildkit vertex is unknown (#2188)

* Improve tty error logging when buildkit vertex is unknown

Creates a generic "system" group in the tty output which captures
buildkit events that report a non-dagger vertex name. This currently
happens when using core.#Dockerfile actions, since Dagger delegates the
LLB generation to buildkit through its frontend and we don't get
meaningful events that we can correlate from Dagger's side.

Signed-off-by: Marcos Lilljedahl <marcosnils@gmail.com>
This commit is contained in:
Marcos Nils 2022-04-18 21:50:34 -03:00 committed by GitHub
parent 437f4517b6
commit 27d878456f
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
8 changed files with 163 additions and 87 deletions

View File

@ -23,6 +23,8 @@ type PlainOutput struct {
Out io.Writer
}
const systemGroup = "system"
func (c *PlainOutput) Write(p []byte) (int, error) {
event := map[string]interface{}{}
d := json.NewDecoder(bytes.NewReader(p))
@ -117,7 +119,7 @@ func formatMessage(event map[string]interface{}) string {
}
func parseSource(event map[string]interface{}) string {
source := "system"
source := systemGroup
if task, ok := event["task"].(string); ok && task != "" {
source = task
}

View File

@ -43,8 +43,14 @@ func (l *Logs) Add(event Event) error {
l.l.Lock()
defer l.l.Unlock()
// same thing as in plain.go group all the non-identified tasks
// into a general group called system
source := systemGroup
taskPath, ok := event["task"].(string)
if !ok {
if ok && len(taskPath) > 0 {
source = taskPath
} else if !ok {
l.Messages = append(l.Messages, Message{
Event: event,
})
@ -53,7 +59,7 @@ func (l *Logs) Add(event Event) error {
}
// Hide hidden fields (e.g. `._*`) from log group names
groupKey := strings.Split(taskPath, "._")[0]
groupKey := strings.Split(source, "._")[0]
group := l.groups[groupKey]
@ -78,13 +84,21 @@ func (l *Logs) Add(event Event) error {
// For each task in a group, the status will transition from computing to complete, then back to computing and so on.
// The transition is fast enough not to cause a problem.
if st, ok := event["state"].(string); ok {
group.State = task.State(st)
if t, err := task.ParseState(st); err != nil {
return err
// concurrent "system" tasks are the only exception to transition
// from another state to failed since we need to show the error to
// the user
} else if group.State.CanTransition(t) ||
(group.Name == systemGroup && t == task.StateFailed) {
group.State = t
if group.State == task.StateComputing {
group.Completed = nil
} else {
now := time.Now()
group.Completed = &now
}
}
return nil
}
@ -262,6 +276,10 @@ func (c *TTYOutput) printLine(w io.Writer, event Event, width int) int {
func (c *TTYOutput) printGroup(group *Group, width, maxLines int) int {
lineCount := 0
var out string
// treat the "system" group as a special case as we don't
// want it to be displayed as an action in the output
if group.Name != systemGroup {
prefix := ""
switch group.State {
case task.StateComputing:
@ -274,7 +292,7 @@ func (c *TTYOutput) printGroup(group *Group, width, maxLines int) int {
prefix = "[✔]"
}
out := prefix + " " + group.Name
out = prefix + " " + group.Name
endTime := time.Now()
if group.Completed != nil {
@ -307,6 +325,7 @@ func (c *TTYOutput) printGroup(group *Group, width, maxLines int) int {
// Print
fmt.Fprint(c.cons, out)
lineCount++
}
printEvents := []Event{}
switch group.State {

View File

@ -8,6 +8,7 @@ import (
"time"
"go.dagger.io/dagger/compiler"
"go.dagger.io/dagger/plan/task"
"go.dagger.io/dagger/plancontext"
"go.dagger.io/dagger/solver"
@ -15,6 +16,7 @@ import (
"cuelang.org/go/cue"
cueflow "cuelang.org/go/tools/flow"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"go.opentelemetry.io/otel"
)
@ -124,6 +126,15 @@ func (r *Runner) shouldRun(p cue.Path) bool {
return ok
}
// taskLog runs fn with a logger tagged ("task") with the task path tp, so
// the TTY/plain output can group the resulting events under that task.
//
// For Dockerfile tasks, fn is ALSO run with a logger tagged "system": LLB
// generation is delegated to buildkit's frontend, whose vertex events
// cannot be correlated to a dagger task and are grouped under the generic
// "system" group (see commit description).
// NOTE(review): fn is invoked twice for Dockerfile tasks, so each such
// event is emitted under both groups — confirm the duplication is intended.
func taskLog(tp string, log *zerolog.Logger, t task.Task, fn func(lg zerolog.Logger)) {
fn(log.With().Str("task", tp).Logger())
// Re-emit the same event under "system" for Dockerfile tasks (see note
// above on the deliberate double emission).
_, isDockerfileTask := t.(*task.DockerfileTask)
if isDockerfileTask {
fn(log.With().Str("task", "system").Logger())
}
}
func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
v := compiler.Wrap(flowVal)
handler, err := task.Lookup(v)
@ -142,31 +153,45 @@ func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
// Wrapper around `task.Run` that handles logging, tracing, etc.
return cueflow.RunnerFunc(func(t *cueflow.Task) error {
ctx := t.Context()
lg := log.Ctx(ctx).With().Str("task", t.Path().String()).Logger()
taskPath := t.Path().String()
lg := log.Ctx(ctx).With().Logger()
ctx = lg.WithContext(ctx)
ctx, span := otel.Tracer("dagger").Start(ctx, fmt.Sprintf("up: %s", t.Path().String()))
defer span.End()
lg.Info().Str("state", string(task.StateComputing)).Msg(string(task.StateComputing))
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Info().Str("state", task.StateComputing.String()).Msg(task.StateComputing.String())
})
// Debug: dump dependencies
for _, dep := range t.Dependencies() {
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Debug().Str("dependency", dep.Path().String()).Msg("dependency detected")
})
}
start := time.Now()
result, err := handler.Run(ctx, r.pctx, r.s, compiler.Wrap(t.Value()))
if err != nil {
// FIXME: this should use errdefs.IsCanceled(err)
// we don't wrap taskLog here since in some cases, actions could still be
// running in goroutines which will scramble outputs.
if strings.Contains(err.Error(), "context canceled") {
lg.Error().Dur("duration", time.Since(start)).Str("state", string(task.StateCanceled)).Msg(string(task.StateCanceled))
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Error().Dur("duration", time.Since(start)).Str("state", task.StateCanceled.String()).Msg(task.StateCanceled.String())
})
} else {
lg.Error().Dur("duration", time.Since(start)).Err(compiler.Err(err)).Str("state", string(task.StateFailed)).Msg(string(task.StateFailed))
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Error().Dur("duration", time.Since(start)).Err(compiler.Err(err)).Str("state", task.StateFailed.String()).Msg(task.StateFailed.String())
})
}
return fmt.Errorf("%s: %w", t.Path().String(), compiler.Err(err))
}
lg.Info().Dur("duration", time.Since(start)).Str("state", string(task.StateCompleted)).Msg(string(task.StateCompleted))
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Info().Dur("duration", time.Since(start)).Str("state", task.StateCompleted.String()).Msg(task.StateCompleted.String())
})
// If the result is not concrete (e.g. empty value), there's nothing to merge.
if !result.IsConcrete() {
@ -174,7 +199,9 @@ func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
}
if src, err := result.Source(); err == nil {
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Debug().Str("result", string(src)).Msg("merging task result")
})
}
// Mirror task result and re-scan tasks that should run.
@ -184,7 +211,9 @@ func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
// }
if err := t.Fill(result.Cue()); err != nil {
taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
lg.Error().Err(err).Msg("failed to fill task")
})
return err
}

View File

@ -22,13 +22,12 @@ import (
)
func init() {
Register("Dockerfile", func() Task { return &dockerfileTask{} })
Register("Dockerfile", func() Task { return &DockerfileTask{} })
}
type dockerfileTask struct {
}
type DockerfileTask struct{}
func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *DockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
auths, err := v.Lookup("auth").Fields()
if err != nil {
@ -76,7 +75,7 @@ func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *
}
dockerfileDef, err = s.Marshal(ctx,
llb.Scratch().File(
llb.Mkfile("/Dockerfile", 0644, []byte(contents)),
llb.Mkfile("/Dockerfile", 0o644, []byte(contents)),
),
)
if err != nil {
@ -139,7 +138,7 @@ func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *
})
}
func (t *dockerfileTask) dockerBuildOpts(v *compiler.Value, pctx *plancontext.Context) (map[string]string, error) {
func (t *DockerfileTask) dockerBuildOpts(v *compiler.Value, pctx *plancontext.Context) (map[string]string, error) {
opts := map[string]string{}
if dockerfilePath := v.Lookup("dockerfile.path"); dockerfilePath.Exists() {

View File

@ -28,13 +28,36 @@ var (
)
// State is the state of the task.
//
// Stored as a small integer so states are naturally ordered
// (computing < cancelled < failed < completed), which CanTransition
// relies on.
type State int8

// Task states, in transition order.
const (
	StateComputing State = iota
	StateCanceled
	StateFailed
	StateCompleted
)

// String returns the human-readable name of the state. Out-of-range
// values (e.g. the -1 sentinel returned by ParseState on error) yield
// "invalid" instead of panicking on an out-of-range array index.
func (s State) String() string {
	names := [...]string{"computing", "cancelled", "failed", "completed"}
	if s < 0 || int(s) >= len(names) {
		return "invalid"
	}
	return names[s]
}

// ParseState converts a state name (as produced by String) back into a
// State. It returns -1 and a descriptive error for unrecognized input.
func ParseState(s string) (State, error) {
	switch s {
	case "computing":
		return StateComputing, nil
	case "cancelled":
		return StateCanceled, nil
	case "failed":
		return StateFailed, nil
	case "completed":
		return StateCompleted, nil
	}
	return -1, fmt.Errorf("invalid state [%s]", s)
}

// CanTransition reports whether a task may move from state s to state t.
// Only a computing task may transition; the s <= t check additionally
// rejects a (never expected) negative target state.
func (s State) CanTransition(t State) bool {
	return s == StateComputing && s <= t
}
type NewFunc func() Task

View File

@ -12,13 +12,11 @@ import (
"go.dagger.io/dagger/pkg"
)
var (
fsIDPath = cue.MakePath(
var fsIDPath = cue.MakePath(
cue.Str("$dagger"),
cue.Str("fs"),
cue.Hid("_id", pkg.DaggerPackage),
)
)
func IsFSValue(v *compiler.Value) bool {
return v.LookupPath(fsIDPath).Exists()

View File

@ -25,9 +25,11 @@ const (
defaultDisplayTimeout = 100 * time.Millisecond
)
type VertexPrintFunc func(v *client.Vertex, index int)
type StatusPrintFunc func(v *client.Vertex, format string, a ...interface{})
type LogPrintFunc func(v *client.Vertex, stream int, partial bool, format string, a ...interface{})
type (
VertexPrintFunc func(v *client.Vertex, index int)
StatusPrintFunc func(v *client.Vertex, format string, a ...interface{})
LogPrintFunc func(v *client.Vertex, stream int, partial bool, format string, a ...interface{})
)
func PrintSolveStatus(ctx context.Context, ch chan *client.SolveStatus, vertexPrintCb VertexPrintFunc, statusPrintCb StatusPrintFunc, logPrintCb LogPrintFunc) error {
printer := &textMux{
@ -148,8 +150,10 @@ func DisplaySolveStatus(ctx context.Context, phase string, c console.Console, w
}
}
// Terminal display layout constants.
const (
	termHeight = 6
	termPad    = 10
)
type displayInfo struct {
startTime time.Time

View File

@ -11,10 +11,12 @@ import (
"github.com/tonistiigi/units"
)
const antiFlicker = 5 * time.Second
const maxDelay = 10 * time.Second
const minTimeDelta = 5 * time.Second
const minProgressDelta = 0.05 // %
const (
antiFlicker = 5 * time.Second
maxDelay = 10 * time.Second
minTimeDelta = 5 * time.Second
minProgressDelta = 0.05 // %
)
type lastStatus struct {
Current int64