Improve tty error logging when buildkit vertex is unknown (#2188)
* Improve tty error logging when buildkit vertex is unknown

Creates a generic "system" group in the tty output that captures buildkit
events reporting a non-dagger vertex name. This currently happens with
core.#Dockerfile actions, since Dagger delegates LLB generation to buildkit
through its frontend and therefore gets no meaningful events to correlate
on Dagger's side.

Signed-off-by: Marcos Lilljedahl <marcosnils@gmail.com>
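The core of the change is a dual-emit logging pattern: every event is logged under its own task group, and events from tasks whose buildkit vertices cannot be correlated are mirrored into a generic "system" group as well. A minimal, self-contained sketch of that pattern (the names mirrorLog and uncorrelated are illustrative, not part of the diff below):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// mirrorLog emits the same event under the task's own group and, when the
// task's buildkit vertices can't be correlated (reduced to a boolean here
// for illustration), under a generic "system" group as well.
func mirrorLog(taskPath string, base *zerolog.Logger, uncorrelated bool, fn func(lg zerolog.Logger)) {
	fn(base.With().Str("task", taskPath).Logger())
	if uncorrelated {
		fn(base.With().Str("task", "system").Logger())
	}
}

func main() {
	base := zerolog.New(os.Stderr)
	mirrorLog("actions.build", &base, true, func(lg zerolog.Logger) {
		lg.Info().Msg("computing")
	})
	// Emits two events: one tagged task="actions.build", one task="system".
}

A tty frontend can then bucket anything tagged task=system into its own group instead of dropping events it cannot attribute to a task.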
@@ -8,6 +8,7 @@ import (
 	"time"
 
 	"go.dagger.io/dagger/compiler"
+	"go.dagger.io/dagger/plan/task"
 	"go.dagger.io/dagger/plancontext"
 	"go.dagger.io/dagger/solver"
@@ -15,6 +16,7 @@ import (
 	"cuelang.org/go/cue"
 	cueflow "cuelang.org/go/tools/flow"
+	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
 	"go.opentelemetry.io/otel"
 )
@@ -124,6 +126,15 @@ func (r *Runner) shouldRun(p cue.Path) bool {
 	return ok
 }
 
+func taskLog(tp string, log *zerolog.Logger, t task.Task, fn func(lg zerolog.Logger)) {
+	fn(log.With().Str("task", tp).Logger())
+	// mirror Dockerfile task events into the generic "system" group as well
+	_, isDockerfileTask := t.(*task.DockerfileTask)
+	if isDockerfileTask {
+		fn(log.With().Str("task", "system").Logger())
+	}
+}
+
 func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
 	v := compiler.Wrap(flowVal)
 	handler, err := task.Lookup(v)
@@ -142,31 +153,45 @@ func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
 	// Wrapper around `task.Run` that handles logging, tracing, etc.
 	return cueflow.RunnerFunc(func(t *cueflow.Task) error {
 		ctx := t.Context()
-		lg := log.Ctx(ctx).With().Str("task", t.Path().String()).Logger()
+		taskPath := t.Path().String()
+		lg := log.Ctx(ctx).With().Logger()
 		ctx = lg.WithContext(ctx)
 		ctx, span := otel.Tracer("dagger").Start(ctx, fmt.Sprintf("up: %s", t.Path().String()))
 		defer span.End()
 
-		lg.Info().Str("state", string(task.StateComputing)).Msg(string(task.StateComputing))
+		taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+			lg.Info().Str("state", task.StateComputing.String()).Msg(task.StateComputing.String())
+		})
 
 		// Debug: dump dependencies
 		for _, dep := range t.Dependencies() {
-			lg.Debug().Str("dependency", dep.Path().String()).Msg("dependency detected")
+			taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+				lg.Debug().Str("dependency", dep.Path().String()).Msg("dependency detected")
+			})
 		}
 
 		start := time.Now()
 		result, err := handler.Run(ctx, r.pctx, r.s, compiler.Wrap(t.Value()))
 		if err != nil {
 			// FIXME: this should use errdefs.IsCanceled(err)
+
+			// we don't wrap taskLog here since, in some cases, actions could
+			// still be running in goroutines, which would scramble the output
 			if strings.Contains(err.Error(), "context canceled") {
-				lg.Error().Dur("duration", time.Since(start)).Str("state", string(task.StateCanceled)).Msg(string(task.StateCanceled))
+				taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+					lg.Error().Dur("duration", time.Since(start)).Str("state", task.StateCanceled.String()).Msg(task.StateCanceled.String())
+				})
 			} else {
-				lg.Error().Dur("duration", time.Since(start)).Err(compiler.Err(err)).Str("state", string(task.StateFailed)).Msg(string(task.StateFailed))
+				taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+					lg.Error().Dur("duration", time.Since(start)).Err(compiler.Err(err)).Str("state", task.StateFailed.String()).Msg(task.StateFailed.String())
+				})
 			}
 			return fmt.Errorf("%s: %w", t.Path().String(), compiler.Err(err))
 		}
 
-		lg.Info().Dur("duration", time.Since(start)).Str("state", string(task.StateCompleted)).Msg(string(task.StateCompleted))
+		taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+			lg.Info().Dur("duration", time.Since(start)).Str("state", task.StateCompleted.String()).Msg(task.StateCompleted.String())
+		})
 
 		// If the result is not concrete (e.g. empty value), there's nothing to merge.
 		if !result.IsConcrete() {
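The FIXME above is left in place by this commit. For reference, a sketch of the wrapping-safe check it gestures at, shown here with the standard library's errors.Is rather than the errdefs helper it names (not part of the diff):

package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	cancel()

	// Simulate a task error that wraps the context cancellation.
	err := fmt.Errorf("actions.build: %w", ctx.Err())

	// Unlike substring matching on the message, errors.Is survives
	// wrapping and future changes to the error text.
	fmt.Println(errors.Is(err, context.Canceled)) // true
}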
@@ -174,7 +199,9 @@ func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
 		}
 
 		if src, err := result.Source(); err == nil {
-			lg.Debug().Str("result", string(src)).Msg("merging task result")
+			taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+				lg.Debug().Str("result", string(src)).Msg("merging task result")
+			})
 		}
 
 		// Mirror task result and re-scan tasks that should run.
@@ -184,7 +211,9 @@ func (r *Runner) taskFunc(flowVal cue.Value) (cueflow.Runner, error) {
 		// }
 
 		if err := t.Fill(result.Cue()); err != nil {
-			lg.Error().Err(err).Msg("failed to fill task")
+			taskLog(taskPath, &lg, handler, func(lg zerolog.Logger) {
+				lg.Error().Err(err).Msg("failed to fill task")
+			})
 			return err
 		}
@@ -22,13 +22,12 @@ import (
 )
 
 func init() {
-	Register("Dockerfile", func() Task { return &dockerfileTask{} })
+	Register("Dockerfile", func() Task { return &DockerfileTask{} })
 }
 
-type dockerfileTask struct {
-}
+type DockerfileTask struct{}
 
-func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
+func (t *DockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
 	lg := log.Ctx(ctx)
 	auths, err := v.Lookup("auth").Fields()
 	if err != nil {
@@ -76,7 +75,7 @@ func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *
 	}
 	dockerfileDef, err = s.Marshal(ctx,
 		llb.Scratch().File(
-			llb.Mkfile("/Dockerfile", 0644, []byte(contents)),
+			llb.Mkfile("/Dockerfile", 0o644, []byte(contents)),
 		),
 	)
 	if err != nil {
@@ -139,7 +138,7 @@ func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *
 	})
 }
 
-func (t *dockerfileTask) dockerBuildOpts(v *compiler.Value, pctx *plancontext.Context) (map[string]string, error) {
+func (t *DockerfileTask) dockerBuildOpts(v *compiler.Value, pctx *plancontext.Context) (map[string]string, error) {
 	opts := map[string]string{}
 
 	if dockerfilePath := v.Lookup("dockerfile.path"); dockerfilePath.Exists() {
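The rename from dockerfileTask to DockerfileTask is what allows the runner, which lives in a different package, to detect Dockerfile tasks with a type assertion. A small sketch of that mechanic (assuming the plan/task package as patched here):

package main

import (
	"fmt"

	"go.dagger.io/dagger/plan/task"
)

func main() {
	// The exported name makes this cross-package assertion possible;
	// the unexported dockerfileTask was invisible outside plan/task.
	var t task.Task = &task.DockerfileTask{}
	_, isDockerfile := t.(*task.DockerfileTask)
	fmt.Println(isDockerfile) // true
}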
@@ -28,13 +28,36 @@ var (
 )
 
 // State is the state of the task.
-type State string
+type State int8
+
+func (s State) String() string {
+	return [...]string{"computing", "cancelled", "failed", "completed"}[s]
+}
+
+func ParseState(s string) (State, error) {
+	switch s {
+	case "computing":
+		return StateComputing, nil
+	case "cancelled":
+		return StateCanceled, nil
+	case "failed":
+		return StateFailed, nil
+	case "completed":
+		return StateCompleted, nil
+	}
+
+	return -1, fmt.Errorf("invalid state [%s]", s)
+}
+
+func (s State) CanTransition(t State) bool {
+	return s == StateComputing && s <= t
+}
 
 const (
-	StateComputing = State("computing")
-	StateCanceled  = State("canceled")
-	StateFailed    = State("failed")
-	StateCompleted = State("completed")
+	StateComputing State = iota
+	StateCanceled
+	StateFailed
+	StateCompleted
 )
 
 type NewFunc func() Task
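For reference, a sketch of how the reworked State type behaves, assuming the plan/task package as patched above. Note that the new string form spells "cancelled" while the old string constant was "canceled", so the old spelling no longer parses:

package main

import (
	"fmt"

	"go.dagger.io/dagger/plan/task"
)

func main() {
	// fmt picks up the new Stringer implementation.
	fmt.Println(task.StateComputing) // computing

	// Round-trip through ParseState; "canceled" (the old spelling) errors out.
	s, err := task.ParseState("cancelled")
	fmt.Println(s, err) // cancelled <nil>

	// Transitions are only legal out of StateComputing.
	fmt.Println(task.StateComputing.CanTransition(task.StateFailed)) // true
	fmt.Println(task.StateFailed.CanTransition(task.StateCompleted)) // false
}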