Merge branch 'main' into main

Commit 12c7a410d1 by Ekin Barut, 2022-04-04 11:55:16 +03:00, committed via GitHub (GPG key ID: 4AEE18F83AFDEB23)
63 changed files with 752 additions and 713 deletions

View File

@ -2,7 +2,7 @@ name: "Dagger CI"
on:
push:
branches: [main]
branches: [ main ]
paths:
- '**.sh'
- '**.bash'
@ -14,7 +14,7 @@ on:
- 'go.sum'
- '.github/workflows/dagger-ci.yml'
pull_request:
branches: [main]
branches: [ main ]
paths:
- '**.sh'
- '**.bash'
@ -33,11 +33,10 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
-
name: Checkout
- name: Checkout
uses: actions/checkout@v2
-
name: Dagger CI
- name: Dagger CI
uses: dagger/dagger-for-github@v2
with:
workdir: ci

View File

@ -2,7 +2,7 @@ name: "Test Integration"
on:
push:
branches: [main]
branches: [ main ]
paths:
- "**.sh"
- "**.bash"
@ -16,7 +16,7 @@ on:
- "!docs/**"
pull_request:
branches: [main]
branches: [ main ]
paths:
- "**.sh"
- "**.bash"
@ -67,9 +67,6 @@ jobs:
- name: Test
env:
DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
# TODO: https://github.com/dagger/dagger/pull/1341
# DAGGER_CACHE_TO: "type=gha,mode=max,scope=test-integration"
# DAGGER_CACHE_FROM: "type=gha,mode=max,scope=test-integration"
run: |
env
make core-integration
make core-integration

View File

@ -57,8 +57,5 @@ jobs:
uses: crazy-max/ghaction-github-runtime@v1
- name: Test
env:
DAGGER_CACHE_TO: "type=gha,mode=max,scope=test-universe"
DAGGER_CACHE_FROM: "type=gha,mode=max,scope=test-universe"
run: |
make universe-test

CODEOWNERS (new file, 29 lines)
View File

@ -0,0 +1,29 @@
# CODEOWNERS is a tool to encode PR approval rules.
#
# When a PR is opened, at least one code owner is required to approve it
# before it can be merged.
#
# It does **not**:
#
# - Limit reviewers: Everyone is welcome and encouraged to review any PR.
# But at least one CODEOWNER must approve before merging.
#
# - Limit contributions or ownership: Every maintainer is responsible for
# the entire project. CODEOWNERs are there to review PRs for
# consistency.
#
# By default, any maintainer can approve any PR. There are a couple of
# exceptions for consistency/specialty.
# Default owners for everything in the repo
# Later matches take precedence
* @dagger/maintainers
# Core CUE API
/pkg/dagger.io/ @helderco @shykes
# Universe
/pkg/universe.dagger.io/ @helderco @shykes
# Documentation website
/website/ @slumbering

View File

@ -93,4 +93,4 @@ web: # Run the website locally
.PHONY: todo
todo: # Find all TODO items
grep -r -A 1 "TODO:" $(CURDIR)
grep -r -A 1 "TODO:" $(CURDIR)

View File

@ -7,7 +7,7 @@ import (
"strings"
"sync"
"github.com/containerd/containerd/platforms"
"github.com/google/uuid"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
@ -18,7 +18,6 @@ import (
// buildkit
bk "github.com/moby/buildkit/client"
_ "github.com/moby/buildkit/client/connhelper/dockercontainer" // import the container connection driver
"github.com/moby/buildkit/client/llb"
bkgw "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/session"
@ -72,7 +71,7 @@ func New(ctx context.Context, host string, cfg Config) (*Client, error) {
}, nil
}
type DoFunc func(context.Context, solver.Solver) error
type DoFunc func(context.Context, *solver.Solver) error
// FIXME: return completed *Route, instead of *compiler.Value
func (c *Client) Do(ctx context.Context, pctx *plancontext.Context, fn DoFunc) error {
@ -96,6 +95,19 @@ func (c *Client) Do(ctx context.Context, pctx *plancontext.Context, fn DoFunc) e
return eg.Wait()
}
func convertCacheOptionEntries(ims []bk.CacheOptionsEntry) []bkgw.CacheOptionsEntry {
convertIms := []bkgw.CacheOptionsEntry{}
for _, im := range ims {
convertIm := bkgw.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
}
convertIms = append(convertIms, convertIm)
}
return convertIms
}
func (c *Client) buildfn(ctx context.Context, pctx *plancontext.Context, fn DoFunc, ch chan *bk.SolveStatus) error {
wg := sync.WaitGroup{}
@ -156,29 +168,31 @@ func (c *Client) buildfn(ctx context.Context, pctx *plancontext.Context, fn DoFu
resp, err := c.c.Build(ctx, opts, "", func(ctx context.Context, gw bkgw.Client) (*bkgw.Result, error) {
s := solver.New(solver.Opts{
-	Control: c.c,
-	Gateway: gw,
-	Events: eventsCh,
-	Auth: auth,
-	NoCache: c.cfg.NoCache,
+	Control:      c.c,
+	Gateway:      gw,
+	Events:       eventsCh,
+	Auth:         auth,
+	NoCache:      c.cfg.NoCache,
+	CacheImports: convertCacheOptionEntries(opts.CacheImports),
})
// Close events channel
defer s.Stop()
-// Compute output overlay
+res := bkgw.NewResult()
if fn != nil {
-	if err := fn(ctx, s); err != nil {
+	err := fn(ctx, s)
+	if err != nil {
		return nil, compiler.Err(err)
	}
-}
-ref, err := s.Solve(ctx, llb.Scratch(), platforms.DefaultSpec())
-if err != nil {
-	return nil, err
+	refs := s.References()
+	// Add functions layers
+	for _, ref := range refs {
+		res.AddRef(uuid.New().String(), ref)
+	}
}
-res := bkgw.NewResult()
-res.SetRef(ref)
return res, nil
}, buildCh)
if err != nil {
if err != nil {

View File

@ -67,7 +67,7 @@ var doCmd = &cobra.Command{
Value: target.String(),
})
err = cl.Do(ctx, p.Context(), func(ctx context.Context, s solver.Solver) error {
err = cl.Do(ctx, p.Context(), func(ctx context.Context, s *solver.Solver) error {
return p.Do(ctx, target, s)
})

View File

@ -10,9 +10,9 @@ slug: /1001/install/
:::
## Option 1: Use Homebrew (Mac OS only)
## Option 1 (Mac OS and Linux): Use Homebrew
From your Mac OS terminal, run the following command:
From a terminal, run the following command:
```shell
brew install dagger/tap/dagger
@ -25,7 +25,7 @@ brew update
brew upgrade dagger
```
## Option 2: Run a shell script
## Option 2 (Mac OS and Linux): Run a shell script
From a terminal, run the following command:

View File

@ -1,5 +1,5 @@
---
slug: /
slug: /1200/local-dev
displayed_sidebar: europa
---
@ -209,13 +209,10 @@ It becomes even more obvious when the change is not as straightforward as knowin
<TabItem value="windows">
We assume that you have [curl](https://curl.se/windows/) installed.
If you do, you can install `dagger` with a few commands. From a powershell terminal, run:
From a powershell terminal, run:
```shell
curl https://dl.dagger.io/dagger/install.ps1 -OutFile install.ps1
./install.ps1
rm install.ps1
Invoke-WebRequest -UseBasicParsing -Uri https://dl.dagger.io/dagger/install.ps1 | Invoke-Expression
```
We try to move the dagger binary under `C:\Windows\System32` but

View File

@ -0,0 +1,47 @@
---
slug: /1220/vs
displayed_sidebar: europa
---
# Dagger vs. Other Software
## Dagger vs. CI (GitHub Actions, GitLab, CircleCI, Jenkins, etc.)
Dagger does not replace your CI: it improves it by adding a portable development layer on top of it.
* Dagger runs on all major CI products. This *reduces CI lock-in*: you can change CI without rewriting all your pipelines.
* Dagger also runs on your dev machine. This allows *dev/CI parity*: the same pipelines can be used in CI and development.
## Dagger vs. PaaS (Heroku, Firebase, etc.)
Dagger is not a PaaS, but you can use it to add PaaS-like features to your CI/CD pipelines:
* A simple deployment abstraction for the developer
* A catalog of possible customizations, managed by the platform team
* On-demand staging or development environments
Using Dagger is a good way to get many of the benefits of a PaaS (developer productivity and peace of mind),
without giving up the benefits of custom CI/CD pipelines (full control over your infrastructure and tooling).
## Dagger vs. artisanal deploy scripts
Most applications have a custom deploy script that usually gets the job done, but is painful to change and troubleshoot.
Using Dagger, you have two options:
1. You can *replace* your script with a DAG that is better in every way: more featureful, more reliable, faster, and easier to read, improve, and debug.
2. You can *extend* your script by wrapping it, as-is, into a DAG (see the sketch below). This allows you to start using Dagger right away, and worry about rewrites later.
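As a rough illustration of the second option, the sketch below mounts the project directory into a container and runs an existing `deploy.sh` unchanged. It is a minimal sketch assuming the `universe.dagger.io/docker` and `universe.dagger.io/bash` packages and an image that ships `bash`; the image, action, and script names here are hypothetical:

```cue
package main

import (
	"dagger.io/dagger"
	"universe.dagger.io/docker"
	"universe.dagger.io/bash"
)

dagger.#Plan & {
	// Give the plan access to the project directory
	client: filesystem: "./": read: contents: dagger.#FS

	actions: {
		_image: docker.#Pull & {
			source: "bash:5.1" // hypothetical: any image with bash works
		}

		// Wrap the existing deploy script, as-is, in a Dagger action
		deploy: bash.#Run & {
			input:   _image.output
			workdir: "/src"
			mounts: src: {
				dest:     "/src"
				contents: client.filesystem."./".read.contents
			}
			script: {
				// hypothetical script at the project root
				directory: client.filesystem."./".read.contents
				filename:  "deploy.sh"
			}
		}
	}
}
```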
## Dagger vs. Infrastructure as Code (Terraform, Pulumi, CloudFormation, CDK)
Dagger is the perfect complement to an IaC tool.
* IaC tools help infrastructure teams answer questions like: what is the current state of my infrastructure? What is its desired state? And how do I get there?
* Dagger helps CI/CD teams answer questions like: what work needs to be done to deliver my application, in what order, and how do I orchestrate it?
It is very common for a Dagger configuration to integrate with at least one IaC tool.
## Dagger vs. Build Systems (Make, Maven, Bazel, Npm/Yarn, Docker Build, etc.)
Dagger is complementary to build systems. Most Dagger configurations involve integrating with at least one specialized build system.
If several build systems are involved, Dagger helps integrate them into a unified graph.

View File

@ -0,0 +1,15 @@
---
slug: /
displayed_sidebar: europa
---
# Getting Started
```mdx-code-block
import DocCardList from '@theme/DocCardList';
import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
Run your CI/CD pipelines locally, then easily integrate them with any CI environment.
<DocCardList items={useCurrentSidebarCategory().items}/>
```

View File

@ -28,6 +28,12 @@ common_setup() {
export DAGGER_SANDBOX
dagger init --project "$DAGGER_SANDBOX"
if [ -n "$GITHUB_ACTIONS" ];
then
export DAGGER_CACHE_TO="type=gha,mode=max,scope=docs-tests-$BATS_TEST_NAME"
export DAGGER_CACHE_FROM="type=gha,scope=docs-tests-$BATS_TEST_NAME"
fi
# allows the use of `sops`
SOPS_AGE_KEY_FILE=~/.config/dagger/keys.txt
export SOPS_AGE_KEY_FILE

View File

@ -1,5 +1,5 @@
// ...
// A plan has pre-requisited that we cover below.
// A plan has pre-requisites that we cover below.
// For now we focus on the dagger.#Plan structure.
// ...

View File

@ -15,7 +15,7 @@ The production setup is a multi-node Docker Swarm cluster running on AWS.
The Particubes team chose Dagger for continuous deployment because it was the easiest way of integrating GitHub with Docker Swarm.
Every commit to the main branch goes straight to [docs.particubes.com](https://docs.particubes.com) via a Dagger pipeline that runs in GitHub Actions. Let us see how the Particubes Dagger plan fits together.
### Actions API
## Actions API
This is a high level overview of all actions in the Particubes docs Dagger plan:
@ -23,7 +23,7 @@ This is a high level overview of all actions in the Particubes docs Dagger plan:
We can see all available actions in a Plan by running the following command:
```bash
```console
$ dagger do
Execute a dagger action.
@ -34,7 +34,7 @@ Available Actions:
deploy Deploy a container image
```
### Client API
## Client API
Dagger actions usually need to interact with the host environment where the Dagger client runs. Particubes' plan uses environment variables and the filesystem.
@ -45,20 +45,22 @@ This is an overview of all client interactions for this plan:
This is what the above looks like in the Dagger plan config:
```cue file=../tests/use-cases/go-docker-swarm/client-api.cue.fragment
```
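The fragment itself is not inlined in this view. As a purely illustrative sketch (not Particubes' actual configuration), a `client` section that reads environment variables and the host filesystem generally has this shape; the variable names below are hypothetical:

```cue
dagger.#Plan & {
	client: {
		// Hypothetical: read configuration and credentials from the host environment
		env: {
			DOCKER_HOST:    string
			REGISTRY_TOKEN: dagger.#Secret
		}
		// Hypothetical: read the project directory from the host filesystem
		filesystem: "./": read: contents: dagger.#FS
	}
}
```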
### The *build* Action
## The `build` Action
This is a more in-depth overview of the *build* action and how it interacts with the client in the Particubes docs Dagger plan:
This is a more in-depth overview of the _build_ action and how it interacts with the client in the Particubes docs Dagger plan:
![build action](/img/use-cases/build-action.png)
This is what the above looks like in the Dagger plan config:
```cue file=../tests/use-cases/go-docker-swarm/build-action.cue.fragment
```
### GitHub Actions integration
## GitHub Actions integration
This is the GitHub Actions workflow config that invokes `dagger`, which in turn runs the full plan:
@ -102,16 +104,17 @@ dagger do
This is the first step that enabled the Particubes team to have the same CI/CD experience everywhere.
### Full Particubes docs Dagger plan
## Full Particubes docs Dagger plan
This is the entire plan running on Particubes' CI:
```cue file=../tests/use-cases/go-docker-swarm/full/particubes.docs.cue
```
### What comes next?
## What comes next?
Particubes' team suggested that we create a `dev` action with *hot reload*, so that Dagger would abstract away even the ramp-up experience when developing the docs.
Particubes' team suggested that we create a `dev` action with _hot reload_, so that Dagger would abstract away even the ramp-up experience when developing the docs.
:::tip
The latest version of this pipeline can be found at [github.com/voxowl/particubes/pull/144](https://github.com/voxowl/particubes/blob/2af173596729929cfb7a7a1f78f1ec0d8b685e5e/lua-docs/docs.cue)

View File

@ -76,6 +76,10 @@ import "dagger.io/dagger"
source: string | *"/"
// Destination path (optional)
dest: string | *"/"
// Optionally include certain files
include: [...string]
// Optionally exclude certain files
exclude: [...string]
// Output of the operation
output: dagger.#FS
}
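As a hypothetical usage sketch of the new `include`/`exclude` filters (the action names and glob patterns are illustrative, not part of the API docs):

```cue
package main

import (
	"dagger.io/dagger"
	"dagger.io/dagger/core"
)

dagger.#Plan & {
	client: filesystem: "./": read: contents: dagger.#FS

	actions: {
		_base: core.#Pull & {
			source: "alpine:3.15"
		}

		// Hypothetical: copy only CUE sources, leaving test files behind
		sources: core.#Copy & {
			input:    _base.output
			contents: client.filesystem."./".read.contents
			dest:     "/src"
			include: ["**/*.cue"]
			exclude: ["**/*_test.cue"]
		}
	}
}
```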

View File

@ -46,7 +46,7 @@ This table compares Dagger core packages, Dagger Universe packages, and the over
### Docker API
*Import path: [`universe.dagger.io/docker`](./universe/docker)*
*Import path: [`universe.dagger.io/docker`](./docker)*
The `docker` package is a native Cue API for Docker. You can use it to build, run, push and pull Docker containers directly from Cue.

View File

@ -15,6 +15,13 @@ common_setup() {
DAGGER_LOG_FORMAT="plain"
export DAGGER_LOG_FORMAT
export DAGGER_LOG_LEVEL="debug"
if [ -n "$GITHUB_ACTIONS" ];
then
export DAGGER_CACHE_TO="type=gha,mode=max,scope=universe-tests-$BATS_TEST_NAME"
export DAGGER_CACHE_FROM="type=gha,scope=universe-tests-$BATS_TEST_NAME"
fi
# cd into the directory containing the bats file
cd "$BATS_TEST_DIRNAME" || exit 1
}

View File

@ -45,6 +45,10 @@ import (
contents: dagger.#FS
source: string | *"/"
dest: string | *"/"
// Optionally include certain files
include: [...string]
// Optionally exclude certain files
exclude: [...string]
// Execute copy operation
_copy: core.#Copy & {
@ -52,6 +56,8 @@ import (
"contents": contents
"source": source
"dest": dest
"include": include
"exclude": exclude
}
output: #Image & {

View File

@ -1,7 +1,7 @@
{
"license": "Apache-2.0",
"scripts": {
"test": "bats --report-formatter junit --jobs 4 $(find . -type f -name '*.bats' -not -path '*/node_modules/*')"
"test": "bats --report-formatter junit --print-output-on-failure --jobs 4 $(find . -type f -name '*.bats' -not -path '*/node_modules/*')"
},
"devDependencies": {
"bats": "^1.5.0",

View File

@ -0,0 +1,24 @@
package rawkode_pulumi_example
import (
"dagger.io/dagger"
"universe.dagger.io/x/david@rawkode.dev/pulumi"
)
dagger.#Plan & {
client: {
filesystem: "./": read: contents: dagger.#FS
env: {
PULUMI_ACCESS_TOKEN: dagger.#Secret
// If not using Pulumi SaaS, use CONFIG_PASSPHRASE
// PULUMI_CONFIG_PASSPHRASE: dagger.#Secret
}
}
actions: rawkode: pulumi.#Up & {
stack: "test"
stackCreate: true
runtime: "nodejs"
accessToken: client.env.PULUMI_ACCESS_TOKEN
source: client.filesystem."./".read.contents
}
}

View File

@ -0,0 +1,80 @@
// Run a Pulumi program
package pulumi
import (
"dagger.io/dagger"
"dagger.io/dagger/core"
"universe.dagger.io/docker"
"universe.dagger.io/bash"
)
// Run a `pulumi up`
#Up: {
// Source code of Pulumi program
source: dagger.#FS
// Pulumi version
version: string | *"latest"
// Pulumi runtime used for this Pulumi program
runtime: "dotnet" | "go" | "nodejs" | "python"
// Name of your Pulumi stack
// Example: "production"
stack: string
// Create the stack if it doesn't exist
stackCreate: *false | true
// API token if you want to use Pulumi SaaS state backend
accessToken?: dagger.#Secret
// Passphrase if you want to use local state backend (Cached by Dagger in buildkit)
passphrase?: dagger.#Secret
// Pull a docker image to run the pulumi client
_pull_image: docker.#Pull & {
source: "pulumi/pulumi-\(runtime):\(version)"
}
// Run Pulumi up
container: bash.#Run & {
input: *_pull_image.output | docker.#Image
script: {
_load: core.#Source & {
path: "."
include: ["*.sh"]
}
directory: _load.output
filename: "up.sh"
}
env: {
PULUMI_STACK: stack
PULUMI_RUNTIME: runtime
if true == stackCreate {
PULUMI_STACK_CREATE: "1"
}
if passphrase != _|_ {
PULUMI_CONFIG_PASSPHRASE: passphrase
}
if accessToken != _|_ {
PULUMI_ACCESS_TOKEN: accessToken
}
}
workdir: "/src"
mounts: {
src: {
dest: "/src"
contents: source
}
node_modules: {
dest: "/src/node_modules"
contents: core.#CacheDir & {
id: "pulumi-npm-cache"
}
}
}
}
}
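A minimal usage sketch for the local state backend, passing `passphrase` instead of `accessToken` (the stack name, runtime, and action name here are illustrative):

```cue
package main

import (
	"dagger.io/dagger"
	"universe.dagger.io/x/david@rawkode.dev/pulumi"
)

dagger.#Plan & {
	client: {
		filesystem: "./": read: contents: dagger.#FS
		env: PULUMI_CONFIG_PASSPHRASE: dagger.#Secret
	}

	actions: infra: pulumi.#Up & {
		stack:       "dev"
		stackCreate: true
		runtime:     "go"
		// Local state backend: wire the passphrase instead of an access token
		passphrase: client.env.PULUMI_CONFIG_PASSPHRASE
		source:     client.filesystem."./".read.contents
	}
}
```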

View File

@ -0,0 +1,40 @@
#!/usr/bin/env bash
set -xeo pipefail
if test -v PULUMI_CONFIG_PASSPHRASE || test -v PULUMI_CONFIG_PASSPHRASE_FILE; then
echo "PULUMI_CONFIG_PASSPHRASE is set, using a local login"
pulumi login --local
fi
# Using Pulumi SaaS
# We need to check for an existing stack with the name
# If it exists, refresh the config
# If it doesn't, create the stack
if test -v PULUMI_ACCESS_TOKEN; then
if (pulumi stack ls | grep -e "^${PULUMI_STACK}"); then
echo "Stack exists, let's refresh"
pulumi stack select "${PULUMI_STACK}"
# Could be first deployment, so let's not worry about this failing
pulumi config refresh --force || true
else
echo "Stack does not exist, let's create"
pulumi stack init "${PULUMI_STACK}"
fi
else
# Not using Pulumi SaaS, relying on local stack files
if test -v PULUMI_STACK_CREATE && test ! -f "Pulumi.${PULUMI_STACK}.yaml"; then
pulumi stack init "${PULUMI_STACK}"
fi
fi
case "$PULUMI_RUNTIME" in
nodejs)
npm install
;;
*)
echo -n "unknown"
;;
esac
pulumi up --stack "${PULUMI_STACK}" --yes

View File

@ -168,7 +168,7 @@ func (p *Plan) prepare(ctx context.Context) error {
}
// Do executes an action in the plan
func (p *Plan) Do(ctx context.Context, path cue.Path, s solver.Solver) error {
func (p *Plan) Do(ctx context.Context, path cue.Path, s *solver.Solver) error {
ctx, span := otel.Tracer("dagger").Start(ctx, "plan.Up")
defer span.End()

View File

@ -22,13 +22,13 @@ import (
type Runner struct {
pctx *plancontext.Context
target cue.Path
s solver.Solver
s *solver.Solver
tasks sync.Map
mirror *compiler.Value
l sync.Mutex
}
func NewRunner(pctx *plancontext.Context, target cue.Path, s solver.Solver) *Runner {
func NewRunner(pctx *plancontext.Context, target cue.Path, s *solver.Solver) *Runner {
return &Runner{
pctx: pctx,
target: target,

View File

@ -23,7 +23,7 @@ func init() {
type clientCommandTask struct {
}
func (t clientCommandTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t clientCommandTask) Run(ctx context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
var opts struct {
Name string
Args []string

View File

@ -19,7 +19,7 @@ func init() {
type clientEnvTask struct {
}
func (t clientEnvTask) Run(ctx context.Context, pctx *plancontext.Context, _ solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t clientEnvTask) Run(ctx context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
log.Ctx(ctx).Debug().Msg("loading environment variables")
fields, err := v.Fields()

View File

@ -18,17 +18,23 @@ func init() {
Register("ClientFilesystemRead", func() Task { return &clientFilesystemReadTask{} })
}
type clientFilesystemReadTask struct {
}
type clientFilesystemReadTask struct{}
func (t clientFilesystemReadTask) PreRun(ctx context.Context, pctx *plancontext.Context, v *compiler.Value) error {
func (t clientFilesystemReadTask) PreRun(_ context.Context, pctx *plancontext.Context, v *compiler.Value) error {
path, err := t.parsePath(v)
if err != nil {
return err
}
if _, err := os.Stat(path); errors.Is(err, os.ErrNotExist) {
isFS := plancontext.IsFSValue(v.Lookup("contents"))
switch pi, err := os.Stat(path); {
case errors.Is(err, os.ErrNotExist):
return fmt.Errorf("path %q does not exist", path)
case err != nil:
return err
case !pi.IsDir() && isFS:
return fmt.Errorf("path %q is not a directory", path)
case pi.IsDir() && !isFS:
return fmt.Errorf("path %q cannot be a directory", path)
}
if plancontext.IsFSValue(v.Lookup("contents")) {
@ -38,7 +44,7 @@ func (t clientFilesystemReadTask) PreRun(ctx context.Context, pctx *plancontext.
return nil
}
func (t clientFilesystemReadTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t clientFilesystemReadTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
path, err := t.parsePath(v)
if err != nil {
return nil, err
@ -70,7 +76,7 @@ func (t clientFilesystemReadTask) parsePath(v *compiler.Value) (path string, err
return
}
func (t clientFilesystemReadTask) readContents(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value, path string) (interface{}, error) {
func (t clientFilesystemReadTask) readContents(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value, path string) (interface{}, error) {
lg := log.Ctx(ctx)
contents := v.Lookup("contents")
@ -97,7 +103,7 @@ func (t clientFilesystemReadTask) readContents(ctx context.Context, pctx *planco
return nil, fmt.Errorf("unsupported type %q", k)
}
func (t clientFilesystemReadTask) readFS(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value, path string) (*compiler.Value, error) {
func (t clientFilesystemReadTask) readFS(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value, path string) (*compiler.Value, error) {
var dir struct {
Include []string
Exclude []string

View File

@ -21,7 +21,7 @@ func init() {
type clientFilesystemWriteTask struct {
}
func (t clientFilesystemWriteTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t clientFilesystemWriteTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
path, err := v.Lookup("path").String()
if err != nil {
return nil, err
@ -39,7 +39,7 @@ func (t clientFilesystemWriteTask) Run(ctx context.Context, pctx *plancontext.Co
return compiler.NewValue(), nil
}
func (t clientFilesystemWriteTask) writeContents(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value, path string) error {
func (t clientFilesystemWriteTask) writeContents(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value, path string) error {
lg := log.Ctx(ctx)
contents := v.Lookup("contents")
@ -79,7 +79,7 @@ func (t clientFilesystemWriteTask) writeContents(ctx context.Context, pctx *plan
return fmt.Errorf("unsupported type %q", k)
}
func (t clientFilesystemWriteTask) writeFS(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value, path string) error {
func (t clientFilesystemWriteTask) writeFS(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value, path string) error {
contents, err := pctx.FS.FromValue(v)
if err != nil {
return err

View File

@ -20,7 +20,7 @@ func init() {
type clientNetwork struct {
}
func (t clientNetwork) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t clientNetwork) Run(ctx context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
addr, err := v.Lookup("address").String()

View File

@ -16,7 +16,7 @@ func init() {
type clientPlatformTask struct {
}
func (t clientPlatformTask) Run(ctx context.Context, pctx *plancontext.Context, _ solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t clientPlatformTask) Run(_ context.Context, _ *plancontext.Context, _ *solver.Solver, _ *compiler.Value) (*compiler.Value, error) {
return compiler.NewValue().FillFields(map[string]interface{}{
"os": runtime.GOOS,
"arch": runtime.GOARCH,

View File

@ -16,7 +16,7 @@ func init() {
type copyTask struct {
}
func (t *copyTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *copyTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
var err error
input, err := pctx.FS.FromValue(v.Lookup("input"))
@ -49,18 +49,31 @@ func (t *copyTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.
return nil, err
}
var filters struct {
Include []string
Exclude []string
}
if err := v.Decode(&filters); err != nil {
return nil, err
}
// FIXME: allow more configurable llb options
// For now we define the following convenience presets.
opts := &llb.CopyInfo{
CopyDirContentsOnly: true,
CreateDestPath: true,
AllowWildcard: true,
IncludePatterns: filters.Include,
ExcludePatterns: filters.Exclude,
}
outputState := inputState.File(
llb.Copy(
contentsState,
sourcePath,
destPath,
// FIXME: allow more configurable llb options
// For now we define the following convenience presets:
&llb.CopyInfo{
CopyDirContentsOnly: true,
CreateDestPath: true,
AllowWildcard: true,
},
opts,
),
withCustomName(v, "Copy %s %s", sourcePath, destPath),
)

View File

@ -20,7 +20,7 @@ func init() {
type decodeSecretTask struct {
}
func (c *decodeSecretTask) Run(ctx context.Context, pctx *plancontext.Context, _ solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *decodeSecretTask) Run(ctx context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
lg.Debug().Msg("decoding secret")

View File

@ -16,7 +16,7 @@ func init() {
type diffTask struct {
}
func (t diffTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *diffTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lowerFS, err := pctx.FS.FromValue(v.Lookup("lower"))
if err != nil {
return nil, err

View File

@ -28,7 +28,7 @@ func init() {
type dockerfileTask struct {
}
func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *dockerfileTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
auths, err := v.Lookup("auth").Fields()
if err != nil {

View File

@ -20,7 +20,7 @@ func init() {
type execTask struct {
}
func (t execTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *execTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
// Get input state
input, err := pctx.FS.FromValue(v.Lookup("input"))
if err != nil {
@ -52,7 +52,7 @@ func (t execTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.S
})
}
func (t execTask) getRunOpts(v *compiler.Value, pctx *plancontext.Context) ([]llb.RunOption, error) {
func (t *execTask) getRunOpts(v *compiler.Value, pctx *plancontext.Context) ([]llb.RunOption, error) {
opts := []llb.RunOption{}
var cmd struct {
Args []string
@ -141,7 +141,7 @@ func (t execTask) getRunOpts(v *compiler.Value, pctx *plancontext.Context) ([]ll
return opts, nil
}
func (t execTask) mountAll(pctx *plancontext.Context, mounts *compiler.Value) ([]llb.RunOption, error) {
func (t *execTask) mountAll(pctx *plancontext.Context, mounts *compiler.Value) ([]llb.RunOption, error) {
opts := []llb.RunOption{}
fields, err := mounts.Fields()
if err != nil {
@ -165,7 +165,7 @@ func (t execTask) mountAll(pctx *plancontext.Context, mounts *compiler.Value) ([
return opts, err
}
func (t execTask) mount(pctx *plancontext.Context, dest string, mnt *compiler.Value) (llb.RunOption, error) {
func (t *execTask) mount(pctx *plancontext.Context, dest string, mnt *compiler.Value) (llb.RunOption, error) {
typ, err := mnt.Lookup("type").String()
if err != nil {
return nil, err

View File

@ -25,7 +25,7 @@ func init() {
type exportTask struct {
}
func (t exportTask) PreRun(ctx context.Context, pctx *plancontext.Context, v *compiler.Value) error {
func (t exportTask) PreRun(_ context.Context, pctx *plancontext.Context, v *compiler.Value) error {
dir, err := os.MkdirTemp("", "dagger-export-*")
if err != nil {
return err
@ -37,7 +37,7 @@ func (t exportTask) PreRun(ctx context.Context, pctx *plancontext.Context, v *co
return nil
}
func (t exportTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t exportTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
dir := pctx.TempDirs.Get(v.Path().String())

View File

@ -19,7 +19,7 @@ func init() {
type gitPullTask struct {
}
func (c gitPullTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *gitPullTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
var gitPull struct {
Remote string
Ref string

View File

@ -21,7 +21,7 @@ func init() {
type httpFetchTask struct {
}
func (c httpFetchTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *httpFetchTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
var httpFetch struct {
Source string
Checksum string

View File

@ -16,7 +16,7 @@ func init() {
type mergeTask struct {
}
func (t mergeTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *mergeTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
inputs, err := v.Lookup("inputs").List()
if err != nil {
return nil, err

View File

@ -18,7 +18,7 @@ func init() {
type mkdirTask struct {
}
func (t *mkdirTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *mkdirTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
path, err := v.Lookup("path").String()
if err != nil {
return nil, err

View File

@ -18,7 +18,7 @@ func init() {
type newSecretTask struct {
}
func (t *newSecretTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *newSecretTask) Run(_ context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
path, err := v.Lookup("path").String()
if err != nil {
return nil, err

View File

@ -15,6 +15,6 @@ func init() {
type nopTask struct {
}
func (t *nopTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *nopTask) Run(_ context.Context, _ *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
return v, nil
}

View File

@ -19,7 +19,7 @@ func init() {
type pullTask struct {
}
func (c *pullTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *pullTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
rawRef, err := v.Lookup("source").String()
@ -68,8 +68,8 @@ func (c *pullTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.
if err != nil {
return nil, err
}
fs := pctx.FS.New(result)
fs := pctx.FS.New(result)
return compiler.NewValue().FillFields(map[string]interface{}{
"output": fs.MarshalCUE(),
"digest": digest,

View File

@ -19,7 +19,7 @@ func init() {
type pushTask struct {
}
func (c *pushTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *pushTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
rawDest, err := v.Lookup("dest").String()

View File

@ -18,7 +18,7 @@ func init() {
type readFileTask struct {
}
func (t *readFileTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *readFileTask) Run(_ context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
path, err := v.Lookup("path").String()
if err != nil {
return nil, err

View File

@ -21,7 +21,7 @@ func init() {
type sourceTask struct {
}
func (c *sourceTask) PreRun(ctx context.Context, pctx *plancontext.Context, v *compiler.Value) error {
func (c *sourceTask) PreRun(_ context.Context, pctx *plancontext.Context, v *compiler.Value) error {
origPath, err := v.Lookup("path").String()
if err != nil {
return err
@ -50,7 +50,7 @@ func (c *sourceTask) PreRun(ctx context.Context, pctx *plancontext.Context, v *c
return nil
}
func (c *sourceTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *sourceTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
path, err := v.Lookup("path").AbsPath()

View File

@ -40,7 +40,7 @@ const (
type NewFunc func() Task
type Task interface {
Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error)
Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error)
}
type PreRunner interface {

View File

@ -20,7 +20,7 @@ func init() {
type transformSecretTask struct {
}
func (c *transformSecretTask) Run(ctx context.Context, pctx *plancontext.Context, _ solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (c *transformSecretTask) Run(ctx context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
lg := log.Ctx(ctx)
lg.Debug().Msg("transforming secret")

View File

@ -16,7 +16,7 @@ func init() {
type trimSecretTask struct {
}
func (t *trimSecretTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *trimSecretTask) Run(_ context.Context, pctx *plancontext.Context, _ *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
input, err := pctx.Secrets.FromValue(v.Lookup("input"))
if err != nil {
return nil, err

View File

@ -19,7 +19,7 @@ func init() {
type writeFileTask struct {
}
func (t *writeFileTask) Run(ctx context.Context, pctx *plancontext.Context, s solver.Solver, v *compiler.Value) (*compiler.Value, error) {
func (t *writeFileTask) Run(ctx context.Context, pctx *plancontext.Context, s *solver.Solver, v *compiler.Value) (*compiler.Value, error) {
var contents []byte
var err error
@ -49,19 +49,16 @@ func (t *writeFileTask) Run(ctx context.Context, pctx *plancontext.Context, s so
}
permissions, err := v.Lookup("permissions").Int64()
if err != nil {
return nil, err
}
input, err := pctx.FS.FromValue(v.Lookup("input"))
if err != nil {
return nil, err
}
inputState, err := input.State()
if err != nil {
return nil, err
}
@ -72,7 +69,6 @@ func (t *writeFileTask) Run(ctx context.Context, pctx *plancontext.Context, s so
)
result, err := s.Solve(ctx, outputState, pctx.Platform.Get())
if err != nil {
return nil, err
}
@ -80,7 +76,6 @@ func (t *writeFileTask) Run(ctx context.Context, pctx *plancontext.Context, s so
outputFS := pctx.FS.New(result)
output := compiler.NewValue()
if err := output.FillPath(cue.ParsePath("output"), outputFS.MarshalCUE()); err != nil {
return nil, err
}

View File

@ -25,19 +25,22 @@ type Solver struct {
opts Opts
eventsWg *sync.WaitGroup
closeCh chan *bk.SolveStatus
refs []bkgw.Reference
l sync.RWMutex
}
type Opts struct {
Control *bk.Client
Gateway bkgw.Client
Events chan *bk.SolveStatus
Context *plancontext.Context
Auth *RegistryAuthProvider
NoCache bool
Control *bk.Client
Gateway bkgw.Client
Events chan *bk.SolveStatus
Context *plancontext.Context
Auth *RegistryAuthProvider
NoCache bool
CacheImports []bkgw.CacheOptionsEntry
}
func New(opts Opts) Solver {
return Solver{
func New(opts Opts) *Solver {
return &Solver{
eventsWg: &sync.WaitGroup{},
closeCh: make(chan *bk.SolveStatus),
opts: opts,
@ -63,25 +66,25 @@ func invalidateCache(def *llb.Definition) error {
return nil
}
func (s Solver) GetOptions() Opts {
func (s *Solver) GetOptions() Opts {
return s.opts
}
func (s Solver) NoCache() bool {
func (s *Solver) NoCache() bool {
return s.opts.NoCache
}
func (s Solver) Stop() {
func (s *Solver) Stop() {
close(s.closeCh)
s.eventsWg.Wait()
close(s.opts.Events)
}
func (s Solver) AddCredentials(target, username, secret string) {
func (s *Solver) AddCredentials(target, username, secret string) {
s.opts.Auth.AddCredentials(target, username, secret)
}
func (s Solver) Marshal(ctx context.Context, st llb.State, co ...llb.ConstraintsOpt) (*bkpb.Definition, error) {
func (s *Solver) Marshal(ctx context.Context, st llb.State, co ...llb.ConstraintsOpt) (*bkpb.Definition, error) {
// FIXME: do not hardcode the platform
def, err := st.Marshal(ctx, co...)
if err != nil {
@ -97,11 +100,11 @@ func (s Solver) Marshal(ctx context.Context, st llb.State, co ...llb.Constraints
return def.ToPB(), nil
}
func (s Solver) SessionID() string {
func (s *Solver) SessionID() string {
return s.opts.Gateway.BuildOpts().SessionID
}
func (s Solver) ResolveImageConfig(ctx context.Context, ref string, opts llb.ResolveImageConfigOpt) (dockerfile2llb.Image, digest.Digest, error) {
func (s *Solver) ResolveImageConfig(ctx context.Context, ref string, opts llb.ResolveImageConfigOpt) (dockerfile2llb.Image, digest.Digest, error) {
var image dockerfile2llb.Image
// Load image metadata and convert it to LLB.
@ -119,7 +122,7 @@ func (s Solver) ResolveImageConfig(ctx context.Context, ref string, opts llb.Res
}
// SolveRequest will block until the state is solved and returns a Result.
func (s Solver) SolveRequest(ctx context.Context, req bkgw.SolveRequest) (*bkgw.Result, error) {
func (s *Solver) SolveRequest(ctx context.Context, req bkgw.SolveRequest) (*bkgw.Result, error) {
// makes Solve() block until the LLB graph is solved. Otherwise it would
// return a result (which could, for example, be used for a next build) that
// is only evaluated on export or when files on it are accessed.
@ -131,9 +134,15 @@ func (s Solver) SolveRequest(ctx context.Context, req bkgw.SolveRequest) (*bkgw.
return res, nil
}
func (s *Solver) References() []bkgw.Reference {
s.l.RLock()
defer s.l.RUnlock()
return s.refs
}
// Solve will block until the state is solved and returns a Reference.
// It takes a platform as argument, which corresponds to the target platform.
func (s Solver) Solve(ctx context.Context, st llb.State, platform specs.Platform) (bkgw.Reference, error) {
func (s *Solver) Solve(ctx context.Context, st llb.State, platform specs.Platform) (bkgw.Reference, error) {
def, err := s.Marshal(ctx, st, llb.Platform(platform))
if err != nil {
return nil, err
@ -152,19 +161,29 @@ func (s Solver) Solve(ctx context.Context, st llb.State, platform specs.Platform
// call solve
res, err := s.SolveRequest(ctx, bkgw.SolveRequest{
Definition: def,
Definition: def,
CacheImports: s.opts.CacheImports,
})
if err != nil {
return nil, err
}
return res.SingleRef()
ref, err := res.SingleRef()
if err != nil {
return nil, err
}
s.l.Lock()
defer s.l.Unlock()
s.refs = append(s.refs, ref)
return ref, nil
}
// Forward events from the solver to the main events channel.
// It adds a task to the solver wait group to make sure
// that everything is forwarded to the main channel.
func (s Solver) forwardEvents(ch chan *bk.SolveStatus) {
func (s *Solver) forwardEvents(ch chan *bk.SolveStatus) {
s.eventsWg.Add(1)
defer s.eventsWg.Done()
@ -177,7 +196,7 @@ func (s Solver) forwardEvents(ch chan *bk.SolveStatus) {
// FIXME: this is currently implemented as a hack, starting a new Build session
// within buildkit from the Control API. Ideally the Gateway API should allow
// exporting directly.
func (s Solver) Export(ctx context.Context, st llb.State, img *dockerfile2llb.Image, output bk.ExportEntry, platform specs.Platform) (*bk.SolveResponse, error) {
func (s *Solver) Export(ctx context.Context, st llb.State, img *dockerfile2llb.Image, output bk.ExportEntry, platform specs.Platform) (*bk.SolveResponse, error) {
// Check close event channel and return if we're already done with the main pipeline
select {
case <-s.closeCh:

View File

@ -13,6 +13,13 @@ common_setup() {
DAGGER_TELEMETRY_DISABLE="1"
export DAGGER_TELEMETRY_DISABLE
export DAGGER_LOG_LEVEL="debug"
if [ -n "$GITHUB_ACTIONS" ];
then
export DAGGER_CACHE_TO="type=gha,mode=max,scope=integration-tests-$BATS_TEST_NAME"
export DAGGER_CACHE_FROM="type=gha,scope=integration-tests-$BATS_TEST_NAME"
fi
SOPS_AGE_KEY_FILE=~/.config/dagger/keys.txt
export SOPS_AGE_KEY_FILE
}

View File

@ -1,7 +1,7 @@
{
"license": "Apache-2.0",
"scripts": {
"test": "bats --jobs 4 --show-output-of-passing-tests --print-output-on-failure ."
"test": "bats --jobs 4 --print-output-on-failure --verbose-run ."
},
"devDependencies": {
"bats": "https://github.com/bats-core/bats-core#master",

View File

@ -84,6 +84,24 @@ setup() {
assert_output --partial 'path "/foobar" does not exist'
}
@test "plan/client/filesystem/read/fs/invalid_fs_input" {
cd "$TESTDIR/plan/client/filesystem/read/fs/invalid_fs_input"
run "$DAGGER" "do" -p . test
assert_failure
assert_output --partial 'test.txt" is not a directory'
}
@test "plan/client/filesystem/read/fs/invalid_fs_type" {
cd "$TESTDIR/plan/client/filesystem/read/fs/invalid_fs_type"
run "$DAGGER" "do" -p . test
assert_failure
assert_output --partial 'rootfs" cannot be a directory'
}
@test "plan/client/filesystem/read/fs/relative" {
cd "$TESTDIR/plan/client/filesystem/read/fs/relative"
@ -144,7 +162,7 @@ setup() {
cd "$TESTDIR/plan/client/filesystem/conflict"
echo -n foo > test.txt
run "$DAGGER" "do" --log-level debug -p . test
run "$DAGGER" "do" -p . test
assert_line --regexp "client\.filesystem\..+\.write.+dependency=client\.filesystem\..+\.read"
rm -f test.txt

View File

@ -0,0 +1,12 @@
package main
import (
"dagger.io/dagger"
)
dagger.#Plan & {
// Reading a file into a dagger.#FS should not be possible
client: filesystem: "../rootfs/test.txt": read: contents: dagger.#FS
actions: test: {
}
}

View File

@ -0,0 +1,21 @@
package main
import (
"dagger.io/dagger"
"dagger.io/dagger/core"
)
dagger.#Plan & {
// Reading a directory into a non-fs should fail
client: filesystem: "../rootfs": read: contents: string
actions: {
image: core.#Pull & {
source: "alpine:3.15.0@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3"
}
test: core.#Exec & {
input: image.output
args: ["test", client.filesystem."../rootfs".read.contents]
}
}
}

View File

@ -25,6 +25,6 @@ setup() {
test -f ./cue.mod/pkg/.gitattributes
run cat ./cue.mod/pkg/.gitattributes
assert_output --partial "generated by dagger"
test ! -f ./cue.mod/pkg/.gitignore
}

View File

@ -47,12 +47,6 @@ module.exports = {
className: "header-github-link hide-target-icon",
"aria-label": "GitHub repository",
},
{
position: "right",
label: "Schedule a demo",
href: "https://savvycal.com/dagger/meet",
className: "button",
},
],
hideOnScroll: true,
},

View File

@ -17,10 +17,10 @@
},
"dependencies": {
"@docusaurus/core": "^2.0.0-beta.18",
"@docusaurus/preset-classic": "^2.0.0-beta.17",
"@docusaurus/preset-classic": "^2.0.0-beta.18",
"@mdx-js/react": "^1.6.22",
"@svgr/webpack": "^6.2.1",
"amplitude-js": "^8.17.0",
"amplitude-js": "^8.18.0",
"clsx": "^1.1.1",
"docusaurus-plugin-sass": "^0.2.2",
"docusaurus2-dotenv": "^1.4.0",
@ -31,7 +31,7 @@
"react-dom": "^17.0.1",
"react-social-login-buttons": "^3.6.0",
"remark-code-import": "^0.4.0",
"sass": "^1.49.9",
"sass": "^1.49.10",
"url-loader": "^4.1.1"
},
"browserslist": {

View File

@ -79,14 +79,11 @@ module.exports = {
type: "category",
label: "Getting Started",
collapsible: false,
collapsed: false,
link: {
type: 'generated-index',
title: 'Getting Started',
description:
"Run your CI/CD pipelines locally, then easily integrate them with any CI environment.",
type: 'doc',
id: 'getting-started/index'
},
items: ["getting-started/local-dev", "getting-started/ci-environment"],
items: ["getting-started/local-dev", "getting-started/ci-environment", "getting-started/vs"],
},
{
type: "category",

View File

@ -101,7 +101,9 @@ h2 {
line-height: 32px;
}
code {
code,
.table-of-contents__link code,
.table-of-contents__link:hover code {
margin: 0 1px;
color: var(--ifm-code-color);
.alert & {
@ -124,7 +126,7 @@ code {
--ifm-h3-vertical-rhythm-top: 2;
}
a {
a:not(.card) {
font-weight: bold;
text-decoration: underline;
color: var(--ifm-color-primary-dark);
@ -279,9 +281,9 @@ h1[class^="h1Heading"] {
line-height: 36px;
@include desktop {
width: 48px;
span {
display: none;
}
text-indent: 9999px;
white-space: nowrap;
overflow: hidden;
}
}
@ -310,15 +312,6 @@ h1[class^="h1Heading"] {
}
/* sidebar */
@include desktop {
aside[class^="docSidebarContainer"] {
width: 340px;
div[class^="sidebar"] {
width: 340px;
}
}
}
a[class^="sidebarLogo"] {
img {
@ -341,7 +334,7 @@ a[class^="sidebarLogo"] {
main[class^="docMainContainer"] {
background: #ffffff;
padding: 2rem 2rem 2rem 5rem;
padding: 2rem;
@include tablet {
padding: 2rem;

Binary file not shown (image: 3.8 KiB before, 4.7 KiB after)

File diff suppressed because it is too large