initial commit
commit
1fc075b141
@ -0,0 +1,14 @@
|
||||
workspace:
|
||||
base: /go
|
||||
path: src/github.com/drone/drone-runtime
|
||||
|
||||
pipeline:
|
||||
deps:
|
||||
image: golang:1.11
|
||||
commands:
|
||||
- go get -u github.com/golang/dep/cmd/dep
|
||||
- dep ensure
|
||||
test:
|
||||
image: golang:1.11
|
||||
commands:
|
||||
- go test -v --cover ./...
|
@ -0,0 +1,5 @@
|
||||
vendor
|
||||
release
|
||||
*.out
|
||||
*.txt
|
||||
.docker/config.json
|
@ -0,0 +1,13 @@
|
||||
1. Install go 1.9 or later
|
||||
2. Install go dep:
|
||||
|
||||
go get -u github.com/golang/dep/cmd/dep
|
||||
|
||||
3. Install dependencies:
|
||||
|
||||
dep ensure
|
||||
|
||||
4. Compile and test:
|
||||
|
||||
go install ./...
|
||||
go test ./...
|
@ -0,0 +1,570 @@
|
||||
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
|
||||
|
||||
|
||||
[[projects]]
|
||||
name = "docker.io/go-docker"
|
||||
packages = [
|
||||
".",
|
||||
"api",
|
||||
"api/types",
|
||||
"api/types/blkiodev",
|
||||
"api/types/container",
|
||||
"api/types/events",
|
||||
"api/types/filters",
|
||||
"api/types/image",
|
||||
"api/types/mount",
|
||||
"api/types/network",
|
||||
"api/types/registry",
|
||||
"api/types/strslice",
|
||||
"api/types/swarm",
|
||||
"api/types/swarm/runtime",
|
||||
"api/types/time",
|
||||
"api/types/versions",
|
||||
"api/types/volume"
|
||||
]
|
||||
revision = "b3f5b5de7bbce0acc6a7fc0a4c2b88db678e262e"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/Microsoft/go-winio"
|
||||
packages = ["."]
|
||||
revision = "97e4973ce50b2ff5f09635a57e2b88a037aae829"
|
||||
version = "v0.4.11"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/davecgh/go-spew"
|
||||
packages = ["spew"]
|
||||
revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/distribution"
|
||||
packages = [
|
||||
"digestset",
|
||||
"reference"
|
||||
]
|
||||
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/go-connections"
|
||||
packages = [
|
||||
"nat",
|
||||
"sockets",
|
||||
"tlsconfig"
|
||||
]
|
||||
revision = "3ede32e2033de7505e6500d6c868c2b9ed9f169d"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/docker/go-units"
|
||||
packages = ["."]
|
||||
revision = "47565b4f722fb6ceae66b95f853feed578a4a51c"
|
||||
version = "v0.3.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/drone/signal"
|
||||
packages = ["."]
|
||||
revision = "8e64eaa3eaf106e8702d6622c43fd78de52ec9d2"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/ghodss/yaml"
|
||||
packages = ["."]
|
||||
revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
|
||||
version = "v1.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/gogo/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"sortkeys"
|
||||
]
|
||||
revision = "100ba4e885062801d56799d78530b73b178a78f3"
|
||||
version = "v0.4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/golang/glog"
|
||||
packages = ["."]
|
||||
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/mock"
|
||||
packages = ["gomock"]
|
||||
revision = "c34cdb4725f4c3844d095133c6e40e448b86589b"
|
||||
version = "v1.1.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/golang/protobuf"
|
||||
packages = [
|
||||
"proto",
|
||||
"ptypes",
|
||||
"ptypes/any",
|
||||
"ptypes/duration",
|
||||
"ptypes/timestamp"
|
||||
]
|
||||
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
|
||||
version = "v1.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/btree"
|
||||
packages = ["."]
|
||||
revision = "4030bb1f1f0c35b30ca7009e9ebd06849dd45306"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/google/go-cmp"
|
||||
packages = [
|
||||
"cmp",
|
||||
"cmp/internal/diff",
|
||||
"cmp/internal/function",
|
||||
"cmp/internal/value"
|
||||
]
|
||||
revision = "3af367b6b30c263d47e8895973edcca9a49cf029"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/google/gofuzz"
|
||||
packages = ["."]
|
||||
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/googleapis/gnostic"
|
||||
packages = [
|
||||
"OpenAPIv2",
|
||||
"compiler",
|
||||
"extensions"
|
||||
]
|
||||
revision = "7c663266750e7d82587642f65e60bc4083f1f84e"
|
||||
version = "v0.2.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/gregjones/httpcache"
|
||||
packages = [
|
||||
".",
|
||||
"diskcache"
|
||||
]
|
||||
revision = "c63ab54fda8f77302f8d414e19933f2b6026a089"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/hashicorp/golang-lru"
|
||||
packages = [
|
||||
".",
|
||||
"simplelru"
|
||||
]
|
||||
revision = "20f1fb78b0740ba8c3cb143a61e86ba5c8669768"
|
||||
version = "v0.5.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/imdario/mergo"
|
||||
packages = ["."]
|
||||
revision = "9f23e2d6bd2a77f959b2bf6acdbefd708a83a4a4"
|
||||
version = "v0.3.6"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/json-iterator/go"
|
||||
packages = ["."]
|
||||
revision = "1624edc4454b8682399def8740d46db5e4362ba4"
|
||||
version = "v1.1.5"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/mattn/go-isatty"
|
||||
packages = ["."]
|
||||
revision = "6ca4dbf54d38eea1a992b3c722a76a5d1c4cb25c"
|
||||
version = "v0.0.4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/modern-go/concurrent"
|
||||
packages = ["."]
|
||||
revision = "bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94"
|
||||
version = "1.0.3"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/modern-go/reflect2"
|
||||
packages = ["."]
|
||||
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
|
||||
version = "1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/natessilva/dag"
|
||||
packages = ["."]
|
||||
revision = "7194b8dcc5c4ac1f9a86465b7da3dc2f5765c1bf"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opencontainers/go-digest"
|
||||
packages = ["."]
|
||||
revision = "279bed98673dd5bef374d3b6e4b09e2af76183bf"
|
||||
version = "v1.0.0-rc1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/opencontainers/image-spec"
|
||||
packages = [
|
||||
"specs-go",
|
||||
"specs-go/v1"
|
||||
]
|
||||
revision = "d60099175f88c47cd379c4738d158884749ed235"
|
||||
version = "v1.0.1"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "github.com/petar/GoLLRB"
|
||||
packages = ["llrb"]
|
||||
revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/peterbourgon/diskv"
|
||||
packages = ["."]
|
||||
revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
|
||||
version = "v2.0.1"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/pkg/errors"
|
||||
packages = ["."]
|
||||
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
|
||||
version = "v0.8.0"
|
||||
|
||||
[[projects]]
|
||||
name = "github.com/spf13/pflag"
|
||||
packages = ["."]
|
||||
revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
|
||||
version = "v1.0.3"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/crypto"
|
||||
packages = ["ssh/terminal"]
|
||||
revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/net"
|
||||
packages = [
|
||||
"context",
|
||||
"context/ctxhttp",
|
||||
"http/httpguts",
|
||||
"http2",
|
||||
"http2/hpack",
|
||||
"idna",
|
||||
"internal/socks",
|
||||
"proxy"
|
||||
]
|
||||
revision = "146acd28ed5894421fb5aac80ca93bc1b1f46f87"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/oauth2"
|
||||
packages = [
|
||||
".",
|
||||
"internal"
|
||||
]
|
||||
revision = "d668ce993890a79bda886613ee587a69dd5da7a6"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
||||
packages = ["errgroup"]
|
||||
revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sys"
|
||||
packages = [
|
||||
"unix",
|
||||
"windows"
|
||||
]
|
||||
revision = "4497e2df6f9e69048a54498c7affbbec3294ad47"
|
||||
|
||||
[[projects]]
|
||||
name = "golang.org/x/text"
|
||||
packages = [
|
||||
"collate",
|
||||
"collate/build",
|
||||
"internal/colltab",
|
||||
"internal/gen",
|
||||
"internal/tag",
|
||||
"internal/triegen",
|
||||
"internal/ucd",
|
||||
"language",
|
||||
"secure/bidirule",
|
||||
"transform",
|
||||
"unicode/bidi",
|
||||
"unicode/cldr",
|
||||
"unicode/norm",
|
||||
"unicode/rangetable"
|
||||
]
|
||||
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
|
||||
version = "v0.3.0"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/time"
|
||||
packages = ["rate"]
|
||||
revision = "85acf8d2951cb2a3bde7632f9ff273ef0379bcbd"
|
||||
|
||||
[[projects]]
|
||||
name = "google.golang.org/appengine"
|
||||
packages = [
|
||||
"internal",
|
||||
"internal/base",
|
||||
"internal/datastore",
|
||||
"internal/log",
|
||||
"internal/remote_api",
|
||||
"internal/urlfetch",
|
||||
"urlfetch"
|
||||
]
|
||||
revision = "4a4468ece617fc8205e99368fa2200e9d1fad421"
|
||||
version = "v1.3.0"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/inf.v0"
|
||||
packages = ["."]
|
||||
revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf"
|
||||
version = "v0.9.1"
|
||||
|
||||
[[projects]]
|
||||
name = "gopkg.in/yaml.v2"
|
||||
packages = ["."]
|
||||
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
|
||||
version = "v2.2.2"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/api"
|
||||
packages = [
|
||||
"admissionregistration/v1alpha1",
|
||||
"admissionregistration/v1beta1",
|
||||
"apps/v1",
|
||||
"apps/v1beta1",
|
||||
"apps/v1beta2",
|
||||
"authentication/v1",
|
||||
"authentication/v1beta1",
|
||||
"authorization/v1",
|
||||
"authorization/v1beta1",
|
||||
"autoscaling/v1",
|
||||
"autoscaling/v2beta1",
|
||||
"autoscaling/v2beta2",
|
||||
"batch/v1",
|
||||
"batch/v1beta1",
|
||||
"batch/v2alpha1",
|
||||
"certificates/v1beta1",
|
||||
"coordination/v1beta1",
|
||||
"core/v1",
|
||||
"events/v1beta1",
|
||||
"extensions/v1beta1",
|
||||
"networking/v1",
|
||||
"policy/v1beta1",
|
||||
"rbac/v1",
|
||||
"rbac/v1alpha1",
|
||||
"rbac/v1beta1",
|
||||
"scheduling/v1alpha1",
|
||||
"scheduling/v1beta1",
|
||||
"settings/v1alpha1",
|
||||
"storage/v1",
|
||||
"storage/v1alpha1",
|
||||
"storage/v1beta1"
|
||||
]
|
||||
revision = "d04500c8c3dda9c980b668c57abc2ca61efcf5c4"
|
||||
|
||||
[[projects]]
|
||||
branch = "master"
|
||||
name = "k8s.io/apimachinery"
|
||||
packages = [
|
||||
"pkg/api/errors",
|
||||
"pkg/api/meta",
|
||||
"pkg/api/resource",
|
||||
"pkg/apis/meta/internalversion",
|
||||
"pkg/apis/meta/v1",
|
||||
"pkg/apis/meta/v1/unstructured",
|
||||
"pkg/apis/meta/v1beta1",
|
||||
"pkg/conversion",
|
||||
"pkg/conversion/queryparams",
|
||||
"pkg/fields",
|
||||
"pkg/labels",
|
||||
"pkg/runtime",
|
||||
"pkg/runtime/schema",
|
||||
"pkg/runtime/serializer",
|
||||
"pkg/runtime/serializer/json",
|
||||
"pkg/runtime/serializer/protobuf",
|
||||
"pkg/runtime/serializer/recognizer",
|
||||
"pkg/runtime/serializer/streaming",
|
||||
"pkg/runtime/serializer/versioning",
|
||||
"pkg/selection",
|
||||
"pkg/types",
|
||||
"pkg/util/cache",
|
||||
"pkg/util/clock",
|
||||
"pkg/util/diff",
|
||||
"pkg/util/errors",
|
||||
"pkg/util/framer",
|
||||
"pkg/util/intstr",
|
||||
"pkg/util/json",
|
||||
"pkg/util/naming",
|
||||
"pkg/util/net",
|
||||
"pkg/util/runtime",
|
||||
"pkg/util/sets",
|
||||
"pkg/util/validation",
|
||||
"pkg/util/validation/field",
|
||||
"pkg/util/wait",
|
||||
"pkg/util/yaml",
|
||||
"pkg/version",
|
||||
"pkg/watch",
|
||||
"third_party/forked/golang/reflect"
|
||||
]
|
||||
revision = "18a5ff3097b4b189511742e39151a153ee16988b"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/client-go"
|
||||
packages = [
|
||||
"discovery",
|
||||
"informers",
|
||||
"informers/admissionregistration",
|
||||
"informers/admissionregistration/v1alpha1",
|
||||
"informers/admissionregistration/v1beta1",
|
||||
"informers/apps",
|
||||
"informers/apps/v1",
|
||||
"informers/apps/v1beta1",
|
||||
"informers/apps/v1beta2",
|
||||
"informers/autoscaling",
|
||||
"informers/autoscaling/v1",
|
||||
"informers/autoscaling/v2beta1",
|
||||
"informers/autoscaling/v2beta2",
|
||||
"informers/batch",
|
||||
"informers/batch/v1",
|
||||
"informers/batch/v1beta1",
|
||||
"informers/batch/v2alpha1",
|
||||
"informers/certificates",
|
||||
"informers/certificates/v1beta1",
|
||||
"informers/coordination",
|
||||
"informers/coordination/v1beta1",
|
||||
"informers/core",
|
||||
"informers/core/v1",
|
||||
"informers/events",
|
||||
"informers/events/v1beta1",
|
||||
"informers/extensions",
|
||||
"informers/extensions/v1beta1",
|
||||
"informers/internalinterfaces",
|
||||
"informers/networking",
|
||||
"informers/networking/v1",
|
||||
"informers/policy",
|
||||
"informers/policy/v1beta1",
|
||||
"informers/rbac",
|
||||
"informers/rbac/v1",
|
||||
"informers/rbac/v1alpha1",
|
||||
"informers/rbac/v1beta1",
|
||||
"informers/scheduling",
|
||||
"informers/scheduling/v1alpha1",
|
||||
"informers/scheduling/v1beta1",
|
||||
"informers/settings",
|
||||
"informers/settings/v1alpha1",
|
||||
"informers/storage",
|
||||
"informers/storage/v1",
|
||||
"informers/storage/v1alpha1",
|
||||
"informers/storage/v1beta1",
|
||||
"kubernetes",
|
||||
"kubernetes/scheme",
|
||||
"kubernetes/typed/admissionregistration/v1alpha1",
|
||||
"kubernetes/typed/admissionregistration/v1beta1",
|
||||
"kubernetes/typed/apps/v1",
|
||||
"kubernetes/typed/apps/v1beta1",
|
||||
"kubernetes/typed/apps/v1beta2",
|
||||
"kubernetes/typed/authentication/v1",
|
||||
"kubernetes/typed/authentication/v1beta1",
|
||||
"kubernetes/typed/authorization/v1",
|
||||
"kubernetes/typed/authorization/v1beta1",
|
||||
"kubernetes/typed/autoscaling/v1",
|
||||
"kubernetes/typed/autoscaling/v2beta1",
|
||||
"kubernetes/typed/autoscaling/v2beta2",
|
||||
"kubernetes/typed/batch/v1",
|
||||
"kubernetes/typed/batch/v1beta1",
|
||||
"kubernetes/typed/batch/v2alpha1",
|
||||
"kubernetes/typed/certificates/v1beta1",
|
||||
"kubernetes/typed/coordination/v1beta1",
|
||||
"kubernetes/typed/core/v1",
|
||||
"kubernetes/typed/events/v1beta1",
|
||||
"kubernetes/typed/extensions/v1beta1",
|
||||
"kubernetes/typed/networking/v1",
|
||||
"kubernetes/typed/policy/v1beta1",
|
||||
"kubernetes/typed/rbac/v1",
|
||||
"kubernetes/typed/rbac/v1alpha1",
|
||||
"kubernetes/typed/rbac/v1beta1",
|
||||
"kubernetes/typed/scheduling/v1alpha1",
|
||||
"kubernetes/typed/scheduling/v1beta1",
|
||||
"kubernetes/typed/settings/v1alpha1",
|
||||
"kubernetes/typed/storage/v1",
|
||||
"kubernetes/typed/storage/v1alpha1",
|
||||
"kubernetes/typed/storage/v1beta1",
|
||||
"listers/admissionregistration/v1alpha1",
|
||||
"listers/admissionregistration/v1beta1",
|
||||
"listers/apps/v1",
|
||||
"listers/apps/v1beta1",
|
||||
"listers/apps/v1beta2",
|
||||
"listers/autoscaling/v1",
|
||||
"listers/autoscaling/v2beta1",
|
||||
"listers/autoscaling/v2beta2",
|
||||
"listers/batch/v1",
|
||||
"listers/batch/v1beta1",
|
||||
"listers/batch/v2alpha1",
|
||||
"listers/certificates/v1beta1",
|
||||
"listers/coordination/v1beta1",
|
||||
"listers/core/v1",
|
||||
"listers/events/v1beta1",
|
||||
"listers/extensions/v1beta1",
|
||||
"listers/networking/v1",
|
||||
"listers/policy/v1beta1",
|
||||
"listers/rbac/v1",
|
||||
"listers/rbac/v1alpha1",
|
||||
"listers/rbac/v1beta1",
|
||||
"listers/scheduling/v1alpha1",
|
||||
"listers/scheduling/v1beta1",
|
||||
"listers/settings/v1alpha1",
|
||||
"listers/storage/v1",
|
||||
"listers/storage/v1alpha1",
|
||||
"listers/storage/v1beta1",
|
||||
"pkg/apis/clientauthentication",
|
||||
"pkg/apis/clientauthentication/v1alpha1",
|
||||
"pkg/apis/clientauthentication/v1beta1",
|
||||
"pkg/version",
|
||||
"plugin/pkg/client/auth/exec",
|
||||
"rest",
|
||||
"rest/watch",
|
||||
"tools/auth",
|
||||
"tools/cache",
|
||||
"tools/clientcmd",
|
||||
"tools/clientcmd/api",
|
||||
"tools/clientcmd/api/latest",
|
||||
"tools/clientcmd/api/v1",
|
||||
"tools/metrics",
|
||||
"tools/pager",
|
||||
"tools/reference",
|
||||
"transport",
|
||||
"util/buffer",
|
||||
"util/cert",
|
||||
"util/connrotation",
|
||||
"util/flowcontrol",
|
||||
"util/homedir",
|
||||
"util/integer",
|
||||
"util/retry"
|
||||
]
|
||||
revision = "1638f8970cefaa404ff3a62950f88b08292b2696"
|
||||
version = "v9.0.0"
|
||||
|
||||
[[projects]]
|
||||
name = "k8s.io/klog"
|
||||
packages = ["."]
|
||||
revision = "a5bc97fbc634d635061f3146511332c7e313a55a"
|
||||
version = "v0.1.0"
|
||||
|
||||
[[projects]]
|
||||
name = "sigs.k8s.io/yaml"
|
||||
packages = ["."]
|
||||
revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
|
||||
version = "v1.1.0"
|
||||
|
||||
[solve-meta]
|
||||
analyzer-name = "dep"
|
||||
analyzer-version = 1
|
||||
inputs-digest = "826a38bb75868d939208d5378494cc45c03fbf865494ef1fd7c65d9ad55e72d1"
|
||||
solver-name = "gps-cdcl"
|
||||
solver-version = 1
|
@ -0,0 +1,54 @@
|
||||
# Gopkg.toml example
|
||||
#
|
||||
# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
|
||||
# for detailed Gopkg.toml documentation.
|
||||
#
|
||||
# required = ["github.com/user/thing/cmd/thing"]
|
||||
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project"
|
||||
# version = "1.0.0"
|
||||
#
|
||||
# [[constraint]]
|
||||
# name = "github.com/user/project2"
|
||||
# branch = "dev"
|
||||
# source = "github.com/myfork/project2"
|
||||
#
|
||||
# [[override]]
|
||||
# name = "github.com/x/y"
|
||||
# version = "2.4.0"
|
||||
#
|
||||
# [prune]
|
||||
# non-go = false
|
||||
# go-tests = true
|
||||
# unused-packages = true
|
||||
|
||||
|
||||
[[constraint]]
|
||||
name = "docker.io/go-docker"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/drone/signal"
|
||||
version = "1.0.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/golang/mock"
|
||||
version = "1.1.1"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/google/go-cmp"
|
||||
version = "0.2.0"
|
||||
|
||||
[[constraint]]
|
||||
name = "github.com/mattn/go-isatty"
|
||||
version = "0.0.4"
|
||||
|
||||
[[constraint]]
|
||||
branch = "master"
|
||||
name = "golang.org/x/sync"
|
||||
|
||||
[prune]
|
||||
go-tests = true
|
||||
unused-packages = true
|
@ -0,0 +1,76 @@
|
||||
The drone runtime package implements the execution model for container-based pipelines. It is effectively a lightweight container orchestration engine optimized for pipelines.
|
||||
|
||||
## Definition File
|
||||
|
||||
The runtime package accepts a pipeline definition file as input, which is a simple json file. This file is not intended to be read or written by humans. It is considered an intermediate representation, and should be generated by a computer program from more user-friendly formats such as yaml.
|
||||
|
||||
Example hello world definition file:
|
||||
|
||||
```json
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_AOTCIPBf3XdTFs2j",
|
||||
"namespace": "ns_JVzesGoyteu5koZK",
|
||||
"name": "test_hello_world"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_8a7IJsL9zSJCCchd",
|
||||
"namespace": "ns_JVzesGoyteu5koZK",
|
||||
"name": "greetings"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"echo hello world"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6",
|
||||
"pull_policy": "default"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
||||
```
|
||||
|
||||
## Local Testing
|
||||
|
||||
The runtime package includes a simple command line utility allowing you to test pipeline execution locally. You should use this for local development and testing.
|
||||
|
||||
The runtime package includes sample definition files that you can safely execute on any machine with Docker installed. These sample files should be used for research and testing purposes.
|
||||
|
||||
Example commands:
|
||||
|
||||
```text
|
||||
drone-runtime samples/1_hello_world.json
|
||||
drone-runtime samples/2_on_success.json
|
||||
drone-runtime samples/3_on_failure.json
|
||||
drone-runtime samples/4_volume_host.json
|
||||
drone-runtime samples/5_volume_temp.json
|
||||
drone-runtime samples/6_redis.json
|
||||
drone-runtime samples/7_redis_multi.json
|
||||
drone-runtime samples/8_postgres.json
|
||||
drone-runtime samples/9_working_dir.json
|
||||
drone-runtime samples/10_docker.json
|
||||
```
|
||||
|
||||
Example command to test docker login:
|
||||
|
||||
```text
|
||||
drone-runtime --config=path/to/config.json samples/11_requires_auth.json
|
||||
```
|
||||
|
||||
## Kubernetes Engines
|
||||
|
||||
The default runtime engine targets Docker, however, there is an experimental runtime engine that targets Kubernetes. Pipeline containers are launched as Pods using the Kubernetes API.
|
||||
|
||||
```
|
||||
drone-runtime \
|
||||
--kube-url=https://localhost:6443 \
|
||||
--kube-config=~/.kube/config \
|
||||
samples/kubernetes/1_hello_world.json
|
||||
```
|
@ -0,0 +1,114 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// PullPolicy defines the container image pull policy.
type PullPolicy int

// PullPolicy enumeration.
const (
	PullDefault PullPolicy = iota
	PullAlways
	PullIfNotExists
	PullNever
)

// String returns the string representation of the pull
// policy. Unknown values yield the empty string.
func (p PullPolicy) String() string {
	return pullPolicyID[p]
}

// pullPolicyID maps each pull policy to its canonical
// string representation.
var pullPolicyID = map[PullPolicy]string{
	PullDefault:     "default",
	PullAlways:      "always",
	PullIfNotExists: "if-not-exists",
	PullNever:       "never",
}

// pullPolicyName maps a string to its pull policy. Note the
// empty string intentionally maps to PullDefault, so missing
// values unmarshal to the default policy.
var pullPolicyName = map[string]PullPolicy{
	"":              PullDefault,
	"default":       PullDefault,
	"always":        PullAlways,
	"if-not-exists": PullIfNotExists,
	"never":         PullNever,
}
|
||||
|
||||
// MarshalJSON marshals the string representation of the
|
||||
// pull type to JSON.
|
||||
func (p *PullPolicy) MarshalJSON() ([]byte, error) {
|
||||
buffer := bytes.NewBufferString(`"`)
|
||||
buffer.WriteString(pullPolicyID[*p])
|
||||
buffer.WriteString(`"`)
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals the json representation of the
|
||||
// pull type from a string value.
|
||||
func (p *PullPolicy) UnmarshalJSON(b []byte) error {
|
||||
// unmarshal as string
|
||||
var s string
|
||||
err := json.Unmarshal(b, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// lookup value
|
||||
*p = pullPolicyName[s]
|
||||
return nil
|
||||
}
|
||||
|
||||
// RunPolicy defines the policy for starting containers
// based on the point-in-time pass or fail state of
// the pipeline.
type RunPolicy int

// RunPolicy enumeration.
const (
	RunOnSuccess RunPolicy = iota
	RunOnFailure
	RunAlways
	RunNever
)

// String returns the string representation of the run
// policy. Unknown values yield the empty string.
func (r RunPolicy) String() string {
	return runPolicyID[r]
}

// runPolicyID maps each run policy to its canonical
// string representation.
var runPolicyID = map[RunPolicy]string{
	RunOnSuccess: "on-success",
	RunOnFailure: "on-failure",
	RunAlways:    "always",
	RunNever:     "never",
}

// runPolicyName maps a string to its run policy. Note the
// empty string intentionally maps to RunOnSuccess, so missing
// values unmarshal to the default policy.
var runPolicyName = map[string]RunPolicy{
	"":           RunOnSuccess,
	"on-success": RunOnSuccess,
	"on-failure": RunOnFailure,
	"always":     RunAlways,
	"never":      RunNever,
}
|
||||
|
||||
// MarshalJSON marshals the string representation of the
|
||||
// run type to JSON.
|
||||
func (r *RunPolicy) MarshalJSON() ([]byte, error) {
|
||||
buffer := bytes.NewBufferString(`"`)
|
||||
buffer.WriteString(runPolicyID[*r])
|
||||
buffer.WriteString(`"`)
|
||||
return buffer.Bytes(), nil
|
||||
}
|
||||
|
||||
// UnmarshalJSON unmarshals the json representation of the
|
||||
// run type from a string value.
|
||||
func (r *RunPolicy) UnmarshalJSON(b []byte) error {
|
||||
// unmarshal as string
|
||||
var s string
|
||||
err := json.Unmarshal(b, &s)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// lookup value
|
||||
*r = runPolicyName[s]
|
||||
return nil
|
||||
}
|
@ -0,0 +1,233 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"testing"
|
||||
)
|
||||
|
||||
//
|
||||
// runtime policy unit tests.
|
||||
//
|
||||
|
||||
func TestRunPolicy_Marshal(t *testing.T) {
|
||||
tests := []struct {
|
||||
policy RunPolicy
|
||||
data string
|
||||
}{
|
||||
{
|
||||
policy: RunAlways,
|
||||
data: `"always"`,
|
||||
},
|
||||
{
|
||||
policy: RunOnFailure,
|
||||
data: `"on-failure"`,
|
||||
},
|
||||
{
|
||||
policy: RunOnSuccess,
|
||||
data: `"on-success"`,
|
||||
},
|
||||
{
|
||||
policy: RunNever,
|
||||
data: `"never"`,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
data, err := json.Marshal(&test.policy)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
if bytes.Equal([]byte(test.data), data) == false {
|
||||
t.Errorf("Failed to marshal policy %s", test.policy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestRunPolicy_Unmarshal verifies that quoted JSON strings
// unmarshal to the matching run policy, and that the empty
// string defaults to RunOnSuccess.
func TestRunPolicy_Unmarshal(t *testing.T) {
	tests := []struct {
		policy RunPolicy
		data   string
	}{
		{
			policy: RunAlways,
			data:   `"always"`,
		},
		{
			policy: RunOnFailure,
			data:   `"on-failure"`,
		},
		{
			policy: RunOnSuccess,
			data:   `"on-success"`,
		},
		{
			policy: RunNever,
			data:   `"never"`,
		},
		{
			// no policy should default to on-success
			policy: RunOnSuccess,
			data:   `""`,
		},
	}
	for _, test := range tests {
		var policy RunPolicy
		err := json.Unmarshal([]byte(test.data), &policy)
		if err != nil {
			t.Error(err)
			return
		}
		if got, want := policy, test.policy; got != want {
			t.Errorf("Want policy %q, got %q", want, got)
		}
	}
}
|
||||
|
||||
// TestRunPolicy_UnmarshalTypeError verifies that unmarshaling
// a non-string JSON value (here, an array) surfaces a
// *json.UnmarshalTypeError rather than being swallowed.
func TestRunPolicy_UnmarshalTypeError(t *testing.T) {
	var policy RunPolicy
	err := json.Unmarshal([]byte("[]"), &policy)
	if _, ok := err.(*json.UnmarshalTypeError); !ok {
		t.Errorf("Expect unmarshal error return when JSON invalid")
	}
}
|
||||
|
||||
func TestRunPolicy_String(t *testing.T) {
|
||||
tests := []struct {
|
||||
policy RunPolicy
|
||||
value string
|
||||
}{
|
||||
{
|
||||
policy: RunAlways,
|
||||
value: "always",
|
||||
},
|
||||
{
|
||||
policy: RunOnFailure,
|
||||
value: "on-failure",
|
||||
},
|
||||
{
|
||||
policy: RunOnSuccess,
|
||||
value: "on-success",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
if got, want := test.policy.String(), test.value; got != want {
|
||||
t.Errorf("Want policy string %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// pull policy unit tests.
|
||||
//
|
||||
|
||||
func TestPullPolicy_Marshal(t *testing.T) {
|
||||
tests := []struct {
|
||||
policy PullPolicy
|
||||
data string
|
||||
}{
|
||||
{
|
||||
policy: PullAlways,
|
||||
data: `"always"`,
|
||||
},
|
||||
{
|
||||
policy: PullDefault,
|
||||
data: `"default"`,
|
||||
},
|
||||
{
|
||||
policy: PullIfNotExists,
|
||||
data: `"if-not-exists"`,
|
||||
},
|
||||
{
|
||||
policy: PullNever,
|
||||
data: `"never"`,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
data, err := json.Marshal(&test.policy)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
if bytes.Equal([]byte(test.data), data) == false {
|
||||
t.Errorf("Failed to marshal policy %s", test.policy)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPullPolicy_Unmarshal verifies that quoted JSON strings
// unmarshal to the matching pull policy, and that the empty
// string defaults to PullDefault.
func TestPullPolicy_Unmarshal(t *testing.T) {
	tests := []struct {
		policy PullPolicy
		data   string
	}{
		{
			policy: PullAlways,
			data:   `"always"`,
		},
		{
			policy: PullDefault,
			data:   `"default"`,
		},
		{
			policy: PullIfNotExists,
			data:   `"if-not-exists"`,
		},
		{
			policy: PullNever,
			data:   `"never"`,
		},
		{
			// no policy should default to the default pull
			// policy (the original comment wrongly said
			// "on-success", which is a RunPolicy value).
			policy: PullDefault,
			data:   `""`,
		},
	}
	for _, test := range tests {
		var policy PullPolicy
		err := json.Unmarshal([]byte(test.data), &policy)
		if err != nil {
			t.Error(err)
			return
		}
		if got, want := policy, test.policy; got != want {
			t.Errorf("Want policy %q, got %q", want, got)
		}
	}
}
|
||||
|
||||
// TestPullPolicy_UnmarshalTypeError verifies that unmarshaling
// a non-string JSON value (here, an array) surfaces a
// *json.UnmarshalTypeError rather than being swallowed.
func TestPullPolicy_UnmarshalTypeError(t *testing.T) {
	var policy PullPolicy
	err := json.Unmarshal([]byte("[]"), &policy)
	if _, ok := err.(*json.UnmarshalTypeError); !ok {
		t.Errorf("Expect unmarshal error return when JSON invalid")
	}
}
|
||||
|
||||
func TestPullPolicy_String(t *testing.T) {
|
||||
tests := []struct {
|
||||
policy PullPolicy
|
||||
value string
|
||||
}{
|
||||
{
|
||||
policy: PullAlways,
|
||||
value: "always",
|
||||
},
|
||||
{
|
||||
policy: PullDefault,
|
||||
value: "default",
|
||||
},
|
||||
{
|
||||
policy: PullIfNotExists,
|
||||
value: "if-not-exists",
|
||||
},
|
||||
{
|
||||
policy: PullNever,
|
||||
value: "never",
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
if got, want := test.policy.String(), test.value; got != want {
|
||||
t.Errorf("Want policy string %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,118 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
// config represents the Docker client configuration,
// typically located at ~/.docker/config.json
type config struct {
	// Auths maps a registry address (as written in the
	// config file, possibly a full url) to its credentials.
	Auths map[string]auths `json:"auths"`
}

// auths holds the base64 encoded credential string for a
// single registry entry.
type auths struct {
	Auth string `json:"auth"`
}
|
||||
|
||||
// Parse parses the registry credential from the reader.
|
||||
func Parse(r io.Reader) ([]*engine.DockerAuth, error) {
|
||||
c := new(config)
|
||||
err := json.NewDecoder(r).Decode(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var auths []*engine.DockerAuth
|
||||
for k, v := range c.Auths {
|
||||
username, password := decode(v.Auth)
|
||||
auths = append(auths, &engine.DockerAuth{
|
||||
Address: hostname(k),
|
||||
Username: username,
|
||||
Password: password,
|
||||
})
|
||||
}
|
||||
return auths, nil
|
||||
}
|
||||
|
||||
// ParseFile parses the registry credential file, typically
// located at ~/.docker/config.json.
func ParseFile(filepath string) ([]*engine.DockerAuth, error) {
	f, err := os.Open(filepath)
	if err != nil {
		return nil, err
	}
	defer f.Close()
	return Parse(f)
}
|
||||
|
||||
// ParseString parses the registry credentials from the given
// string, which is expected to contain a Docker config.json
// document. (The original comment wrongly said "file".)
func ParseString(s string) ([]*engine.DockerAuth, error) {
	return Parse(strings.NewReader(s))
}
|
||||
|
||||
// encode returns the base64 encoded "username:password"
// credential pair, as stored in a Docker config.json.
func encode(username, password string) string {
	pair := username + ":" + password
	return base64.StdEncoding.EncodeToString([]byte(pair))
}
|
||||
|
||||
// decode returns the username and password decoded from a
// base64 "username:password" credential string. Malformed
// base64 input yields empty strings; a missing colon yields
// an empty password.
func decode(s string) (username, password string) {
	raw, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return
	}
	username, password, _ = strings.Cut(string(raw), ":")
	return
}
|
||||
|
||||
// hostname normalizes a registry address to a bare host
// name. Credentials for Docker Hub are commonly keyed by
// the URL "https://index.docker.io/v1/"; in that case the
// URL host is returned. Addresses that are not URLs (or
// that fail to parse) are returned unmodified.
func hostname(s string) string {
	// BUG FIX: the parse error was previously ignored, so a
	// malformed address (e.g. "://x") left uri nil and the
	// uri.Host access panicked.
	uri, err := url.Parse(s)
	if err == nil && uri.Host != "" {
		s = uri.Host
	}
	return s
}
|
||||
|
||||
// Encode returns the json marshaled, base64 encoded
|
||||
// credential string that can be passed to the docker
|
||||
// registry authentication header.
|
||||
func Encode(username, password string) string {
|
||||
v := struct {
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
}{
|
||||
Username: username,
|
||||
Password: password,
|
||||
}
|
||||
buf, _ := json.Marshal(&v)
|
||||
return base64.URLEncoding.EncodeToString(buf)
|
||||
}
|
||||
|
||||
// Marshal marshals the DockerAuth credentials to a
|
||||
// .docker/config.json file.
|
||||
func Marshal(list []*engine.DockerAuth) ([]byte, error) {
|
||||
out := &config{}
|
||||
out.Auths = map[string]auths{}
|
||||
for _, item := range list {
|
||||
out.Auths[item.Address] = auths{
|
||||
Auth: encode(
|
||||
item.Username,
|
||||
item.Password,
|
||||
),
|
||||
}
|
||||
}
|
||||
return json.Marshal(out)
|
||||
}
|
@ -0,0 +1,139 @@
|
||||
package auth
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// TestParse verifies that a raw config.json string is parsed
// into the expected credential list, with the registry URL
// normalized to a bare hostname.
func TestParse(t *testing.T) {
	got, err := ParseString(sample)
	if err != nil {
		t.Error(err)
		return
	}
	want := []*engine.DockerAuth{
		{
			Address:  "index.docker.io",
			Username: "octocat",
			Password: "correct-horse-battery-staple",
		},
	}
	if diff := cmp.Diff(got, want); diff != "" {
		t.Errorf(diff)
	}
}
|
||||
|
||||
// TestParseGCR verifies parsing of a GCR-style credential
// file where the password itself contains a colon (only the
// first colon separates username from password).
func TestParseGCR(t *testing.T) {
	got, err := ParseFile("testdata/config_gcr.json")
	if err != nil {
		t.Error(err)
		return
	}
	want := []*engine.DockerAuth{
		{
			Address:  "gcr.io",
			Username: "_json_key",
			Password: "xxx:bar\n",
		},
	}
	if diff := cmp.Diff(got, want); diff != "" {
		t.Errorf(diff)
	}
}
|
||||
|
||||
// TestParseErr verifies that an empty input surfaces the
// underlying json decode error.
func TestParseErr(t *testing.T) {
	_, err := ParseString("")
	if err == nil {
		t.Errorf("Expect unmarshal error")
	}
}
|
||||
|
||||
// TestParseFile verifies parsing credentials from a file
// on disk.
func TestParseFile(t *testing.T) {
	got, err := ParseFile("./testdata/config.json")
	if err != nil {
		t.Error(err)
		return
	}
	want := []*engine.DockerAuth{
		{
			Address:  "index.docker.io",
			Username: "octocat",
			Password: "correct-horse-battery-staple",
		},
	}
	if diff := cmp.Diff(got, want); diff != "" {
		t.Errorf(diff)
	}
}
|
||||
|
||||
// TestParseFileErr verifies that a missing file yields an
// *os.PathError from os.Open.
func TestParseFileErr(t *testing.T) {
	_, err := ParseFile("./testdata/x.json")
	if _, ok := err.(*os.PathError); !ok {
		t.Errorf("Expect error when file does not exist")
	}
}
|
||||
|
||||
// Test_encodeDecode verifies that encode and decode are
// inverses for a username:password pair.
func Test_encodeDecode(t *testing.T) {
	username := "octocat"
	password := "correct-horse-battery-staple"

	encoded := encode(username, password)
	decodedUsername, decodedPassword := decode(encoded)
	if got, want := decodedUsername, username; got != want {
		t.Errorf("Want decoded username %s, got %s", want, got)
	}
	if got, want := decodedPassword, password; got != want {
		t.Errorf("Want decoded password %s, got %s", want, got)
	}
}
|
||||
|
||||
// Test_decodeInvalid verifies that invalid base64 input
// produces empty credentials rather than an error or panic.
func Test_decodeInvalid(t *testing.T) {
	username, password := decode("b2N0b2NhdDp==")
	if username != "" || password != "" {
		t.Errorf("Expect decoding error")
	}
}
|
||||
|
||||
// TestEncode verifies the registry authentication header is
// URL-safe base64 wrapping the expected JSON payload.
func TestEncode(t *testing.T) {
	username := "octocat"
	password := "correct-horse-battery-staple"
	result := Encode(username, password)
	got, err := base64.URLEncoding.DecodeString(result)
	if err != nil {
		t.Error(err)
		return
	}
	want := []byte(`{"username":"octocat","password":"correct-horse-battery-staple"}`)
	if bytes.Equal(got, want) == false {
		t.Errorf("Could not decode credential header")
	}
}
|
||||
|
||||
func TestMarshal(t *testing.T) {
|
||||
auths := []*engine.DockerAuth{
|
||||
{
|
||||
Address: "index.docker.io",
|
||||
Username: "octocat",
|
||||
Password: "correct-horse-battery-staple",
|
||||
},
|
||||
}
|
||||
got, _ := Marshal(auths)
|
||||
want := []byte(`{"auths":{"index.docker.io":{"auth":"b2N0b2NhdDpjb3JyZWN0LWhvcnNlLWJhdHRlcnktc3RhcGxl"}}}`)
|
||||
if bytes.Equal(got, want) == false {
|
||||
t.Errorf("Could not decode credential header")
|
||||
}
|
||||
}
|
||||
|
||||
// sample is a minimal docker config.json fixture keyed by
// the Docker Hub registry URL.
var sample = `{
  "auths": {
    "https://index.docker.io/v1/": {
      "auth": "b2N0b2NhdDpjb3JyZWN0LWhvcnNlLWJhdHRlcnktc3RhcGxl"
    }
  }
}`
|
@ -0,0 +1,7 @@
|
||||
{
|
||||
"auths": {
|
||||
"https://index.docker.io/v1/": {
|
||||
"auth": "b2N0b2NhdDpjb3JyZWN0LWhvcnNlLWJhdHRlcnktc3RhcGxl"
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,7 @@
|
||||
{
|
||||
"auths": {
|
||||
"gcr.io": {
|
||||
"auth": "X2pzb25fa2V5Onh4eDpiYXIK"
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,234 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
|
||||
"docker.io/go-docker/api/types/container"
|
||||
"docker.io/go-docker/api/types/mount"
|
||||
"docker.io/go-docker/api/types/network"
|
||||
)
|
||||
|
||||
// toConfig returns the docker container configuration for
// the pipeline step: image, labels, working directory, and
// environment (step envs first, then resolved secrets).
// Stdout/stderr are attached so logs can be streamed.
func toConfig(spec *engine.Spec, step *engine.Step) *container.Config {
	config := &container.Config{
		Image:        step.Docker.Image,
		Labels:       step.Metadata.Labels,
		WorkingDir:   step.WorkingDir,
		AttachStdin:  false,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          false,
		OpenStdin:    false,
		StdinOnce:    false,
		ArgsEscaped:  false,
	}

	if len(step.Envs) != 0 {
		config.Env = toEnv(step.Envs)
	}
	// secrets that cannot be resolved in the spec are
	// silently skipped; only found secrets are injected.
	for _, sec := range step.Secrets {
		secret, ok := engine.LookupSecret(spec, sec)
		if ok {
			config.Env = append(config.Env, sec.Env+"="+secret.Data)
		}
	}
	if len(step.Docker.Args) != 0 {
		config.Cmd = step.Docker.Args
	}
	if len(step.Docker.Command) != 0 {
		config.Entrypoint = step.Docker.Command
	}

	// NOTE it appears this is no longer required,
	// however this could cause incompatibility with
	// certain docker versions.
	//
	// if len(step.Volumes) != 0 {
	// 	config.Volumes = toVolumeSet(spec, step)
	// }
	return config
}
|
||||
|
||||
// toHostConfig returns the docker host configuration for
// the pipeline step: privileged mode, DNS, extra hosts,
// resource limits, and volume binds/mounts.
func toHostConfig(spec *engine.Spec, step *engine.Step) *container.HostConfig {
	config := &container.HostConfig{
		LogConfig: container.LogConfig{
			Type: "json-file",
		},
		Privileged: step.Docker.Privileged,
		// TODO(bradrydzewski) set ShmSize
	}
	if len(step.Docker.DNS) > 0 {
		config.DNS = step.Docker.DNS
	}
	if len(step.Docker.DNSSearch) > 0 {
		config.DNSSearch = step.Docker.DNSSearch
	}
	if len(step.Docker.ExtraHosts) > 0 {
		config.ExtraHosts = step.Docker.ExtraHosts
	}
	if step.Resources != nil {
		config.Resources = container.Resources{}
		if limits := step.Resources.Limits; limits != nil {
			config.Resources.Memory = limits.Memory
			// TODO(bradrydewski) set config.Resources.CPUPercent

			// IMPORTANT docker and kubernetes use
			// different units of measure for cpu limits.
			// we need to figure out how to convert from
			// the kubernetes unit of measure to the docker
			// unit of measure.
		}
	}

	// IMPORTANT before we implement devices for docker we
	// need to implement devices for kubernetes. This might
	// also require changes to the drone yaml format.
	if len(step.Devices) != 0 {
		// TODO(bradrydzewski) set Devices
	}

	// data volumes are expressed as Binds; all other volume
	// kinds are expressed as Mounts (see toVolumeSlice and
	// toVolumeMounts for the split).
	if len(step.Volumes) != 0 {
		config.Binds = toVolumeSlice(spec, step)
		config.Mounts = toVolumeMounts(spec, step)
	}
	return config
}
|
||||
|
||||
// helper function returns the container network configuration.
|
||||
func toNetConfig(spec *engine.Spec, proc *engine.Step) *network.NetworkingConfig {
|
||||
endpoints := map[string]*network.EndpointSettings{}
|
||||
endpoints[spec.Metadata.UID] = &network.EndpointSettings{
|
||||
NetworkID: spec.Metadata.UID,
|
||||
Aliases: []string{proc.Metadata.Name},
|
||||
}
|
||||
return &network.NetworkingConfig{
|
||||
EndpointsConfig: endpoints,
|
||||
}
|
||||
}
|
||||
|
||||
// helper function returns a slice of volume mounts.
|
||||
func toVolumeSlice(spec *engine.Spec, step *engine.Step) []string {
|
||||
// this entire function should be deprecated in
|
||||
// favor of toVolumeMounts, however, I am unable
|
||||
// to get it working with data volumes.
|
||||
var to []string
|
||||
for _, mount := range step.Volumes {
|
||||
volume, ok := engine.LookupVolume(spec, mount.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if isDataVolume(volume) == false {
|
||||
continue
|
||||
}
|
||||
path := volume.Metadata.UID + ":" + mount.Path
|
||||
to = append(to, path)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
// helper function returns a slice of docker mount
|
||||
// configurations.
|
||||
func toVolumeMounts(spec *engine.Spec, step *engine.Step) []mount.Mount {
|
||||
var mounts []mount.Mount
|
||||
for _, target := range step.Volumes {
|
||||
source, ok := engine.LookupVolume(spec, target.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
// HACK: this condition can be removed once
|
||||
// toVolumeSlice has been fully replaced. at this
|
||||
// time, I cannot figure out how to get mounts
|
||||
// working with data volumes :(
|
||||
if isDataVolume(source) {
|
||||
continue
|
||||
}
|
||||
mounts = append(mounts, toMount(source, target))
|
||||
}
|
||||
if len(mounts) == 0 {
|
||||
return nil
|
||||
}
|
||||
return mounts
|
||||
}
|
||||
|
||||
// helper function converts the volume declaration to a
|
||||
// docker mount structure.
|
||||
func toMount(source *engine.Volume, target *engine.VolumeMount) mount.Mount {
|
||||
to := mount.Mount{
|
||||
Target: target.Path,
|
||||
Type: toVolumeType(source),
|
||||
}
|
||||
if isBindMount(source) || isNamedPipe(source) {
|
||||
to.Source = source.HostPath.Path
|
||||
}
|
||||
if isTempfs(source) {
|
||||
to.TmpfsOptions = &mount.TmpfsOptions{
|
||||
SizeBytes: source.EmptyDir.SizeLimit,
|
||||
Mode: 0700,
|
||||
}
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
// helper function returns the docker volume enumeration
|
||||
// for the given volume.
|
||||
func toVolumeType(from *engine.Volume) mount.Type {
|
||||
switch {
|
||||
case isDataVolume(from):
|
||||
return mount.TypeVolume
|
||||
case isTempfs(from):
|
||||
return mount.TypeTmpfs
|
||||
case isNamedPipe(from):
|
||||
return mount.TypeNamedPipe
|
||||
default:
|
||||
return mount.TypeBind
|
||||
}
|
||||
}
|
||||
|
||||
// toEnv converts a key value map of environment variables
// to a string slice in key=value format. A nil slice is
// returned for an empty map, matching the previous behavior.
// Iteration order of the map (and therefore of the result)
// is unspecified.
func toEnv(env map[string]string) []string {
	if len(env) == 0 {
		return nil
	}
	// pre-size the slice to avoid repeated growth copies.
	envs := make([]string, 0, len(env))
	for k, v := range env {
		envs = append(envs, k+"="+v)
	}
	return envs
}
|
||||
|
||||
// isBindMount returns true if the volume is a bind mount,
// i.e. it declares a host path. Note this also matches
// named pipes, which are host paths too (see isNamedPipe).
func isBindMount(volume *engine.Volume) bool {
	return volume.HostPath != nil
}
|
||||
|
||||
// isTempfs returns true if the volume is in-memory, i.e. an
// empty-dir volume with the "memory" medium.
func isTempfs(volume *engine.Volume) bool {
	return volume.EmptyDir != nil && volume.EmptyDir.Medium == "memory"
}
|
||||
|
||||
// isDataVolume returns true if the volume is a data-volume,
// i.e. an empty-dir volume that is not backed by memory.
func isDataVolume(volume *engine.Volume) bool {
	return volume.EmptyDir != nil && volume.EmptyDir.Medium != "memory"
}
|
||||
|
||||
// isNamedPipe returns true if the volume is a Windows named
// pipe, identified by the `\\.\pipe\` host path prefix.
func isNamedPipe(volume *engine.Volume) bool {
	return volume.HostPath != nil &&
		strings.HasPrefix(volume.HostPath.Path, `\\.\pipe\`)
}
|
||||
|
||||
// // helper function that converts a slice of device paths to a slice of
|
||||
// // container.DeviceMapping.
|
||||
// func toDevices(from []*engine.DeviceMapping) []container.DeviceMapping {
|
||||
// var to []container.DeviceMapping
|
||||
// for _, device := range from {
|
||||
// to = append(to, container.DeviceMapping{
|
||||
// PathOnHost: device.Source,
|
||||
// PathInContainer: device.Target,
|
||||
// CgroupPermissions: "rwm",
|
||||
// })
|
||||
// }
|
||||
// return to
|
||||
// }
|
@ -0,0 +1,430 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
|
||||
"docker.io/go-docker/api/types/container"
|
||||
"docker.io/go-docker/api/types/mount"
|
||||
"docker.io/go-docker/api/types/network"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
// TestToConfig verifies that a step with envs, secrets,
// command, and args maps to the expected container.Config,
// with secrets appended after the plain environment.
func TestToConfig(t *testing.T) {
	step := &engine.Step{
		Metadata: engine.Metadata{
			UID:    "123",
			Name:   "test",
			Labels: map[string]string{},
		},
		Envs: map[string]string{
			"GOOS": "linux",
		},
		Docker: &engine.DockerStep{
			Image:   "golang:latest",
			Command: []string{"/bin/sh"},
			Args:    []string{"-c", "go build; go test -v"},
		},
		WorkingDir: "/workspace",
		Secrets: []*engine.SecretVar{
			{
				Name: "password",
				Env:  "HTTP_PASSWORD",
			},
		},
	}
	spec := &engine.Spec{
		Metadata: engine.Metadata{
			UID: "abc123",
		},
		Steps: []*engine.Step{step},
		Secrets: []*engine.Secret{
			{
				Metadata: engine.Metadata{Name: "password"},
				Data:     "correct-horse-battery-staple",
			},
		},
	}
	a := &container.Config{
		Image:        step.Docker.Image,
		Labels:       step.Metadata.Labels,
		WorkingDir:   step.WorkingDir,
		AttachStdin:  false,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          false,
		OpenStdin:    false,
		StdinOnce:    false,
		ArgsEscaped:  false,
		Entrypoint:   step.Docker.Command,
		Cmd:          step.Docker.Args,
		Env: []string{
			"GOOS=linux",
			"HTTP_PASSWORD=correct-horse-battery-staple",
		},
	}
	b := toConfig(spec, step)
	if diff := cmp.Diff(a, b); diff != "" {
		t.Errorf("Unexpected container.Config")
		t.Log(diff)
	}
}
|
||||
|
||||
// TestToHostConfig verifies DNS, hosts, resource limits,
// and the data-volume/bind-mount split in the generated
// host configuration, then re-checks privileged mapping.
func TestToHostConfig(t *testing.T) {
	step := &engine.Step{
		Metadata: engine.Metadata{
			UID:    "123",
			Name:   "test",
			Labels: map[string]string{},
		},
		Docker: &engine.DockerStep{
			Image:      "golang:latest",
			Command:    []string{"/bin/sh"},
			Args:       []string{"-c", "go build; go test -v"},
			Privileged: true,
			ExtraHosts: []string{"host.company.com"},
			DNS:        []string{"8.8.8.8"},
			DNSSearch:  []string{"dns.company.com"},
		},
		Resources: &engine.Resources{
			Limits: &engine.ResourceObject{
				Memory: 10000,
			},
		},
		Volumes: []*engine.VolumeMount{
			{Name: "foo", Path: "/foo"},
			{Name: "bar", Path: "/baz"},
		},
	}
	spec := &engine.Spec{
		Metadata: engine.Metadata{
			UID: "abc123",
		},
		Steps: []*engine.Step{step},
		Docker: &engine.DockerConfig{
			Volumes: []*engine.Volume{
				{
					Metadata: engine.Metadata{Name: "foo", UID: "1"},
					EmptyDir: &engine.VolumeEmptyDir{},
				},
				{
					Metadata: engine.Metadata{Name: "bar", UID: "2"},
					HostPath: &engine.VolumeHostPath{Path: "/bar"},
				},
			},
		},
	}
	a := &container.HostConfig{
		Privileged: true,
		LogConfig: container.LogConfig{
			Type: "json-file",
		},
		Binds:      []string{"1:/foo"},
		DNS:        []string{"8.8.8.8"},
		DNSSearch:  []string{"dns.company.com"},
		ExtraHosts: []string{"host.company.com"},
		Mounts: []mount.Mount{
			{
				Type:   mount.TypeBind,
				Source: "/bar",
				Target: "/baz",
			},
		},
		Resources: container.Resources{
			Memory: 10000,
		},
	}
	b := toHostConfig(spec, step)
	if diff := cmp.Diff(a, b); diff != "" {
		t.Errorf("Unexpected container.HostConfig")
		t.Log(diff)
	}

	// we ensure that privileged mode is always mapped
	// correctly. better to be safe ...

	step.Docker.Privileged = false
	b = toHostConfig(spec, step)
	if b.Privileged {
		t.Errorf("Expect privileged mode disabled")
	}
}
|
||||
|
||||
// TestToNetConfig verifies the step is attached to the
// pipeline network (spec UID) with its name as an alias.
func TestToNetConfig(t *testing.T) {
	step := &engine.Step{
		Metadata: engine.Metadata{
			Name: "redis",
		},
	}
	spec := &engine.Spec{
		Metadata: engine.Metadata{
			UID: "abc123",
		},
		Steps: []*engine.Step{step},
	}
	a := toNetConfig(spec, step)
	b := &network.NetworkingConfig{
		EndpointsConfig: map[string]*network.EndpointSettings{
			"abc123": &network.EndpointSettings{
				Aliases:   []string{"redis"},
				NetworkID: "abc123"},
		},
	}
	if diff := cmp.Diff(a, b); diff != "" {
		t.Errorf("Unexpected network configuration")
		t.Log(diff)
	}
}
|
||||
|
||||
// TestToVolumeSlice verifies only data volumes become bind
// strings; host paths and unknown names are excluded.
func TestToVolumeSlice(t *testing.T) {
	step := &engine.Step{
		Volumes: []*engine.VolumeMount{
			{Name: "foo", Path: "/foo"},
			{Name: "bar", Path: "/bar"},
			{Name: "baz", Path: "/baz"},
		},
	}
	spec := &engine.Spec{
		Steps: []*engine.Step{step},
		Docker: &engine.DockerConfig{
			Volumes: []*engine.Volume{
				{
					Metadata: engine.Metadata{Name: "foo", UID: "1"},
					EmptyDir: &engine.VolumeEmptyDir{},
				},
				{
					Metadata: engine.Metadata{Name: "bar", UID: "2"},
					HostPath: &engine.VolumeHostPath{Path: "/bar"},
				},
			},
		},
	}

	a := toVolumeSlice(spec, step)
	b := []string{"1:/foo"}
	if diff := cmp.Diff(a, b); diff != "" {
		t.Errorf("Unexpected volume slice")
		t.Log(diff)
	}
}
|
||||
|
||||
// TestToVolumeMounts verifies only non-data volumes become
// docker mounts, and that an empty volume list yields nil.
func TestToVolumeMounts(t *testing.T) {
	step := &engine.Step{
		Volumes: []*engine.VolumeMount{
			{Name: "foo", Path: "/foo"},
			{Name: "bar", Path: "/bar"},
			{Name: "baz", Path: "/baz"},
		},
	}
	spec := &engine.Spec{
		Steps: []*engine.Step{step},
		Docker: &engine.DockerConfig{
			Volumes: []*engine.Volume{
				{
					Metadata: engine.Metadata{Name: "foo", UID: "1"},
					EmptyDir: &engine.VolumeEmptyDir{},
				},
				{
					Metadata: engine.Metadata{Name: "bar", UID: "2"},
					HostPath: &engine.VolumeHostPath{Path: "/tmp"},
				},
			},
		},
	}

	a := toVolumeMounts(spec, step)
	b := []mount.Mount{
		{Type: mount.TypeBind, Source: "/tmp", Target: "/bar"},
	}
	if diff := cmp.Diff(a, b); diff != "" {
		t.Errorf("Unexpected volume mounts")
		t.Log(diff)
	}

	step.Volumes = []*engine.VolumeMount{}
	if toVolumeMounts(spec, step) != nil {
		t.Errorf("Expect nil volume mount")
	}
}
|
||||
|
||||
// TestToEnv verifies map-to-slice conversion in key=value
// format (single key, so iteration order is not a factor).
func TestToEnv(t *testing.T) {
	kv := map[string]string{
		"foo": "bar",
	}
	want := []string{"foo=bar"}
	got := toEnv(kv)
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Want environment variables %v, got %v", want, got)
	}
}
|
||||
|
||||
// TestToMount table-tests the volume-to-mount conversion for
// each kind: data volume, tmpfs, bind mount, and named pipe.
func TestToMount(t *testing.T) {
	tests := []struct {
		source *engine.Volume
		target *engine.VolumeMount
		result mount.Mount
	}{
		// volume mount
		{
			source: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{}},
			target: &engine.VolumeMount{Path: "/foo"},
			result: mount.Mount{Type: mount.TypeVolume, Target: "/foo"},
		},
		// tmpfs mount
		{
			source: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{Medium: "memory", SizeLimit: 10000}},
			target: &engine.VolumeMount{Path: "/foo"},
			result: mount.Mount{Type: mount.TypeTmpfs, Target: "/foo", TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 10000, Mode: 0700}},
		},
		// bind mount
		{
			source: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: "/foo"}},
			target: &engine.VolumeMount{Path: "/bar"},
			result: mount.Mount{Type: mount.TypeBind, Source: "/foo", Target: "/bar"},
		},
		// named pipe
		{
			source: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: `\\.\pipe\docker_engine`}},
			target: &engine.VolumeMount{Path: `\\.\pipe\docker_engine`},
			result: mount.Mount{Type: mount.TypeNamedPipe, Source: `\\.\pipe\docker_engine`, Target: `\\.\pipe\docker_engine`},
		},
	}
	for _, test := range tests {
		result := toMount(test.source, test.target)
		if diff := cmp.Diff(result, test.result); diff != "" {
			t.Error("Unexpected mount value")
			t.Log(diff)
		}
	}
}
|
||||
|
||||
// TestToVolumeType table-tests the mount type classification
// for each volume shape, including the bind-mount fallback.
func TestToVolumeType(t *testing.T) {
	tests := []struct {
		volume *engine.Volume
		value  mount.Type
	}{
		{
			volume: &engine.Volume{},
			value:  mount.TypeBind,
		},
		{
			volume: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: "/tmp"}},
			value:  mount.TypeBind,
		},
		{
			volume: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: `\\.\pipe\docker_engine`}},
			value:  mount.TypeNamedPipe,
		},
		{
			volume: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{Medium: "memory"}},
			value:  mount.TypeTmpfs,
		},
		{
			volume: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{}},
			value:  mount.TypeVolume,
		},
	}
	for _, test := range tests {
		if got, want := toVolumeType(test.volume), test.value; got != want {
			t.Errorf("Want mount type %v, got %v", want, got)
		}
	}
}
|
||||
|
||||
// TestIsBindMount verifies bind-mount detection based on the
// presence of a host path.
func TestIsBindMount(t *testing.T) {
	tests := []struct {
		volume *engine.Volume
		value  bool
	}{
		{
			volume: &engine.Volume{},
			value:  false,
		},
		{
			volume: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: "/tmp"}},
			value:  true,
		},
	}
	for _, test := range tests {
		if got, want := isBindMount(test.volume), test.value; got != want {
			t.Errorf("Want is bind mount %v, got %v", want, got)
		}
	}
}
|
||||
|
||||
// TestIsNamedPipe verifies named-pipe detection based on the
// `\\.\pipe\` host path prefix.
func TestIsNamedPipe(t *testing.T) {
	tests := []struct {
		volume *engine.Volume
		value  bool
	}{
		{
			volume: &engine.Volume{},
			value:  false,
		},
		{
			volume: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: "/tmp"}},
			value:  false,
		},
		{
			volume: &engine.Volume{HostPath: &engine.VolumeHostPath{Path: `\\.\pipe\docker_engine`}},
			value:  true,
		},
	}
	for _, test := range tests {
		if got, want := isNamedPipe(test.volume), test.value; got != want {
			t.Errorf("Want is named pipe %v, got %v", want, got)
		}
	}
}
|
||||
|
||||
// TestIsTempfs verifies in-memory volume detection: only an
// empty-dir with the "memory" medium qualifies.
func TestIsTempfs(t *testing.T) {
	tests := []struct {
		volume *engine.Volume
		value  bool
	}{
		{
			volume: &engine.Volume{},
			value:  false,
		},
		{
			volume: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{}},
			value:  false,
		},
		{
			volume: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{Medium: "memory"}},
			value:  true,
		},
	}
	for _, test := range tests {
		if got, want := isTempfs(test.volume), test.value; got != want {
			t.Errorf("Want is temp fs %v, got %v", want, got)
		}
	}
}
|
||||
|
||||
// TestIsDataVolume verifies data-volume detection: an
// empty-dir volume that is not memory-backed.
func TestIsDataVolume(t *testing.T) {
	tests := []struct {
		volume *engine.Volume
		value  bool
	}{
		{
			volume: &engine.Volume{},
			value:  false,
		},
		{
			volume: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{Medium: "memory"}},
			value:  false,
		},
		{
			volume: &engine.Volume{EmptyDir: &engine.VolumeEmptyDir{}},
			value:  true,
		},
	}
	for i, test := range tests {
		if got, want := isDataVolume(test.volume), test.value; got != want {
			t.Errorf("Want is data volume %v, got %v at index %d", want, got, i)
		}
	}
}
|
@ -0,0 +1,249 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
"github.com/drone/drone-runtime/engine/docker/auth"
|
||||
"github.com/drone/drone-runtime/engine/docker/stdcopy"
|
||||
|
||||
"docker.io/go-docker"
|
||||
"docker.io/go-docker/api/types"
|
||||
"docker.io/go-docker/api/types/volume"
|
||||
)
|
||||
|
||||
// dockerEngine implements engine.Engine on top of the
// Docker API client.
type dockerEngine struct {
	// client is the Docker API client used for all container,
	// volume, and network operations.
	client docker.APIClient
}
|
||||
|
||||
// NewEnv returns a new Engine from the environment.
|
||||
func NewEnv() (engine.Engine, error) {
|
||||
cli, err := docker.NewEnvClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return New(cli), nil
|
||||
}
|
||||
|
||||
// New returns a new Engine using the Docker API Client.
|
||||
func New(client docker.APIClient) engine.Engine {
|
||||
return &dockerEngine{
|
||||
client: client,
|
||||
}
|
||||
}
|
||||
|
||||
// Setup provisions the pipeline infrastructure: one local
// volume per empty-dir volume in the spec, and the shared
// pipeline network all step containers attach to.
func (e *dockerEngine) Setup(ctx context.Context, spec *engine.Spec) error {
	if spec.Docker != nil {
		// creates the default temporary (local) volumes
		// that are mounted into each container step.
		for _, vol := range spec.Docker.Volumes {
			if vol.EmptyDir == nil {
				continue
			}

			_, err := e.client.VolumeCreate(ctx, volume.VolumesCreateBody{
				Name:   vol.Metadata.UID,
				Driver: "local",
				Labels: spec.Metadata.Labels,
			})
			if err != nil {
				return err
			}
		}
	}

	// creates the default pod network. All containers
	// defined in the pipeline are attached to this network.
	driver := "bridge"
	if spec.Platform.OS == "windows" {
		driver = "nat"
	}
	_, err := e.client.NetworkCreate(ctx, spec.Metadata.UID, types.NetworkCreate{
		Driver: driver,
		Labels: spec.Metadata.Labels,
	})

	return err
}
|
||||
|
||||
// Create creates the container for the pipeline step. It
// resolves registry credentials for the image domain, pulls
// the image according to the step pull policy (retrying the
// create once if the image was missing), and copies any
// configured files into the container.
func (e *dockerEngine) Create(ctx context.Context, spec *engine.Spec, step *engine.Step) error {
	if step.Docker == nil {
		return errors.New("engine: missing docker configuration")
	}

	// parse the docker image name. We need to extract the
	// image domain name and match to registry credentials
	// stored in the .docker/config.json object.
	_, domain, latest, err := parseImage(step.Docker.Image)
	if err != nil {
		return err
	}

	// create pull options with encoded authorization credentials.
	pullopts := types.ImagePullOptions{}
	auths, ok := engine.LookupAuth(spec, domain)
	if ok {
		pullopts.RegistryAuth = auth.Encode(auths.Username, auths.Password)
	}

	// automatically pull the latest version of the image if requested
	// by the process configuration.
	if step.Docker.PullPolicy == engine.PullAlways ||
		(step.Docker.PullPolicy == engine.PullDefault && latest) {
		// TODO(bradrydzewski) implement the PullDefault strategy to pull
		// the image if the tag is :latest
		rc, perr := e.client.ImagePull(ctx, step.Docker.Image, pullopts)
		if perr == nil {
			// drain the pull progress stream so the pull
			// completes before continuing.
			io.Copy(ioutil.Discard, rc)
			rc.Close()
		}
		if perr != nil {
			return perr
		}
	}

	_, err = e.client.ContainerCreate(ctx,
		toConfig(spec, step),
		toHostConfig(spec, step),
		toNetConfig(spec, step),
		step.Metadata.UID,
	)

	// automatically pull and try to re-create the image if the
	// failure is caused because the image does not exist.
	if docker.IsErrImageNotFound(err) && step.Docker.PullPolicy != engine.PullNever {
		rc, perr := e.client.ImagePull(ctx, step.Docker.Image, pullopts)
		if perr != nil {
			return perr
		}
		io.Copy(ioutil.Discard, rc)
		rc.Close()

		// once the image is successfully pulled we attempt to
		// re-create the container.
		_, err = e.client.ContainerCreate(ctx,
			toConfig(spec, step),
			toHostConfig(spec, step),
			toNetConfig(spec, step),
			step.Metadata.UID,
		)
	}
	if err != nil {
		return err
	}

	copyOpts := types.CopyToContainerOptions{}
	copyOpts.AllowOverwriteDirWithFile = false
	for _, mount := range step.Files {
		// files that cannot be resolved in the spec are skipped.
		file, ok := engine.LookupFile(spec, mount.Name)
		if !ok {
			continue
		}
		tar := createTarfile(file, mount)

		// TODO(bradrydzewski) this path is probably different on windows.
		err := e.client.CopyToContainer(ctx, step.Metadata.UID, "/", bytes.NewReader(tar), copyOpts)
		if err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
// Start starts the previously created container for the
// pipeline step.
func (e *dockerEngine) Start(ctx context.Context, spec *engine.Spec, step *engine.Step) error {
	return e.client.ContainerStart(ctx, step.Metadata.UID, types.ContainerStartOptions{})
}
|
||||
|
||||
func (e *dockerEngine) Wait(ctx context.Context, spec *engine.Spec, step *engine.Step) (*engine.State, error) {
|
||||
wait, errc := e.client.ContainerWait(ctx, step.Metadata.UID, "")
|
||||
select {
|
||||
case <-wait:
|
||||
case <-errc:
|
||||
}
|
||||
|
||||
info, err := e.client.ContainerInspect(ctx, step.Metadata.UID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if info.State.Running {
|
||||
// TODO(bradrydewski) if the state is still running
|
||||
// we should call wait again.
|
||||
}
|
||||
|
||||
return &engine.State{
|
||||
Exited: true,
|
||||
ExitCode: info.State.ExitCode,
|
||||
OOMKilled: info.State.OOMKilled,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Tail streams the container logs for the pipeline step.
// It returns a pipe whose read side carries the combined
// stdout/stderr stream, demultiplexed from the docker log
// format in a background goroutine.
func (e *dockerEngine) Tail(ctx context.Context, spec *engine.Spec, step *engine.Step) (io.ReadCloser, error) {
	opts := types.ContainerLogsOptions{
		Follow:     true,
		ShowStdout: true,
		ShowStderr: true,
		Details:    false,
		Timestamps: false,
	}

	logs, err := e.client.ContainerLogs(ctx, step.Metadata.UID, opts)
	if err != nil {
		return nil, err
	}
	rc, wc := io.Pipe()

	// copy until the log stream ends, then close both ends
	// of the pipe so the reader observes EOF.
	go func() {
		stdcopy.StdCopy(wc, wc, logs)
		logs.Close()
		wc.Close()
		rc.Close()
	}()
	return rc, nil
}
|
||||
|
||||
// Destroy tears down all pipeline resources: containers,
// data volumes, and the pipeline network. Cleanup failures
// are intentionally ignored (see the note at the end).
func (e *dockerEngine) Destroy(ctx context.Context, spec *engine.Spec) error {
	removeOpts := types.ContainerRemoveOptions{
		Force:         true,
		RemoveLinks:   false,
		RemoveVolumes: true,
	}

	// stop all containers
	for _, step := range spec.Steps {
		e.client.ContainerKill(ctx, step.Metadata.UID, "9")
	}

	// cleanup all containers
	for _, step := range spec.Steps {
		e.client.ContainerRemove(ctx, step.Metadata.UID, removeOpts)
	}

	// cleanup all volumes
	if spec.Docker != nil {
		for _, vol := range spec.Docker.Volumes {
			if vol.EmptyDir == nil {
				continue
			}
			// tempfs volumes do not have a volume entry,
			// and therefore do not require removal.
			if vol.EmptyDir.Medium == "memory" {
				continue
			}
			e.client.VolumeRemove(ctx, vol.Metadata.UID, true)
		}
	}

	// cleanup the network
	e.client.NetworkRemove(ctx, spec.Metadata.UID)

	// notice that we never collect or return any errors.
	// this is because we silently ignore cleanup failures
	// and instead ask the system admin to periodically run
	// `docker prune` commands.
	return nil
}
|
@ -0,0 +1 @@
|
||||
package docker
|
@ -0,0 +1,30 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
)
|
||||
|
||||
// helper function parses the image and returns the
|
||||
// canonical image name, domain name, and whether or not
|
||||
// the image tag is :latest.
|
||||
func parseImage(s string) (canonical, domain string, latest bool, err error) {
|
||||
// parse the docker image name. We need to extract the
|
||||
// image domain name and match to registry credentials
|
||||
// stored in the .docker/config.json object.
|
||||
named, err := reference.ParseNormalizedNamed(s)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// the canonical image name, for some reason, excludes
|
||||
// the tag name. So we need to make sure it is included
|
||||
// in the image name so we can determine if the :latest
|
||||
// tag is specified
|
||||
named = reference.TagNameOnly(named)
|
||||
|
||||
return named.String(),
|
||||
reference.Domain(named),
|
||||
strings.HasSuffix(named.String(), ":latest"),
|
||||
nil
|
||||
}
|
@ -0,0 +1,52 @@
|
||||
package docker
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestParseImage(t *testing.T) {
|
||||
tests := []struct {
|
||||
image string
|
||||
canonical string
|
||||
domain string
|
||||
latest bool
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
image: "golang",
|
||||
canonical: "docker.io/library/golang:latest",
|
||||
domain: "docker.io",
|
||||
latest: true,
|
||||
},
|
||||
{
|
||||
image: "golang:1.11",
|
||||
canonical: "docker.io/library/golang:1.11",
|
||||
domain: "docker.io",
|
||||
latest: false,
|
||||
},
|
||||
{
|
||||
image: "",
|
||||
err: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
canonical, domain, latest, err := parseImage(test.image)
|
||||
if test.err {
|
||||
if err == nil {
|
||||
t.Errorf("Expect error parsing image %s", test.image)
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if got, want := canonical, test.canonical; got != want {
|
||||
t.Errorf("Want image %s, got %s", want, got)
|
||||
}
|
||||
if got, want := domain, test.domain; got != want {
|
||||
t.Errorf("Want image domain %s, got %s", want, got)
|
||||
}
|
||||
if got, want := latest, test.latest; got != want {
|
||||
t.Errorf("Want image latest %v, got %v", want, got)
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,174 @@
|
||||
package stdcopy
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte

const (
	// Stdin represents standard input stream type.
	Stdin StdType = iota
	// Stdout represents standard output stream type.
	Stdout
	// Stderr represents standard error stream type.
	Stderr

	// frame header layout: 8 bytes total; byte 0 holds the stream
	// id, bytes 4-7 hold the big-endian payload length.
	stdWriterPrefixLen = 8
	stdWriterFdIndex   = 0
	stdWriterSizeIndex = 4

	// initial read buffer: one 32KiB payload plus header plus one
	// spare byte.
	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)

// bufPool recycles scratch buffers used to assemble frames,
// avoiding a fresh allocation per Write call.
var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}

// stdWriter is wrapper of io.Writer with extra customized info.
type stdWriter struct {
	io.Writer
	prefix byte // stream id written into the frame header
}
|
||||
|
||||
// Write sends the buffer to the underneath writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter to implement io.Writer.
func (w *stdWriter) Write(p []byte) (n int, err error) {
	if w == nil || w.Writer == nil {
		return 0, errors.New("Writer not instantiated")
	}
	if p == nil {
		return 0, nil
	}

	// build the 8-byte header: stream id at byte 0, big-endian
	// payload length at bytes 4-7.
	header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
	binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
	// assemble header+payload in a pooled buffer so the frame
	// reaches the underlying writer in a single Write call.
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Write(header[:])
	buf.Write(p)

	n, err = w.Writer.Write(buf.Bytes())
	// report only payload bytes, excluding the header, to honor
	// the io.Writer contract for the caller's slice.
	n -= stdWriterPrefixLen
	if n < 0 {
		n = 0
	}

	buf.Reset()
	bufPool.Put(buf)
	return
}
|
||||
|
||||
// NewStdWriter instantiates a new Writer.
|
||||
// Everything written to it will be encapsulated using a custom format,
|
||||
// and written to the underlying `w` stream.
|
||||
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
|
||||
// `t` indicates the id of the stream to encapsulate.
|
||||
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
|
||||
func NewStdWriter(w io.Writer, t StdType) io.Writer {
|
||||
return &stdWriter{
|
||||
Writer: w,
|
||||
prefix: byte(t),
|
||||
}
|
||||
}
|
||||
|
||||
// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
	var (
		buf       = make([]byte, startingBufLen)
		bufLen    = len(buf)
		nr, nw    int
		er, ew    error
		out       io.Writer
		frameSize int
	)

	for {
		// Make sure we have at least a full header
		for nr < stdWriterPrefixLen {
			var nr2 int
			nr2, er = src.Read(buf[nr:])
			nr += nr2
			if er == io.EOF {
				// EOF between frames is the normal end of stream;
				// return what has been written without error.
				if nr < stdWriterPrefixLen {
					return written, nil
				}
				break
			}
			if er != nil {
				return 0, er
			}
		}

		// Check the first byte to know where to write
		switch StdType(buf[stdWriterFdIndex]) {
		case Stdin:
			fallthrough
		case Stdout:
			// Write on stdout
			out = dstout
		case Stderr:
			// Write on stderr
			out = dsterr
		default:
			return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
		}

		// Retrieve the size of the frame
		frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))

		// Check if the buffer is big enough to read the frame.
		// Extend it if necessary.
		if frameSize+stdWriterPrefixLen > bufLen {
			buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
			bufLen = len(buf)
		}

		// While the amount of bytes read is less than the size of the frame + header, we keep reading
		for nr < frameSize+stdWriterPrefixLen {
			var nr2 int
			nr2, er = src.Read(buf[nr:])
			nr += nr2
			if er == io.EOF {
				// EOF mid-frame: the truncated trailing frame is
				// dropped and the stream ends without error.
				if nr < frameSize+stdWriterPrefixLen {
					return written, nil
				}
				break
			}
			if er != nil {
				return 0, er
			}
		}

		// Write the retrieved frame (without header)
		nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
		if ew != nil {
			return 0, ew
		}
		// If the frame has not been fully written: error
		if nw != frameSize {
			return 0, io.ErrShortWrite
		}
		written += int64(nw)

		// Move the rest of the buffer to the beginning
		copy(buf, buf[frameSize+stdWriterPrefixLen:])
		// Move the index
		nr -= frameSize + stdWriterPrefixLen
	}
}
|
@ -0,0 +1,25 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"time"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
// helper function creates a tarfile that can be uploaded
|
||||
// into the Docker container.
|
||||
func createTarfile(file *engine.File, mount *engine.FileMount) []byte {
|
||||
w := new(bytes.Buffer)
|
||||
t := tar.NewWriter(w)
|
||||
h := &tar.Header{
|
||||
Name: mount.Path,
|
||||
Mode: mount.Mode,
|
||||
Size: int64(len(file.Data)),
|
||||
ModTime: time.Now(),
|
||||
}
|
||||
t.WriteHeader(h)
|
||||
t.Write(file.Data)
|
||||
return w.Bytes()
|
||||
}
|
@ -0,0 +1,44 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"archive/tar"
|
||||
"bytes"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
func TestCreateTarfile(t *testing.T) {
|
||||
file := &engine.File{
|
||||
Data: []byte("hello world"),
|
||||
}
|
||||
mount := &engine.FileMount{
|
||||
Path: "/tmp/greeting.txt",
|
||||
Mode: 0644,
|
||||
}
|
||||
d := createTarfile(file, mount)
|
||||
|
||||
r := bytes.NewReader(d)
|
||||
tr := tar.NewReader(r)
|
||||
|
||||
hdr, err := tr.Next()
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
if got, want := hdr.Mode, mount.Mode; got != want {
|
||||
t.Errorf("Unexpected file mode. Want %d got %d", want, got)
|
||||
}
|
||||
if got, want := hdr.Size, len(file.Data); got != int64(want) {
|
||||
t.Errorf("Unexpected file size. Want %d got %d", want, got)
|
||||
}
|
||||
if got, want := hdr.Name, mount.Path; got != want {
|
||||
t.Errorf("Unexpected file name. Want %s got %s", want, got)
|
||||
}
|
||||
buf := new(bytes.Buffer)
|
||||
io.Copy(buf, tr)
|
||||
if got, want := buf.String(), string(file.Data); got != want {
|
||||
t.Errorf("Unexpected file contents. Want %q got %q", want, got)
|
||||
}
|
||||
}
|
@ -0,0 +1,29 @@
|
||||
package engine
|
||||
|
||||
//go:generate mockgen -source=engine.go -destination=mocks/engine.go
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Engine defines a runtime engine for pipeline execution.
// Implementations exist for Docker and Kubernetes; the expected
// call order is Setup, then per-step Create/Start/Tail/Wait, and
// finally Destroy.
type Engine interface {
	// Setup the pipeline environment.
	Setup(context.Context, *Spec) error

	// Create creates the pipeline state.
	Create(context.Context, *Spec, *Step) error

	// Start the pipeline step.
	Start(context.Context, *Spec, *Step) error

	// Wait for the pipeline step to complete and returns the completion results.
	Wait(context.Context, *Spec, *Step) (*State, error)

	// Tail the pipeline step logs. The caller is responsible for
	// closing the returned reader.
	Tail(context.Context, *Spec, *Step) (io.ReadCloser, error)

	// Destroy the pipeline environment.
	Destroy(context.Context, *Spec) error
}
|
@ -0,0 +1,80 @@
|
||||
package kube
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
"github.com/ghodss/yaml"
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
documentBegin = "---\n"
|
||||
documentEnd = "...\n"
|
||||
)
|
||||
|
||||
// Print encodes returns specification as a Kubernetes
|
||||
// multi-document yaml configuration file, in string format.
|
||||
func Print(spec *engine.Spec) string {
|
||||
buf := new(bytes.Buffer)
|
||||
|
||||
//
|
||||
// Secret Encoding.
|
||||
//
|
||||
|
||||
for _, secret := range spec.Secrets {
|
||||
buf.WriteString(documentBegin)
|
||||
res := toSecret(spec, secret)
|
||||
res.Namespace = spec.Metadata.Namespace
|
||||
res.Kind = "Secret"
|
||||
res.Type = "Opaque"
|
||||
raw, _ := yaml.Marshal(res)
|
||||
buf.Write(raw)
|
||||
}
|
||||
|
||||
//
|
||||
// Config Map Encoding.
|
||||
//
|
||||
|
||||
for _, file := range spec.Files {
|
||||
res := &v1.ConfigMap{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: file.Metadata.UID,
|
||||
},
|
||||
Data: map[string]string{
|
||||
file.Metadata.UID: string(file.Data),
|
||||
},
|
||||
}
|
||||
res.Namespace = spec.Metadata.Namespace
|
||||
res.Kind = "ConfigMap"
|
||||
buf.WriteString(documentBegin)
|
||||
raw, _ := yaml.Marshal(res)
|
||||
buf.Write(raw)
|
||||
}
|
||||
|
||||
//
|
||||
// Step Encoding.
|
||||
//
|
||||
|
||||
for _, step := range spec.Steps {
|
||||
buf.WriteString(documentBegin)
|
||||
res := toPod(spec, step)
|
||||
res.Namespace = spec.Metadata.Namespace
|
||||
res.Kind = "Pod"
|
||||
raw, _ := yaml.Marshal(res)
|
||||
buf.Write(raw)
|
||||
|
||||
if len(step.Docker.Ports) != 0 {
|
||||
buf.WriteString(documentBegin)
|
||||
res := toService(spec, step)
|
||||
res.Namespace = spec.Metadata.Namespace
|
||||
res.Kind = "Service"
|
||||
raw, _ := yaml.Marshal(res)
|
||||
buf.Write(raw)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(documentEnd)
|
||||
return buf.String()
|
||||
}
|
@ -0,0 +1,273 @@
|
||||
package kube
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
"github.com/drone/drone-runtime/engine/docker/auth"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
// kubeEngine is the Kubernetes-backed implementation of the
// engine.Engine interface.
type kubeEngine struct {
	client *kubernetes.Clientset // kubernetes API client
	node   string                // optional hostname used to pin pods to a single node
}
|
||||
|
||||
// NewFile returns a new Kubernetes engine from a
|
||||
// Kubernetes configuration file (~/.kube/config).
|
||||
func NewFile(url, path, node string) (engine.Engine, error) {
|
||||
config, err := clientcmd.BuildConfigFromFlags(url, path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
client, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &kubeEngine{client: client, node: node}, nil
|
||||
}
|
||||
|
||||
// Setup creates the pipeline environment: a dedicated namespace,
// the pipeline secrets, registry credentials, and config-map
// files referenced by the steps.
func (e *kubeEngine) Setup(ctx context.Context, spec *engine.Spec) error {
	ns := toNamespace(spec)

	// create the project namespace. all pods and
	// containers are created within the namespace, and
	// are removed when the pipeline execution completes.
	_, err := e.client.CoreV1().Namespaces().Create(ns)
	if err != nil {
		return err
	}

	// create all secrets
	for _, secret := range spec.Secrets {
		_, err := e.client.CoreV1().Secrets(ns.Name).Create(
			toSecret(spec, secret),
		)
		if err != nil {
			return err
		}
	}

	// create all registry credentials as secrets, stored in the
	// .dockerconfigjson format that kubelet understands for
	// image pulls.
	if spec.Docker != nil && len(spec.Docker.Auths) > 0 {
		out, err := auth.Marshal(spec.Docker.Auths)
		if err != nil {
			return err
		}
		_, err = e.client.CoreV1().Secrets(ns.Name).Create(
			&v1.Secret{
				ObjectMeta: metav1.ObjectMeta{
					Name: "docker-auth-config",
				},
				Type: "kubernetes.io/dockerconfigjson",
				StringData: map[string]string{
					".dockerconfigjson": string(out),
				},
			},
		)
		if err != nil {
			return err
		}
	}

	// create all files as config maps.
	for _, file := range spec.Files {
		_, err := e.client.CoreV1().ConfigMaps(ns.Name).Create(
			&v1.ConfigMap{
				ObjectMeta: metav1.ObjectMeta{
					Name: file.Metadata.UID,
				},
				Data: map[string]string{
					file.Metadata.UID: string(file.Data),
				},
			},
		)
		if err != nil {
			return err
		}
	}

	// NOTE(review): persistent volume provisioning is disabled;
	// volumes are currently emulated with host paths (see the
	// toVolumes helper).
	// pv := toPersistentVolume(e.node, spec.Metadata.Namespace, spec.Metadata.Namespace, filepath.Join("/tmp", spec.Metadata.Namespace))
	// _, err = e.client.CoreV1().PersistentVolumes().Create(pv)
	// if err != nil {
	// 	return err
	// }

	// pvc := toPersistentVolumeClaim(spec.Metadata.Namespace, spec.Metadata.Namespace)
	// _, err = e.client.CoreV1().PersistentVolumeClaims(spec.Metadata.Namespace).Create(pvc)
	// if err != nil {
	// 	return err
	// }

	return nil
}
|
||||
|
||||
// Create is a no-op for the Kubernetes engine: pods are created
// directly by Start, so there is no separate creation phase.
func (e *kubeEngine) Create(_ context.Context, _ *engine.Spec, _ *engine.Step) error {
	// no-op
	return nil
}
|
||||
|
||||
// Start launches the pod for the given step. Steps that expose
// ports additionally get a ClusterIP service, and when the engine
// is configured with a node name the pod is pinned to that node.
func (e *kubeEngine) Start(ctx context.Context, spec *engine.Spec, step *engine.Step) error {
	pod := toPod(spec, step)
	// create the service before the pod so the step is
	// addressable as soon as the pod comes up.
	if len(step.Docker.Ports) != 0 {
		service := toService(spec, step)
		_, err := e.client.CoreV1().Services(spec.Metadata.Namespace).Create(service)
		if err != nil {
			return err
		}
	}

	// pin the pod to the configured node using hostname-based
	// required node affinity.
	if e.node != "" {
		pod.Spec.Affinity = &v1.Affinity{
			NodeAffinity: &v1.NodeAffinity{
				RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:      "kubernetes.io/hostname",
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{e.node},
						}},
					}},
				},
			},
		}
	}

	_, err := e.client.CoreV1().Pods(spec.Metadata.Namespace).Create(pod)
	return err
}
|
||||
|
||||
func (e *kubeEngine) Wait(ctx context.Context, spec *engine.Spec, step *engine.Step) (*engine.State, error) {
|
||||
stopper := make(chan struct{})
|
||||
updater := func(old interface{}, new interface{}) {
|
||||
pod := new.(*v1.Pod)
|
||||
// ignore events that do not come from the
|
||||
// current pod namespace.
|
||||
if pod.ObjectMeta.Namespace != step.Metadata.Namespace {
|
||||
return
|
||||
}
|
||||
if pod.Name == step.Metadata.UID {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodSucceeded, v1.PodFailed, v1.PodUnknown:
|
||||
// TODO need to understand if this could be
|
||||
// invoked multiple times.
|
||||
select {
|
||||
case stopper <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
factory := informers.NewSharedInformerFactory(e.client, time.Second)
|
||||
informer := factory.Core().V1().Pods().Informer()
|
||||
informer.AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: updater,
|
||||
},
|
||||
)
|
||||
factory.Start(wait.NeverStop)
|
||||
|
||||
// TODO Cancel on ctx.Done
|
||||
<-stopper
|
||||
|
||||
pod, err := e.client.CoreV1().Pods(spec.Metadata.Namespace).Get(step.Metadata.UID, metav1.GetOptions{
|
||||
IncludeUninitialized: true,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
state := &engine.State{
|
||||
ExitCode: int(pod.Status.ContainerStatuses[0].State.Terminated.ExitCode),
|
||||
Exited: true,
|
||||
OOMKilled: false,
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func (e *kubeEngine) Tail(ctx context.Context, spec *engine.Spec, step *engine.Step) (io.ReadCloser, error) {
|
||||
ns := spec.Metadata.Namespace
|
||||
podName := step.Metadata.UID
|
||||
|
||||
up := make(chan bool)
|
||||
|
||||
var podUpdated = func(old interface{}, new interface{}) {
|
||||
pod := new.(*v1.Pod)
|
||||
if pod.Name == podName {
|
||||
switch pod.Status.Phase {
|
||||
case v1.PodRunning, v1.PodSucceeded, v1.PodFailed:
|
||||
up <- true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
si := informers.NewSharedInformerFactory(e.client, 5*time.Minute)
|
||||
si.Core().V1().Pods().Informer().AddEventHandler(
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
UpdateFunc: podUpdated,
|
||||
},
|
||||
)
|
||||
si.Start(wait.NeverStop)
|
||||
|
||||
select {
|
||||
case <-up:
|
||||
case <-ctx.Done():
|
||||
}
|
||||
|
||||
opts := &v1.PodLogOptions{
|
||||
Follow: true,
|
||||
}
|
||||
|
||||
return e.client.CoreV1().RESTClient().Get().
|
||||
Namespace(ns).
|
||||
Name(podName).
|
||||
Resource("pods").
|
||||
SubResource("log").
|
||||
VersionedParams(opts, scheme.ParameterCodec).
|
||||
Stream()
|
||||
}
|
||||
|
||||
// Destroy tears down the pipeline environment. Deleting the
// namespace cascades to the pods, secrets, services and config
// maps created inside it.
func (e *kubeEngine) Destroy(ctx context.Context, spec *engine.Spec) error {
	// err := e.client.CoreV1().PersistentVolumes().Delete(spec.Metadata.Namespace, nil)
	// if err != nil {
	// 	// TODO show error message
	// }

	// err = e.client.CoreV1().PersistentVolumeClaims(spec.Metadata.Namespace).Delete("workspace", nil)
	// if err != nil {
	// 	// TODO show error message
	// }

	// this is a complete hack. we are creating a host machine
	// directory which should be handled by a persistent volume.
	// I am planning to switch to a persistent volume, but am
	// leaving this in place as a temporary workaround in the short
	// term.
	//
	// NOTE(review): this removes the emulated empty_dir
	// directories created by toVolumes; it only has effect when
	// the engine runs on the node that hosts those directories.
	os.RemoveAll(
		filepath.Join(
			"/tmp",
			"drone",
			spec.Metadata.Namespace,
		),
	)

	// deleting the namespace should destroy all secrets,
	// volumes, configuration files and more.
	return e.client.CoreV1().Namespaces().Delete(
		spec.Metadata.Namespace,
		&metav1.DeleteOptions{},
	)
}
|
@ -0,0 +1,321 @@
|
||||
package kube
|
||||
|
||||
import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
)
|
||||
|
||||
// TODO(bradrydzewski) enable container resource limits.
|
||||
|
||||
// helper function converts environment variable
|
||||
// string data to kubernetes variables.
|
||||
func toEnv(spec *engine.Spec, step *engine.Step) []v1.EnvVar {
|
||||
var to []v1.EnvVar
|
||||
for k, v := range step.Envs {
|
||||
to = append(to, v1.EnvVar{
|
||||
Name: k,
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
to = append(to, v1.EnvVar{
|
||||
Name: "KUBERNETES_NODE",
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
FieldRef: &v1.ObjectFieldSelector{
|
||||
FieldPath: "spec.nodeName",
|
||||
},
|
||||
},
|
||||
})
|
||||
for _, secret := range step.Secrets {
|
||||
sec, ok := engine.LookupSecret(spec, secret)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
optional := true
|
||||
to = append(to, v1.EnvVar{
|
||||
Name: secret.Env,
|
||||
ValueFrom: &v1.EnvVarSource{
|
||||
SecretKeyRef: &v1.SecretKeySelector{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: sec.Metadata.UID,
|
||||
},
|
||||
Key: sec.Metadata.UID,
|
||||
Optional: &optional,
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
// helper function converts the engine pull policy
|
||||
// to the kubernetes pull policy constant.
|
||||
func toPullPolicy(from engine.PullPolicy) v1.PullPolicy {
|
||||
switch from {
|
||||
case engine.PullAlways:
|
||||
return v1.PullAlways
|
||||
case engine.PullNever:
|
||||
return v1.PullNever
|
||||
case engine.PullIfNotExists:
|
||||
return v1.PullIfNotPresent
|
||||
default:
|
||||
return v1.PullIfNotPresent
|
||||
}
|
||||
}
|
||||
|
||||
// helper function converts the engine secret object
|
||||
// to the kubernetes secret object.
|
||||
func toSecret(spec *engine.Spec, from *engine.Secret) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: from.Metadata.UID,
|
||||
},
|
||||
Type: "Opaque",
|
||||
StringData: map[string]string{
|
||||
from.Metadata.UID: from.Data,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func toConfigVolumes(spec *engine.Spec, step *engine.Step) []v1.Volume {
|
||||
var to []v1.Volume
|
||||
for _, mount := range step.Files {
|
||||
file, ok := engine.LookupFile(spec, mount.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
mode := int32(mount.Mode)
|
||||
volume := v1.Volume{Name: file.Metadata.UID}
|
||||
|
||||
optional := false
|
||||
volume.ConfigMap = &v1.ConfigMapVolumeSource{
|
||||
LocalObjectReference: v1.LocalObjectReference{
|
||||
Name: file.Metadata.UID,
|
||||
},
|
||||
Optional: &optional,
|
||||
Items: []v1.KeyToPath{
|
||||
{
|
||||
Key: file.Metadata.UID,
|
||||
Path: path.Base(mount.Path), // use the base path. document this.
|
||||
Mode: &mode,
|
||||
},
|
||||
},
|
||||
}
|
||||
to = append(to, volume)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
func toConfigMounts(spec *engine.Spec, step *engine.Step) []v1.VolumeMount {
|
||||
var to []v1.VolumeMount
|
||||
for _, mount := range step.Files {
|
||||
file, ok := engine.LookupFile(spec, mount.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
volume := v1.VolumeMount{
|
||||
Name: file.Metadata.UID,
|
||||
MountPath: path.Dir(mount.Path), // mount the config map here, using the base path
|
||||
}
|
||||
to = append(to, volume)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
func toVolumes(spec *engine.Spec, step *engine.Step) []v1.Volume {
|
||||
var to []v1.Volume
|
||||
for _, mount := range step.Volumes {
|
||||
vol, ok := engine.LookupVolume(spec, mount.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
volume := v1.Volume{Name: vol.Metadata.UID}
|
||||
source := v1.HostPathDirectoryOrCreate
|
||||
if vol.HostPath != nil {
|
||||
volume.HostPath = &v1.HostPathVolumeSource{
|
||||
Path: vol.HostPath.Path,
|
||||
Type: &source,
|
||||
}
|
||||
}
|
||||
if vol.EmptyDir != nil {
|
||||
// volume.EmptyDir = &v1.EmptyDirVolumeSource{}
|
||||
|
||||
// NOTE the empty_dir cannot be shared across multiple
|
||||
// pods so we emulate its behavior, and mount a temp
|
||||
// directory on the host machine that can be shared
|
||||
// between pods. This means we are responsible for deleting
|
||||
// these directories.
|
||||
volume.HostPath = &v1.HostPathVolumeSource{
|
||||
Path: filepath.Join("/tmp", "drone", spec.Metadata.Namespace, vol.Metadata.UID),
|
||||
Type: &source,
|
||||
}
|
||||
}
|
||||
to = append(to, volume)
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
func toVolumeMounts(spec *engine.Spec, step *engine.Step) []v1.VolumeMount {
|
||||
var to []v1.VolumeMount
|
||||
for _, mount := range step.Volumes {
|
||||
vol, ok := engine.LookupVolume(spec, mount.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
to = append(to, v1.VolumeMount{
|
||||
Name: vol.Metadata.UID,
|
||||
MountPath: mount.Path,
|
||||
})
|
||||
}
|
||||
return to
|
||||
}
|
||||
|
||||
func toPorts(step *engine.Step) []v1.ContainerPort {
|
||||
if len(step.Docker.Ports) == 0 {
|
||||
return nil
|
||||
}
|
||||
var ports []v1.ContainerPort
|
||||
for _, port := range step.Docker.Ports {
|
||||
ports = append(ports, v1.ContainerPort{
|
||||
ContainerPort: int32(port.Port),
|
||||
})
|
||||
}
|
||||
return ports
|
||||
}
|
||||
|
||||
// helper function returns a kubernetes namespace
|
||||
// for the given specification.
|
||||
func toNamespace(spec *engine.Spec) *v1.Namespace {
|
||||
return &v1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: spec.Metadata.Namespace,
|
||||
Labels: spec.Metadata.Labels,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func toResources(step *engine.Step) v1.ResourceRequirements {
|
||||
var resources v1.ResourceRequirements
|
||||
if step.Resources != nil && step.Resources.Limits != nil {
|
||||
resources.Limits = v1.ResourceList{}
|
||||
if step.Resources.Limits.Memory > int64(0) {
|
||||
resources.Limits[v1.ResourceMemory] = *resource.NewQuantity(
|
||||
step.Resources.Limits.Memory, resource.BinarySI)
|
||||
}
|
||||
if step.Resources.Limits.CPU > int64(0) {
|
||||
resources.Limits[v1.ResourceCPU] = *resource.NewMilliQuantity(
|
||||
step.Resources.Limits.CPU, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
if step.Resources != nil && step.Resources.Requests != nil {
|
||||
resources.Requests = v1.ResourceList{}
|
||||
if step.Resources.Requests.Memory > int64(0) {
|
||||
resources.Requests[v1.ResourceMemory] = *resource.NewQuantity(
|
||||
step.Resources.Requests.Memory, resource.BinarySI)
|
||||
}
|
||||
if step.Resources.Requests.CPU > int64(0) {
|
||||
resources.Requests[v1.ResourceCPU] = *resource.NewMilliQuantity(
|
||||
step.Resources.Requests.CPU, resource.DecimalSI)
|
||||
}
|
||||
}
|
||||
return resources
|
||||
}
|
||||
|
||||
// helper function returns a kubernetes pod for the
// given step and specification.
func toPod(spec *engine.Spec, step *engine.Step) *v1.Pod {
	// combine host/empty-dir volumes with config-map volumes.
	var volumes []v1.Volume
	volumes = append(volumes, toVolumes(spec, step)...)
	volumes = append(volumes, toConfigVolumes(spec, step)...)

	var mounts []v1.VolumeMount
	mounts = append(mounts, toVolumeMounts(spec, step)...)
	mounts = append(mounts, toConfigMounts(spec, step)...)

	// reference the registry credential secret created in Setup.
	// NOTE(review): assumes spec.Docker is non-nil — confirm all
	// callers populate it.
	var pullSecrets []v1.LocalObjectReference
	if len(spec.Docker.Auths) > 0 {
		pullSecrets = []v1.LocalObjectReference{{
			Name: "docker-auth-config", // TODO move name to a const
		}}
	}

	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      step.Metadata.UID,
			Namespace: step.Metadata.Namespace,
			Labels:    step.Metadata.Labels,
		},
		Spec: v1.PodSpec{
			AutomountServiceAccountToken: boolptr(false),
			// pods run to completion; steps are never restarted.
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:            step.Metadata.UID,
				Image:           step.Docker.Image,
				ImagePullPolicy: toPullPolicy(step.Docker.PullPolicy),
				Command:         step.Docker.Command,
				Args:            step.Docker.Args,
				WorkingDir:      step.WorkingDir,
				SecurityContext: &v1.SecurityContext{
					Privileged: &step.Docker.Privileged,
				},
				Env:          toEnv(spec, step),
				VolumeMounts: mounts,
				Ports:        toPorts(step),
				Resources:    toResources(step),
			}},
			ImagePullSecrets: pullSecrets,
			Volumes:          volumes,
		},
	}
}
|
||||
|
||||
// helper function returns a kubernetes service for the
|
||||
// given step and specification.
|
||||
func toService(spec *engine.Spec, step *engine.Step) *v1.Service {
|
||||
var ports []v1.ServicePort
|
||||
for _, p := range step.Docker.Ports {
|
||||
source := p.Port
|
||||
target := p.Host
|
||||
if target == 0 {
|
||||
target = source
|
||||
}
|
||||
ports = append(ports, v1.ServicePort{
|
||||
Port: int32(source),
|
||||
TargetPort: intstr.IntOrString{
|
||||
IntVal: int32(target),
|
||||
},
|
||||
})
|
||||
}
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: toDNS(step.Metadata.Name),
|
||||
Namespace: step.Metadata.Namespace,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeClusterIP,
|
||||
Selector: map[string]string{
|
||||
"io.drone.step.name": step.Metadata.Name,
|
||||
},
|
||||
Ports: ports,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// toDNS converts a step name to a DNS-compatible service name by
// replacing every underscore with a dash.
func toDNS(i string) string {
	return strings.Map(func(r rune) rune {
		if r == '_' {
			return '-'
		}
		return r
	}, i)
}
|
||||
|
||||
// boolptr returns a pointer to a copy of the given boolean.
func boolptr(v bool) *bool {
	out := v
	return &out
}
|
||||
|
||||
// stringptr returns a pointer to a copy of the given string.
func stringptr(v string) *string {
	out := v
	return &out
}
|
@ -0,0 +1,62 @@
|
||||
package kube
|
||||
|
||||
import (
|
||||
"k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var defaultVolumeSize = resource.MustParse("5Gi")
|
||||
|
||||
// toPersistentVolume returns a kubernetes persistent volume
// backed by a local directory (path) on the named node.
func toPersistentVolume(node, namespace, name, path string) *v1.PersistentVolume {
	return &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: v1.PersistentVolumeSpec{
			// every volume advertises the same fixed capacity.
			Capacity: v1.ResourceList{
				v1.ResourceStorage: defaultVolumeSize,
			},
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
			// retain the volume on release; cleanup is managed by
			// the engine rather than by the cluster.
			PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
			StorageClassName:              "local-storage",
			PersistentVolumeSource: v1.PersistentVolumeSource{
				Local: &v1.LocalVolumeSource{
					Path: path,
				},
			},
			// pin the volume to the node that owns the local path;
			// without this the scheduler could place consumers on a
			// node where the directory does not exist.
			NodeAffinity: &v1.VolumeNodeAffinity{
				Required: &v1.NodeSelector{
					NodeSelectorTerms: []v1.NodeSelectorTerm{{
						MatchExpressions: []v1.NodeSelectorRequirement{{
							Key:      "kubernetes.io/hostname",
							Operator: v1.NodeSelectorOpIn,
							Values:   []string{node},
						}},
					}},
				},
			},
		},
	}
}
|
||||
|
||||
func toPersistentVolumeClaim(namespace, name string) *v1.PersistentVolumeClaim {
|
||||
localStorageClass := "local-storage"
|
||||
|
||||
return &v1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteMany},
|
||||
StorageClassName: &localStorageClass,
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
v1.ResourceStorage: defaultVolumeSize,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
@ -0,0 +1,77 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// LookupVolume is a helper function that will lookup the
|
||||
// named volume.
|
||||
func LookupVolume(spec *Spec, name string) (*Volume, bool) {
|
||||
if spec.Docker == nil {
|
||||
return nil, false
|
||||
}
|
||||
for _, vol := range spec.Docker.Volumes {
|
||||
if vol.Metadata.Name == name {
|
||||
return vol, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LookupSecret is a helper function that will lookup the
|
||||
// named secret.
|
||||
func LookupSecret(spec *Spec, secret *SecretVar) (*Secret, bool) {
|
||||
for _, sec := range spec.Secrets {
|
||||
if sec.Metadata.Name == secret.Name {
|
||||
return sec, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LookupFile is a helper function that will lookup the
|
||||
// named file.
|
||||
func LookupFile(spec *Spec, name string) (*File, bool) {
|
||||
for _, file := range spec.Files {
|
||||
if file.Metadata.Name == name {
|
||||
return file, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// LookupAuth is a helper function that will lookup the
|
||||
// docker credentials by hostname.
|
||||
func LookupAuth(spec *Spec, domain string) (*DockerAuth, bool) {
|
||||
if spec.Docker == nil {
|
||||
return nil, false
|
||||
}
|
||||
for _, auth := range spec.Docker.Auths {
|
||||
host := auth.Address
|
||||
|
||||
// the auth address could be a fully qualified
|
||||
// url in which case, we should parse so we can
|
||||
// extract the domain name.
|
||||
if strings.HasPrefix(host, "http://") ||
|
||||
strings.HasPrefix(host, "https://") {
|
||||
uri, err := url.Parse(auth.Address)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
host = uri.Host
|
||||
}
|
||||
|
||||
// we need to account for the legacy docker
|
||||
// index domain name, which should match the
|
||||
// normalized domain name.
|
||||
if host == "index.docker.io" {
|
||||
host = "docker.io"
|
||||
}
|
||||
|
||||
if host == domain {
|
||||
return auth, true
|
||||
}
|
||||
}
|
||||
return nil, false
|
||||
}
|
@ -0,0 +1,169 @@
|
||||
package engine
|
||||
|
||||
import "testing"
|
||||
|
||||
//
|
||||
// File Lookup Tests
|
||||
//
|
||||
|
||||
func TestLookupFile(t *testing.T) {
|
||||
want := &File{Metadata: Metadata{Name: "foo"}}
|
||||
spec := &Spec{
|
||||
Files: []*File{want},
|
||||
}
|
||||
got, ok := LookupFile(spec, "foo")
|
||||
if !ok {
|
||||
t.Errorf("Expect file found")
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("Expect file returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupFile_NotFound(t *testing.T) {
|
||||
want := &File{Metadata: Metadata{Name: "foo"}}
|
||||
spec := &Spec{
|
||||
Files: []*File{want},
|
||||
}
|
||||
got, ok := LookupFile(spec, "bar")
|
||||
if ok {
|
||||
t.Errorf("Expect file not found")
|
||||
}
|
||||
if got != nil {
|
||||
t.Errorf("Expect file not returned")
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Secret Lookup Tests
|
||||
//
|
||||
|
||||
func TestLookupSecret(t *testing.T) {
|
||||
want := &Secret{Metadata: Metadata{Name: "foo"}}
|
||||
spec := &Spec{
|
||||
Secrets: []*Secret{want},
|
||||
}
|
||||
got, ok := LookupSecret(spec, &SecretVar{Name: "foo"})
|
||||
if !ok {
|
||||
t.Errorf("Expect secret found")
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("Expect secret returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupSecret_NotFound(t *testing.T) {
|
||||
want := &Secret{Metadata: Metadata{Name: "foo"}}
|
||||
spec := &Spec{
|
||||
Secrets: []*Secret{want},
|
||||
}
|
||||
got, ok := LookupSecret(spec, &SecretVar{Name: "bar"})
|
||||
if ok {
|
||||
t.Errorf("Expect volume not found")
|
||||
}
|
||||
if got != nil {
|
||||
t.Errorf("Expect volume not returned")
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Volume Lookup Tests
|
||||
//
|
||||
|
||||
func TestLookupVolume(t *testing.T) {
|
||||
want := &Volume{Metadata: Metadata{Name: "foo"}}
|
||||
spec := &Spec{
|
||||
Docker: &DockerConfig{
|
||||
Volumes: []*Volume{want},
|
||||
},
|
||||
}
|
||||
got, ok := LookupVolume(spec, "foo")
|
||||
if !ok {
|
||||
t.Errorf("Expect volume found")
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("Expect volume returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupVolume_NotFound(t *testing.T) {
|
||||
volume := &Volume{Metadata: Metadata{Name: "foo"}}
|
||||
spec := &Spec{
|
||||
Docker: &DockerConfig{
|
||||
Volumes: []*Volume{volume},
|
||||
},
|
||||
}
|
||||
got, ok := LookupVolume(spec, "bar")
|
||||
if ok {
|
||||
t.Errorf("Expect volume not found")
|
||||
}
|
||||
if got != nil {
|
||||
t.Errorf("Expect volume not returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupVolume_NotDocker(t *testing.T) {
|
||||
_, ok := LookupVolume(&Spec{}, "foo")
|
||||
if ok {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
// Auth Lookup Tests
|
||||
//
|
||||
|
||||
func TestLookupAuth(t *testing.T) {
|
||||
tests := []string{"docker.io", "index.docker.io", "https://index.docker.io/v1", "http://docker.io/v2"}
|
||||
for _, test := range tests {
|
||||
want := &DockerAuth{Address: test}
|
||||
spec := &Spec{
|
||||
Docker: &DockerConfig{
|
||||
Auths: []*DockerAuth{want},
|
||||
},
|
||||
}
|
||||
got, ok := LookupAuth(spec, "docker.io")
|
||||
if !ok {
|
||||
t.Errorf("Expect auth found for %s", test)
|
||||
}
|
||||
if got != want {
|
||||
t.Errorf("Expect auth returned for %s", test)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupAuth_NotFound(t *testing.T) {
|
||||
want := &DockerAuth{Address: "gcr.io"}
|
||||
spec := &Spec{
|
||||
Docker: &DockerConfig{
|
||||
Auths: []*DockerAuth{want},
|
||||
},
|
||||
}
|
||||
got, ok := LookupAuth(spec, "docker.io")
|
||||
if ok {
|
||||
t.Errorf("Expect auth not found")
|
||||
}
|
||||
if got != nil {
|
||||
t.Errorf("Expect auth not returned")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupAuth_NotDocker(t *testing.T) {
|
||||
_, ok := LookupAuth(&Spec{}, "foo")
|
||||
if ok {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
||||
|
||||
func TestLookupAuth_InvalidRegistry(t *testing.T) {
|
||||
want := &DockerAuth{Address: "http://192.168.0.%31"}
|
||||
spec := &Spec{
|
||||
Docker: &DockerConfig{
|
||||
Auths: []*DockerAuth{want},
|
||||
},
|
||||
}
|
||||
_, ok := LookupAuth(spec, "192.168.0.%31")
|
||||
if ok {
|
||||
t.Fail()
|
||||
}
|
||||
}
|
@ -0,0 +1,110 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: engine.go
|
||||
|
||||
// Package mock_engine is a generated GoMock package.
|
||||
package mock_engine
|
||||
|
||||
import (
|
||||
context "context"
|
||||
engine "github.com/drone/drone-runtime/engine"
|
||||
gomock "github.com/golang/mock/gomock"
|
||||
io "io"
|
||||
reflect "reflect"
|
||||
)
|
||||
|
||||
// NOTE(review): this file is generated by MockGen ("DO NOT EDIT");
// regenerate with `mockgen -source=engine.go` rather than editing
// these definitions by hand.

// MockEngine is a mock of Engine interface
type MockEngine struct {
	ctrl     *gomock.Controller
	recorder *MockEngineMockRecorder
}

// MockEngineMockRecorder is the mock recorder for MockEngine
type MockEngineMockRecorder struct {
	mock *MockEngine
}

// NewMockEngine creates a new mock instance
func NewMockEngine(ctrl *gomock.Controller) *MockEngine {
	mock := &MockEngine{ctrl: ctrl}
	mock.recorder = &MockEngineMockRecorder{mock}
	return mock
}

// EXPECT returns an object that allows the caller to indicate expected use
func (m *MockEngine) EXPECT() *MockEngineMockRecorder {
	return m.recorder
}

// Setup mocks base method
func (m *MockEngine) Setup(arg0 context.Context, arg1 *engine.Spec) error {
	ret := m.ctrl.Call(m, "Setup", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// Setup indicates an expected call of Setup
func (mr *MockEngineMockRecorder) Setup(arg0, arg1 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Setup", reflect.TypeOf((*MockEngine)(nil).Setup), arg0, arg1)
}

// Create mocks base method
func (m *MockEngine) Create(arg0 context.Context, arg1 *engine.Spec, arg2 *engine.Step) error {
	ret := m.ctrl.Call(m, "Create", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// Create indicates an expected call of Create
func (mr *MockEngineMockRecorder) Create(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Create", reflect.TypeOf((*MockEngine)(nil).Create), arg0, arg1, arg2)
}

// Start mocks base method
func (m *MockEngine) Start(arg0 context.Context, arg1 *engine.Spec, arg2 *engine.Step) error {
	ret := m.ctrl.Call(m, "Start", arg0, arg1, arg2)
	ret0, _ := ret[0].(error)
	return ret0
}

// Start indicates an expected call of Start
func (mr *MockEngineMockRecorder) Start(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Start", reflect.TypeOf((*MockEngine)(nil).Start), arg0, arg1, arg2)
}

// Wait mocks base method
func (m *MockEngine) Wait(arg0 context.Context, arg1 *engine.Spec, arg2 *engine.Step) (*engine.State, error) {
	ret := m.ctrl.Call(m, "Wait", arg0, arg1, arg2)
	ret0, _ := ret[0].(*engine.State)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Wait indicates an expected call of Wait
func (mr *MockEngineMockRecorder) Wait(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Wait", reflect.TypeOf((*MockEngine)(nil).Wait), arg0, arg1, arg2)
}

// Tail mocks base method
func (m *MockEngine) Tail(arg0 context.Context, arg1 *engine.Spec, arg2 *engine.Step) (io.ReadCloser, error) {
	ret := m.ctrl.Call(m, "Tail", arg0, arg1, arg2)
	ret0, _ := ret[0].(io.ReadCloser)
	ret1, _ := ret[1].(error)
	return ret0, ret1
}

// Tail indicates an expected call of Tail
func (mr *MockEngineMockRecorder) Tail(arg0, arg1, arg2 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Tail", reflect.TypeOf((*MockEngine)(nil).Tail), arg0, arg1, arg2)
}

// Destroy mocks base method
func (m *MockEngine) Destroy(arg0 context.Context, arg1 *engine.Spec) error {
	ret := m.ctrl.Call(m, "Destroy", arg0, arg1)
	ret0, _ := ret[0].(error)
	return ret0
}

// Destroy indicates an expected call of Destroy
func (mr *MockEngineMockRecorder) Destroy(arg0, arg1 interface{}) *gomock.Call {
	return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Destroy", reflect.TypeOf((*MockEngine)(nil).Destroy), arg0, arg1)
}
|
@ -0,0 +1,35 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parse parses the pipeline config from an io.Reader.
|
||||
func Parse(r io.Reader) (*Spec, error) {
|
||||
cfg := Spec{}
|
||||
err := json.NewDecoder(r).Decode(&cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
// ParseFile parses the pipeline config from a file.
|
||||
func ParseFile(path string) (*Spec, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
return Parse(f)
|
||||
}
|
||||
|
||||
// ParseString parses the pipeline config from a string.
|
||||
func ParseString(s string) (*Spec, error) {
|
||||
return Parse(
|
||||
strings.NewReader(s),
|
||||
)
|
||||
}
|
@ -0,0 +1,180 @@
|
||||
package engine
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
)
|
||||
|
||||
func TestParse(t *testing.T) {
|
||||
spec, err := ParseString(mockSpecJSON)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
if diff := cmp.Diff(mockSpec, spec); diff != "" {
|
||||
t.Errorf("Unxpected Parse results")
|
||||
t.Log(diff)
|
||||
}
|
||||
|
||||
_, err = ParseString("[]")
|
||||
if err == nil {
|
||||
t.Errorf("Want parse error, got nil")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestParseFile(t *testing.T) {
|
||||
f, err := ioutil.TempFile(os.TempDir(), "drone")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
defer os.Remove(f.Name())
|
||||
f.WriteString(mockSpecJSON)
|
||||
f.Close()
|
||||
|
||||
_, err = ParseFile(f.Name())
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = ParseFile("/tmp/this/path/does/not/exist")
|
||||
if err == nil {
|
||||
t.Errorf("Want parse error, got nil")
|
||||
}
|
||||
}
|
||||
|
||||
// init snapshots the JSON encoding of mockSpec once, before any
// test runs, so every test sees the same serialized fixture.
func init() {
	// when the test package initializes, encode
	// the spec and snapshot the value. the marshal error is
	// ignored: mockSpec is a static literal of marshalable types.
	data, _ := json.Marshal(mockSpec)
	mockSpecJSON = string(data)
}

// mockSpecJSON holds the JSON encoding of mockSpec, set in init.
var mockSpecJSON string
|
||||
|
||||
// this is a sample runtime specification file.
|
||||
var mockSpec = &Spec{
|
||||
Metadata: Metadata{
|
||||
UID: "metadata.uid",
|
||||
Namespace: "metadata.namespace",
|
||||
Name: "metadata.name",
|
||||
Labels: map[string]string{
|
||||
"metadata.labels.key": "metadata.labels.value",
|
||||
},
|
||||
},
|
||||
Platform: Platform{
|
||||
OS: "platform.os",
|
||||
Arch: "platform.arch",
|
||||
Version: "platform.version",
|
||||
Variant: "platform.variant",
|
||||
},
|
||||
Secrets: []*Secret{
|
||||
{
|
||||
Metadata: Metadata{Name: "secrets.1.name"},
|
||||
Data: "secrets.1.data",
|
||||
},
|
||||
},
|
||||
Files: []*File{
|
||||
{
|
||||
Metadata: Metadata{Name: "files.1.name"},
|
||||
Data: []byte("files.1.data"),
|
||||
},
|
||||
},
|
||||
Docker: &DockerConfig{
|
||||
Volumes: []*Volume{
|
||||
{
|
||||
Metadata: Metadata{
|
||||
UID: "volumes.1.metadata.uid",
|
||||
Namespace: "volumes.1.metadata.namespace",
|
||||
Name: "volumes.1.metadata.name",
|
||||
Labels: map[string]string{
|
||||
"volumes.1.metadata.labels.key": "volumes.1.metadata.labels.value",
|
||||
},
|
||||
},
|
||||
EmptyDir: &VolumeEmptyDir{},
|
||||
},
|
||||
{
|
||||
Metadata: Metadata{
|
||||
UID: "volumes.2.metadata.uid",
|
||||
Namespace: "volumes.2.metadata.namespace",
|
||||
Name: "volumes.2.metadata.name",
|
||||
Labels: map[string]string{
|
||||
"volumes.2.metadata.labels.key": "volumes.2.metadata.labels.value",
|
||||
},
|
||||
},
|
||||
HostPath: &VolumeHostPath{
|
||||
Path: "volumes.2.host.path",
|
||||
},
|
||||
},
|
||||
},
|
||||
Auths: []*DockerAuth{
|
||||
{
|
||||
Address: "auths.1.address",
|
||||
Username: "auths.1.username",
|
||||
Password: "auths.1.password",
|
||||
},
|
||||
},
|
||||
},
|
||||
Steps: []*Step{
|
||||
{
|
||||
Metadata: Metadata{
|
||||
UID: "steps.1.metadata.uid",
|
||||
Namespace: "steps.1.metadata.namespace",
|
||||
Name: "steps.1.metadata.name",
|
||||
Labels: map[string]string{
|
||||
"steps.1.metadata.labesl.key": "steps.1.metadata.labels.value",
|
||||
},
|
||||
},
|
||||
Detach: true,
|
||||
DependsOn: []string{"steps.1.depends_on.1"},
|
||||
Docker: &DockerStep{
|
||||
Args: []string{"steps.1.args.1"},
|
||||
Command: []string{"steps.1.command.1"},
|
||||
Image: "steps.1.image",
|
||||
Networks: []string{"steps.1.network"},
|
||||
Ports: []*Port{
|
||||
{
|
||||
Port: 3306,
|
||||
Host: 3307,
|
||||
Protocol: "TPC",
|
||||
},
|
||||
},
|
||||
Privileged: true,
|
||||
PullPolicy: PullIfNotExists,
|
||||
},
|
||||
Envs: map[string]string{
|
||||
"steps.1.envs.key": "steps.1.envs.value",
|
||||
},
|
||||
Files: []*FileMount{
|
||||
{
|
||||
Name: "steps.1.files.1.name",
|
||||
Path: "steps.1.files.1.path",
|
||||
},
|
||||
},
|
||||
IgnoreErr: true,
|
||||
IgnoreStdout: true,
|
||||
IgnoreStderr: true,
|
||||
Resources: &Resources{},
|
||||
RunPolicy: RunAlways,
|
||||
Secrets: []*SecretVar{
|
||||
{
|
||||
Name: "steps.1.secrets.1.name",
|
||||
Env: "steps.1.secrets.1.env",
|
||||
},
|
||||
},
|
||||
Volumes: []*VolumeMount{
|
||||
{
|
||||
Name: "steps.1.volumes.1.name",
|
||||
Path: "steps.1.volumes.1.path",
|
||||
},
|
||||
},
|
||||
WorkingDir: "steps.1.working_dir",
|
||||
},
|
||||
},
|
||||
}
|
@ -0,0 +1,19 @@
|
||||
// +build !linux
|
||||
|
||||
package plugin
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
// Symbol is the symbol name looked up in a plugin to obtain
// the engine provider value.
const Symbol = "Engine"
|
||||
|
||||
// Open returns a Engine dynamically loaded from a plugin.
|
||||
func Open(path string) (engine.Engine, error) {
|
||||
panic(
|
||||
errors.New("unsupported operating system"),
|
||||
)
|
||||
}
|
@ -0,0 +1,25 @@
|
||||
// +build go1.8,linux
|
||||
|
||||
package plugin
|
||||
|
||||
import (
	"errors"
	"plugin"

	"github.com/drone/drone-runtime/engine"
)
|
||||
|
||||
// Symbol is the symbol name looked up in a plugin to obtain
// the engine provider value.
const Symbol = "Engine"
|
||||
|
||||
// Open returns a Factory dynamically loaded from a plugin.
|
||||
func Open(path string) (engine.Engine, error) {
|
||||
lib, err := plugin.Open(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
provider, err := lib.Lookup(Symbol)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return provider.(func() (engine.Engine, error))()
|
||||
}
|
@ -0,0 +1,202 @@
|
||||
package engine
|
||||
|
||||
type (
|
||||
// Metadata provides execution metadata.
|
||||
Metadata struct {
|
||||
UID string `json:"uid,omitempty"`
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
}
|
||||
|
||||
// Spec provides the pipeline spec. This provides the
|
||||
// required instructions for reproducable pipeline
|
||||
// execution.
|
||||
Spec struct {
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
Platform Platform `json:"platform,omitempty"`
|
||||
Secrets []*Secret `json:"secrets,omitempty"`
|
||||
Steps []*Step `json:"steps,omitempty"`
|
||||
Files []*File `json:"files,omitempty"`
|
||||
|
||||
// Docker-specific settings. These settings are
|
||||
// only used by the Docker and Kubernetes runtime
|
||||
// drivers.
|
||||
Docker *DockerConfig `json:"docker,omitempty"`
|
||||
|
||||
// Qemu-specific settings. These settings are only
|
||||
// used by the qemu runtime driver.
|
||||
Qemu *QemuConfig `json:"qemu,omitempty"`
|
||||
|
||||
// VMWare Fusion settings. These settings are only
|
||||
// used by the VMWare runtime driver.
|
||||
Fusion *FusionConfig `json:"fusion,omitempty"`
|
||||
}
|
||||
|
||||
// Step defines a pipeline step.
|
||||
Step struct {
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
Detach bool `json:"detach,omitempty"`
|
||||
DependsOn []string `json:"depends_on,omitempty"`
|
||||
Devices []*VolumeDevice `json:"devices,omitempty"`
|
||||
Envs map[string]string `json:"environment,omitempty"`
|
||||
Files []*FileMount `json:"files,omitempty"`
|
||||
IgnoreErr bool `json:"ignore_err,omitempty"`
|
||||
IgnoreStdout bool `json:"ignore_stderr,omitempty"`
|
||||
IgnoreStderr bool `json:"ignore_stdout,omitempty"`
|
||||
Resources *Resources `json:"resources,omitempty"`
|
||||
RunPolicy RunPolicy `json:"run_policy,omitempty"`
|
||||
Secrets []*SecretVar `json:"secrets,omitempty"`
|
||||
Volumes []*VolumeMount `json:"volumes,omitempty"`
|
||||
WorkingDir string `json:"working_dir,omitempty"`
|
||||
|
||||
// Docker-specific settings. These settings are
|
||||
// only used by the Docker and Kubernetes runtime
|
||||
// drivers.
|
||||
Docker *DockerStep `json:"docker,omitempty"`
|
||||
}
|
||||
|
||||
// DockerAuth defines dockerhub authentication credentials.
|
||||
DockerAuth struct {
|
||||
Address string `json:"address,omitempty"`
|
||||
Username string `json:"username,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
}
|
||||
|
||||
// DockerConfig configures a Docker-based pipeline.
|
||||
DockerConfig struct {
|
||||
Auths []*DockerAuth `json:"auths,omitempty"`
|
||||
Volumes []*Volume `json:"volumes,omitempty"`
|
||||
}
|
||||
|
||||
// DockerStep configures a docker step.
|
||||
DockerStep struct {
|
||||
Args []string `json:"args,omitempty"`
|
||||
Command []string `json:"command,omitempty"`
|
||||
DNS []string `json:"dns,omitempty"`
|
||||
DNSSearch []string `json:"dns_search,omitempty"`
|
||||
ExtraHosts []string `json:"extra_hosts,omitempty"`
|
||||
Image string `json:"image,omitempty"`
|
||||
Networks []string `json:"networks,omitempty"`
|
||||
Ports []*Port `json:"ports,omitempty"`
|
||||
Privileged bool `json:"privileged,omitempty"`
|
||||
PullPolicy PullPolicy `json:"pull_policy,omitempty"`
|
||||
}
|
||||
|
||||
// File defines a file that should be uploaded or
|
||||
// mounted somewhere in the step container or virtual
|
||||
// machine prior to command execution.
|
||||
File struct {
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
Data []byte `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// FileMount defines how a file resource should be
|
||||
// mounted or included in the runtime environment.
|
||||
FileMount struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
Mode int64 `json:"mode,omitempty"`
|
||||
|
||||
// Base string `json:"base,omitempty"`
|
||||
}
|
||||
|
||||
// FusionConfig configures a VMWare Fusion-based pipeline.
|
||||
FusionConfig struct {
|
||||
Image string `json:"image,omitempty"`
|
||||
}
|
||||
|
||||
// Platform defines the target platform.
|
||||
Platform struct {
|
||||
OS string `json:"os,omitempty"`
|
||||
Arch string `json:"arch,omitempty"`
|
||||
Variant string `json:"variant,omitempty"`
|
||||
Version string `json:"version,omitempty"`
|
||||
}
|
||||
|
||||
// Port represents a network port in a single container.
|
||||
Port struct {
|
||||
Port int `json:"port,omitempty"`
|
||||
Host int `json:"host,omitempty"`
|
||||
Protocol string `json:"protocol,omitempty"`
|
||||
}
|
||||
|
||||
// QemuConfig configures a Qemu-based pipeline.
|
||||
QemuConfig struct {
|
||||
Image string `json:"image,omitempty"`
|
||||
}
|
||||
|
||||
// Resources describes the compute resource
|
||||
// requirements.
|
||||
Resources struct {
|
||||
// Limits describes the maximum amount of compute
|
||||
// resources allowed.
|
||||
Limits *ResourceObject `json:"limits,omitempty"`
|
||||
|
||||
// Requests describes the minimum amount of
|
||||
// compute resources required.
|
||||
Requests *ResourceObject `json:"requests,omitempty"`
|
||||
}
|
||||
|
||||
// ResourceObject describes compute resource
|
||||
// requirements.
|
||||
ResourceObject struct {
|
||||
CPU int64 `json:"cpu,omitempty"`
|
||||
Memory int64 `json:"memory,omitempty"`
|
||||
}
|
||||
|
||||
// Secret represents a secret variable.
|
||||
Secret struct {
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
Data string `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// SecretVar represents an environment variable
|
||||
// sources from a secret.
|
||||
SecretVar struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Env string `json:"env,omitempty"`
|
||||
}
|
||||
|
||||
// State represents the container state.
|
||||
State struct {
|
||||
ExitCode int // Container exit code
|
||||
Exited bool // Container exited
|
||||
OOMKilled bool // Container is oom killed
|
||||
}
|
||||
|
||||
// Volume that can be mounted by containers.
|
||||
Volume struct {
|
||||
Metadata Metadata `json:"metadata,omitempty"`
|
||||
EmptyDir *VolumeEmptyDir `json:"temp,omitempty"`
|
||||
HostPath *VolumeHostPath `json:"host,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeDevice describes a mapping of a raw block
|
||||
// device within a container.
|
||||
VolumeDevice struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
DevicePath string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeMount describes a mounting of a Volume
|
||||
// within a container.
|
||||
VolumeMount struct {
|
||||
Name string `json:"name,omitempty"`
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeEmptyDir mounts a temporary directory from the
|
||||
// host node's filesystem into the container. This can
|
||||
// be used as a shared scratch space.
|
||||
VolumeEmptyDir struct {
|
||||
Medium string `json:"medium,omitempty"`
|
||||
SizeLimit int64 `json:"size_limit,omitempty"`
|
||||
}
|
||||
|
||||
// VolumeHostPath mounts a file or directory from the
|
||||
// host node's filesystem into your container.
|
||||
VolumeHostPath struct {
|
||||
Path string `json:"path,omitempty"`
|
||||
}
|
||||
)
|
@ -0,0 +1,109 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/mattn/go-isatty"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
"github.com/drone/drone-runtime/engine/docker"
|
||||
"github.com/drone/drone-runtime/engine/docker/auth"
|
||||
"github.com/drone/drone-runtime/engine/kube"
|
||||
"github.com/drone/drone-runtime/runtime"
|
||||
"github.com/drone/drone-runtime/runtime/term"
|
||||
"github.com/drone/signal"
|
||||
)
|
||||
|
||||
// tty reports whether stdout is attached to a terminal; when it
// is, main selects the pretty line writer for output.
var tty = isatty.IsTerminal(os.Stdout.Fd())
|
||||
|
||||
func main() {
|
||||
c := flag.String("config", "", "")
|
||||
k := flag.String("kube-config", "", "")
|
||||
u := flag.String("kube-url", "", "")
|
||||
n := flag.String("kube-node", "", "")
|
||||
d := flag.Bool("kube-debug", false, "")
|
||||
t := flag.Duration("timeout", time.Hour, "")
|
||||
h := flag.Bool("help", false, "")
|
||||
|
||||
flag.BoolVar(h, "h", false, "")
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
if *h {
|
||||
flag.Usage()
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
var source string
|
||||
if flag.NArg() > 0 {
|
||||
source = flag.Args()[0]
|
||||
}
|
||||
|
||||
config, err := engine.ParseFile(source)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
if *c != "" {
|
||||
auths, err := auth.ParseFile(*c)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
config.Docker.Auths = append(config.Docker.Auths, auths...)
|
||||
}
|
||||
|
||||
if *d == true {
|
||||
println(kube.Print(config))
|
||||
return
|
||||
}
|
||||
|
||||
var engine engine.Engine
|
||||
if *k == "" {
|
||||
engine, err = docker.NewEnv()
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
} else {
|
||||
engine, err = kube.NewFile(*u, *k, *n)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
hooks := &runtime.Hook{}
|
||||
hooks.GotLine = term.WriteLine(os.Stdout)
|
||||
if tty {
|
||||
hooks.GotLine = term.WriteLinePretty(os.Stdout)
|
||||
}
|
||||
|
||||
r := runtime.New(
|
||||
runtime.WithEngine(engine),
|
||||
runtime.WithConfig(config),
|
||||
runtime.WithHooks(hooks),
|
||||
)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), *t)
|
||||
ctx = signal.WithContext(ctx)
|
||||
defer cancel()
|
||||
|
||||
err = r.Run(ctx)
|
||||
if err != nil {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
}
|
||||
|
||||
// usage prints command line help to stdout.
//
// NOTE(review): the text lists --plugin, which main does not
// define, and omits --kube-node, which it does — confirm and
// align the help text with the flag set.
func usage() {
	fmt.Println(`Usage: drone-runtime [OPTION]... [SOURCE]
      --config       loads a docker config.json file
      --plugin       loads a runtime engine from a .so file
      --kube-config  loads a kubernetes config file
      --kube-url     sets a kubernetes endpoint
      --kube-debug   writes a kubernetes configuration to stdout
      --timeout      sets an execution timeout
  -h, --help         display this help and exit`)
}
|
@ -0,0 +1,45 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
var (
	// ErrSkip is used as a return value when container
	// execution should be skipped at runtime. It is not
	// returned as an error by any function.
	ErrSkip = errors.New("Skipped")

	// ErrCancel is used as a return value when the
	// container execution receives a cancellation signal
	// from the context.
	ErrCancel = errors.New("Cancelled")

	// ErrInterrupt is used to signal an interrupt and
	// gracefully exit the runtime execution.
	//
	// NOTE(review): the capitalized messages deviate from Go
	// convention but may be compared or displayed verbatim by
	// callers — confirm before normalizing.
	ErrInterrupt = errors.New("Interrupt")
)
|
||||
|
||||
// An ExitError reports an unsuccessful exit.
type ExitError struct {
	Name string // name of the step that exited
	Code int    // exit code reported for the step
}

// Error returns the error message in string format.
func (e *ExitError) Error() string {
	return fmt.Sprintf("%s : exit code %d", e.Name, e.Code)
}
|
||||
|
||||
// An OomError reports the process received an OOMKill from
// the kernel.
type OomError struct {
	Name string // name of the step that was killed
	Code int    // retained for parity with ExitError; not used in the message
}

// Error returns the error message in string format.
func (e *OomError) Error() string {
	return fmt.Sprintf("%s : received oom kill", e.Name)
}
|
@ -0,0 +1,26 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestExitError(t *testing.T) {
|
||||
err := ExitError{
|
||||
Name: "build",
|
||||
Code: 255,
|
||||
}
|
||||
got, want := err.Error(), "build : exit code 255"
|
||||
if got != want {
|
||||
t.Errorf("Want error message %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestOomError(t *testing.T) {
|
||||
err := OomError{
|
||||
Name: "build",
|
||||
}
|
||||
got, want := err.Error(), "build : received oom kill"
|
||||
if got != want {
|
||||
t.Errorf("Want error message %q, got %q", want, got)
|
||||
}
|
||||
}
|
@ -0,0 +1,76 @@
|
||||
package runtime
|
||||
|
||||
import "github.com/drone/drone-runtime/engine"
|
||||
|
||||
// status pairs a pipeline step with its execution state. A nil
// state indicates the step has not started (see nextStep).
type status struct {
	step  *engine.Step  // step definition from the spec
	state *engine.State // execution result; nil until the step starts
}
|
||||
|
||||
// isSerial returns true if the steps are to be executed
|
||||
// in serial mode, with no graph dependencies defined.
|
||||
func isSerial(spec *engine.Spec) bool {
|
||||
for _, step := range spec.Steps {
|
||||
// if a single dependency is defined we can exit
|
||||
// and return false.
|
||||
if len(step.DependsOn) != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// nextStep returns the next step in the dependency graph.
// If no steps are ready for execution, a nil value is
// returned.
func nextStep(spec *engine.Spec, states map[string]*status) *engine.Step {
LOOP:
	for _, step := range spec.Steps {
		// if the step has already started, move to the
		// next step in the list.
		state := states[step.Metadata.Name]
		if state.state != nil {
			continue
		}

		// if the step has zero dependencies and has not
		// started, it can be started immediately.
		if len(step.DependsOn) == 0 {
			return step
		}
		// if the step has dependencies, we check to ensure
		// all dependent steps are complete. If no, move on
		// to test the next step.
		for _, name := range step.DependsOn {
			state, ok := states[name]
			// if the dependency does not exist in the
			// state map it is considered fulfilled to
			// avoid deadlock.
			if !ok {
				continue
			}

			// if the dependency is running in detached
			// mode it is considered fulfilled to avoid
			// deadlock.
			if state.step.Detach {
				continue
			}
			// if the dependency is skipped (never executed)
			// it is considered fulfilled to avoid deadlock.
			if state.step.RunPolicy == engine.RunNever {
				continue
			}
			// if the dependency has not finished, this step is
			// not ready for execution.
			//
			// NOTE(review): `break LOOP` abandons the entire scan
			// and returns nil, so steps later in the list are not
			// considered even if their own dependencies are met.
			// The original comment said "break to the next step",
			// which would be `continue LOOP` — confirm which
			// scheduling behavior is intended before changing.
			if state.state == nil || state.state.Exited == false {
				break LOOP
			}
		}
		// if all dependencies are completed, the step
		// can be started.
		return step
	}
	return nil
}
|
@ -0,0 +1,24 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
func TestIsSerial(t *testing.T) {
|
||||
spec := &engine.Spec{
|
||||
Steps: []*engine.Step{
|
||||
{Metadata: engine.Metadata{Name: "build"}},
|
||||
{Metadata: engine.Metadata{Name: "test"}},
|
||||
},
|
||||
}
|
||||
if isSerial(spec) == false {
|
||||
t.Errorf("Expect is serial true")
|
||||
}
|
||||
|
||||
spec.Steps[1].DependsOn = []string{"build"}
|
||||
if isSerial(spec) == true {
|
||||
t.Errorf("Expect is serial false")
|
||||
}
|
||||
}
|
@ -0,0 +1,23 @@
|
||||
package runtime
|
||||
|
||||
// Hook provides a set of hooks to run at various stages of
// runtime execution. All fields are optional; a nil hook
// function is simply not invoked.
type Hook struct {
	// Before is called before all steps are executed.
	Before func(*State) error

	// BeforeEach is called before each step is executed.
	BeforeEach func(*State) error

	// After is called after all steps are executed.
	After func(*State) error

	// AfterEach is called after each step is executed.
	AfterEach func(*State) error

	// GotLine is called when a line is logged.
	GotLine func(*State, *Line) error

	// GotLogs is called when the logs are completed.
	GotLogs func(*State, []*Line) error
}
|
@ -0,0 +1,102 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
// Line represents a line in the container logs.
type Line struct {
	// Number is the zero-based position of the line in the
	// log stream.
	Number int `json:"pos,omitempty"`

	// Message is the line text, including any trailing
	// line-feed produced by the container.
	Message string `json:"out,omitempty"`

	// Timestamp is the number of whole seconds elapsed since
	// the writer was created (not a unix timestamp).
	Timestamp int64 `json:"time,omitempty"`
}
|
||||
|
||||
// lineWriter is an io.Writer that converts a container log
// stream into Line values, masks secrets, invokes the GotLine
// hook, and enforces a maximum total log size.
type lineWriter struct {
	num   int               // next line number to assign
	now   time.Time         // creation time; line timestamps are relative to this
	rep   *strings.Replacer // masks secret values; may be nil
	state *State            // runtime state used for hook invocation
	lines []*Line           // accumulated lines, delivered via GotLogs
	size  int               // total bytes written so far
	limit int               // maximum bytes before output is truncated
}
|
||||
|
||||
func newWriter(state *State) *lineWriter {
|
||||
w := &lineWriter{}
|
||||
w.num = 0
|
||||
w.now = time.Now().UTC()
|
||||
w.state = state
|
||||
w.rep = newReplacer(state.config.Secrets)
|
||||
w.limit = 5242880 // 5MB max log size
|
||||
return w
|
||||
}
|
||||
|
||||
// Write implements io.Writer. It masks secrets in p, splits
// buffered multi-line output into individual Line values,
// invokes the GotLine hook for each, and appends them to the
// in-memory line buffer. It always reports the full length of
// p as written, even when output is dropped due to the size
// limit. Note that any error returned by the GotLine hook is
// ignored here.
func (w *lineWriter) Write(p []byte) (n int, err error) {
	// if the maximum log size has been exceeded, the
	// log entry is silently ignored.
	if w.size >= w.limit {
		return len(p), nil
	}

	out := string(p)
	if w.rep != nil {
		out = w.rep.Replace(out)
	}

	parts := []string{out}

	// kubernetes buffers the output and may combine
	// multiple lines into a single block of output.
	// Split into multiple lines.
	//
	// note that docker output always includes a line
	// feed marker. This needs to be accounted for when
	// splitting the output into multiple lines.
	if strings.Contains(strings.TrimSuffix(out, "\n"), "\n") {
		parts = strings.SplitAfter(out, "\n")
	}

	for _, part := range parts {
		line := &Line{
			Number:    w.num,
			Message:   part,
			Timestamp: int64(time.Since(w.now).Seconds()),
		}

		if w.state.hook.GotLine != nil {
			w.state.hook.GotLine(w.state, line)
		}
		w.size = w.size + len(part)
		w.num++

		w.lines = append(w.lines, line)
	}

	// if the write exceeds the maximum output we should
	// write a single line to the end of the logs that
	// indicates the output is being truncated.
	if w.size >= w.limit {
		w.lines = append(w.lines, &Line{
			Number:    w.num,
			Message:   "warning: maximum output exceeded",
			Timestamp: int64(time.Since(w.now).Seconds()),
		})
	}

	return len(p), nil
}
|
||||
|
||||
func newReplacer(secrets []*engine.Secret) *strings.Replacer {
|
||||
var oldnew []string
|
||||
for _, secret := range secrets {
|
||||
oldnew = append(oldnew, secret.Data)
|
||||
oldnew = append(oldnew, "********")
|
||||
}
|
||||
if len(oldnew) == 0 {
|
||||
return nil
|
||||
}
|
||||
return strings.NewReplacer(oldnew...)
|
||||
}
|
@ -0,0 +1,112 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
func TestLineWriter(t *testing.T) {
|
||||
line := &Line{}
|
||||
hook := &Hook{}
|
||||
state := &State{}
|
||||
|
||||
hook.GotLine = func(_ *State, l *Line) error {
|
||||
line = l
|
||||
return nil
|
||||
}
|
||||
state.hook = hook
|
||||
state.Step = &engine.Step{}
|
||||
state.config = &engine.Spec{}
|
||||
state.config.Secrets = []*engine.Secret{
|
||||
{Metadata: engine.Metadata{Name: "foo"}, Data: "bar"},
|
||||
}
|
||||
|
||||
newWriter(state).Write([]byte("foobar"))
|
||||
|
||||
if line == nil {
|
||||
t.Error("Expect LineFunc invoked")
|
||||
}
|
||||
if got, want := line.Message, "foo********"; got != want {
|
||||
t.Errorf("Got line %q, want %q", got, want)
|
||||
}
|
||||
if got, want := line.Number, 0; got != want {
|
||||
t.Errorf("Got line %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineWriterSingle(t *testing.T) {
|
||||
line := &Line{}
|
||||
hook := &Hook{}
|
||||
state := &State{}
|
||||
|
||||
hook.GotLine = func(_ *State, l *Line) error {
|
||||
line = l
|
||||
return nil
|
||||
}
|
||||
state.hook = hook
|
||||
state.Step = &engine.Step{}
|
||||
state.config = &engine.Spec{}
|
||||
|
||||
lw := newWriter(state)
|
||||
lw.num = 5
|
||||
lw.Write([]byte("foo\n"))
|
||||
|
||||
if line == nil {
|
||||
t.Error("Expect LineFunc invoked")
|
||||
}
|
||||
if got, want := line.Message, "foo\n"; got != want {
|
||||
t.Errorf("Got line %q, want %q", got, want)
|
||||
}
|
||||
if got, want := line.Number, 5; got != want {
|
||||
t.Errorf("Got line %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineWriterMulti(t *testing.T) {
|
||||
var lines []*Line
|
||||
hook := &Hook{}
|
||||
state := &State{}
|
||||
|
||||
hook.GotLine = func(_ *State, l *Line) error {
|
||||
lines = append(lines, l)
|
||||
return nil
|
||||
}
|
||||
state.hook = hook
|
||||
state.Step = &engine.Step{}
|
||||
state.config = &engine.Spec{}
|
||||
|
||||
newWriter(state).Write([]byte("foo\nbar\nbaz"))
|
||||
|
||||
if len(lines) != 3 {
|
||||
t.Error("Expect LineFunc invoked")
|
||||
}
|
||||
if got, want := lines[1].Message, "bar\n"; got != want {
|
||||
t.Errorf("Got line %q, want %q", got, want)
|
||||
}
|
||||
if got, want := lines[1].Number, 1; got != want {
|
||||
t.Errorf("Got line %d, want %d", got, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineReplacer(t *testing.T) {
|
||||
secrets := []*engine.Secret{
|
||||
{Metadata: engine.Metadata{Name: "foo"}, Data: "bar"},
|
||||
}
|
||||
replacer := newReplacer(secrets)
|
||||
if replacer == nil {
|
||||
t.Errorf("Expect non-nil replacer when masked secrets")
|
||||
}
|
||||
if got, want := replacer.Replace("foobar"), "foo********"; got != want {
|
||||
t.Errorf("Expect %q replaced with value %q", got, want)
|
||||
}
|
||||
|
||||
// ensure the replacer is nil when the secret list is empty
|
||||
// or contains no masked secrets.
|
||||
|
||||
secrets = []*engine.Secret{}
|
||||
replacer = newReplacer(secrets)
|
||||
if replacer != nil {
|
||||
t.Errorf("Expect nil replacer when no masked secrets")
|
||||
}
|
||||
}
|
@ -0,0 +1,29 @@
|
||||
package runtime
|
||||
|
||||
import "github.com/drone/drone-runtime/engine"
|
||||
|
||||
// Option configures a Runtime option. Options are applied in
// order by New.
type Option func(*Runtime)
|
||||
|
||||
// WithEngine sets the Runtime engine.
|
||||
func WithEngine(e engine.Engine) Option {
|
||||
return func(r *Runtime) {
|
||||
r.engine = e
|
||||
}
|
||||
}
|
||||
|
||||
// WithConfig sets the Runtime configuration.
|
||||
func WithConfig(c *engine.Spec) Option {
|
||||
return func(r *Runtime) {
|
||||
r.config = c
|
||||
}
|
||||
}
|
||||
|
||||
// WithHooks sets the Runtime tracer.
|
||||
func WithHooks(h *Hook) Option {
|
||||
return func(r *Runtime) {
|
||||
if h != nil {
|
||||
r.hook = h
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,23 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
)
|
||||
|
||||
func TestWithHooks(t *testing.T) {
|
||||
h := &Hook{}
|
||||
r := New(WithHooks(h))
|
||||
if r.hook != h {
|
||||
t.Errorf("Option does not set runtime hooks")
|
||||
}
|
||||
}
|
||||
|
||||
func TestWithConfig(t *testing.T) {
|
||||
c := &engine.Spec{}
|
||||
r := New(WithConfig(c))
|
||||
if r.config != c {
|
||||
t.Errorf("Option does not set runtime configuration")
|
||||
}
|
||||
}
|
@ -0,0 +1,257 @@
|
||||
package runtime
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
"github.com/natessilva/dag"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
// Runtime executes a pipeline configuration.
type Runtime struct {
	// mu guards the error field, which is written
	// concurrently by execGraph goroutines.
	mu sync.Mutex

	engine engine.Engine // backend used to create, start and wait on containers
	config *engine.Spec  // pipeline specification being executed
	hook   *Hook         // lifecycle callbacks; never nil (see New)
	start  int64         // unix timestamp at which execution started
	error  error         // first step error encountered, if any
}
|
||||
|
||||
// New returns a new runtime using the specified runtime
|
||||
// configuration and runtime engine.
|
||||
func New(opts ...Option) *Runtime {
|
||||
r := &Runtime{}
|
||||
r.hook = &Hook{}
|
||||
for _, opts := range opts {
|
||||
opts(r)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
// Run starts the pipeline and waits for it to complete. It is
// a convenience wrapper around Resume starting at stage zero.
func (r *Runtime) Run(ctx context.Context) error {
	return r.Resume(ctx, 0)
}
|
||||
|
||||
// Resume starts the pipeline at the specified stage and
// waits for it to complete. The start index is honored only
// when the pipeline runs in serial mode; graph execution
// always runs the full graph. The environment is destroyed
// on return regardless of outcome.
func (r *Runtime) Resume(ctx context.Context, start int) error {
	defer func() {
		// note that we use a new context to destroy the
		// environment to ensure it is not in a canceled
		// state.
		r.engine.Destroy(context.Background(), r.config)
	}()

	r.error = nil
	r.start = time.Now().Unix()

	if r.hook.Before != nil {
		state := snapshot(r, nil, nil)
		if err := r.hook.Before(state); err != nil {
			return err
		}
	}

	if err := r.engine.Setup(ctx, r.config); err != nil {
		return err
	}

	if isSerial(r.config) {
		// serial mode: execute steps one at a time in spec
		// order, skipping steps before the start index. The
		// first failure is recorded but later steps still run
		// so that on-failure run policies can be honored.
		for i, step := range r.config.Steps {
			steps := []*engine.Step{step}
			if i < start {
				continue
			}
			select {
			case <-ctx.Done():
				return ErrCancel
			case err := <-r.execAll(steps):
				if err != nil {
					r.error = err
				}
			}
		}
	} else {
		// graph mode: schedule steps according to their
		// DependsOn relationships.
		err := r.execGraph(ctx)
		if err != nil {
			return err
		}
	}

	if r.hook.After != nil {
		state := snapshot(r, nil, nil)
		if err := r.hook.After(state); err != nil {
			return err
		}
	}
	return r.error
}
|
||||
|
||||
func (r *Runtime) execGraph(ctx context.Context) error {
|
||||
var d dag.Runner
|
||||
for _, s := range r.config.Steps {
|
||||
step := s
|
||||
d.AddVertex(step.Metadata.Name, func() error {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return ErrCancel
|
||||
default:
|
||||
}
|
||||
err := r.exec(step)
|
||||
if err != nil {
|
||||
r.mu.Lock()
|
||||
r.error = err
|
||||
r.mu.Unlock()
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
for _, s := range r.config.Steps {
|
||||
for _, dep := range s.DependsOn {
|
||||
d.AddEdge(dep, s.Metadata.Name)
|
||||
}
|
||||
}
|
||||
return d.Run()
|
||||
}
|
||||
|
||||
func (r *Runtime) execAll(group []*engine.Step) <-chan error {
|
||||
var g errgroup.Group
|
||||
done := make(chan error)
|
||||
|
||||
for _, step := range group {
|
||||
step := step
|
||||
g.Go(func() error {
|
||||
return r.exec(step)
|
||||
})
|
||||
}
|
||||
|
||||
go func() {
|
||||
done <- g.Wait()
|
||||
close(done)
|
||||
}()
|
||||
return done
|
||||
}
|
||||
|
||||
func (r *Runtime) exec(step *engine.Step) error {
|
||||
ctx := context.Background()
|
||||
|
||||
switch {
|
||||
case step.RunPolicy == engine.RunNever:
|
||||
return nil
|
||||
case r.error != nil && step.RunPolicy == engine.RunOnSuccess:
|
||||
return nil
|
||||
case r.error == nil && step.RunPolicy == engine.RunOnFailure:
|
||||
return nil
|
||||
}
|
||||
|
||||
if r.hook.BeforeEach != nil {
|
||||
state := snapshot(r, step, nil)
|
||||
if err := r.hook.BeforeEach(state); err == ErrSkip {
|
||||
return nil
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := r.engine.Create(ctx, r.config, step); err != nil {
|
||||
// TODO(bradrydzewski) refactor duplicate code
|
||||
if r.hook.AfterEach != nil {
|
||||
r.hook.AfterEach(
|
||||
snapshot(r, step, &engine.State{
|
||||
ExitCode: 255, Exited: true,
|
||||
}),
|
||||
)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if err := r.engine.Start(ctx, r.config, step); err != nil {
|
||||
// TODO(bradrydzewski) refactor duplicate code
|
||||
if r.hook.AfterEach != nil {
|
||||
r.hook.AfterEach(
|
||||
snapshot(r, step, &engine.State{
|
||||
ExitCode: 255, Exited: true,
|
||||
}),
|
||||
)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
rc, err := r.engine.Tail(ctx, r.config, step)
|
||||
if err != nil {
|
||||
// TODO(bradrydzewski) refactor duplicate code
|
||||
if r.hook.AfterEach != nil {
|
||||
r.hook.AfterEach(
|
||||
snapshot(r, step, &engine.State{
|
||||
ExitCode: 255, Exited: true,
|
||||
}),
|
||||
)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
var g errgroup.Group
|
||||
state := snapshot(r, step, nil)
|
||||
g.Go(func() error {
|
||||
return stream(state, rc)
|
||||
})
|
||||
|
||||
if step.Detach {
|
||||
return nil // do not wait for service containers to complete.
|
||||
}
|
||||
|
||||
defer func() {
|
||||
g.Wait() // wait for background tasks to complete.
|
||||
rc.Close()
|
||||
}()
|
||||
|
||||
wait, err := r.engine.Wait(ctx, r.config, step)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = g.Wait() // wait for background tasks to complete.
|
||||
|
||||
if wait.OOMKilled {
|
||||
err = &OomError{
|
||||
Name: step.Metadata.Name,
|
||||
Code: wait.ExitCode,
|
||||
}
|
||||
} else if wait.ExitCode != 0 {
|
||||
err = &ExitError{
|
||||
Name: step.Metadata.Name,
|
||||
Code: wait.ExitCode,
|
||||
}
|
||||
}
|
||||
|
||||
if r.hook.AfterEach != nil {
|
||||
state := snapshot(r, step, wait)
|
||||
if err := r.hook.AfterEach(state); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if step.IgnoreErr {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// helper function exports a single file or folder.
|
||||
func stream(state *State, rc io.ReadCloser) error {
|
||||
defer rc.Close()
|
||||
|
||||
w := newWriter(state)
|
||||
io.Copy(w, rc)
|
||||
|
||||
if state.hook.GotLogs != nil {
|
||||
return state.hook.GotLogs(state, w.lines)
|
||||
}
|
||||
return nil
|
||||
}
|
@ -0,0 +1,375 @@
|
||||
package runtime
|
||||
|
||||
// import (
|
||||
// "bytes"
|
||||
// "context"
|
||||
// "errors"
|
||||
// "io"
|
||||
// "io/ioutil"
|
||||
// "testing"
|
||||
|
||||
// "github.com/drone/autoscaler/mocks"
|
||||
// "github.com/drone/drone-runtime/engine"
|
||||
// "github.com/golang/mock/gomock"
|
||||
// )
|
||||
|
||||
// func TestRun(t *testing.T) {
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Spec{
|
||||
// Steps: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {
|
||||
// Name: "step_0",
|
||||
// Exports: []*engine.File{
|
||||
// {Path: "/etc/hosts", Mime: "text/plain"},
|
||||
// },
|
||||
// OnSuccess: true,
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// buf := ioutil.NopCloser(bytes.NewBufferString(""))
|
||||
|
||||
// state := new(engine.State)
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(ctx, conf)
|
||||
// mock.EXPECT().Destroy(ctx, conf)
|
||||
// mock.EXPECT().Tail(ctx, conf.Stages[0].Steps[0]).Return(buf, nil)
|
||||
// mock.EXPECT().Wait(ctx, conf.Stages[0].Steps[0]).Return(state, nil)
|
||||
// mock.EXPECT().Create(ctx, conf.Stages[0].Steps[0])
|
||||
// mock.EXPECT().Start(ctx, conf.Stages[0].Steps[0])
|
||||
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// )
|
||||
// err := run.Run(context.Background())
|
||||
// if err != nil {
|
||||
// t.Error(err)
|
||||
// }
|
||||
|
||||
// // TODO test Before
|
||||
// // TODO test BeforeEach
|
||||
// // TODO test After
|
||||
// // TODO test AfterEach
|
||||
// // TODO test GotFile
|
||||
// // TODO test GotLine
|
||||
// // TODO test GotLogs
|
||||
// }
|
||||
|
||||
// // TestResume verifies the runtime resumes execution at the specified stage
|
||||
// // and skips previous stages.
|
||||
// func TestResume(t *testing.T) {
|
||||
// t.Skip()
|
||||
// }
|
||||
|
||||
// // TestRunOnSuccessTrue verifies the runtime executes a container if the
|
||||
// // OnSuccess flag is True and the pipeline is in a passing state.
|
||||
// func TestRunOnSuccessTrue(t *testing.T) {
|
||||
// t.Skip()
|
||||
// }
|
||||
|
||||
// // TestRunOnSuccessFalse verifies the runtime skips a container if the
|
||||
// // OnSuccess flag is False and the pipeline is in a passing state.
|
||||
// func TestRunOnSuccessFalse(t *testing.T) {
|
||||
// t.Skip()
|
||||
// }
|
||||
|
||||
// // TestRunOnFailureTrue verifies the runtime executes a container if the
|
||||
// // OnFailure flag is True and the pipeline is in a failing state.
|
||||
// func TestRunOnFailureTrue(t *testing.T) {
|
||||
// t.Skip()
|
||||
// }
|
||||
|
||||
// // TestRunOnFailureFalse verifies the runtime skips a container if the
|
||||
// // OnFailure flag is False and the pipeline is in a failing state.
|
||||
// func TestRunOnFailureFalse(t *testing.T) {
|
||||
// t.Skip()
|
||||
// }
|
||||
|
||||
// // TestRunDetached verifies the runtime executes a container in the background
|
||||
// // and does not wait for it to execute when the detached flag is true.
|
||||
// func TestRunDetached(t *testing.T) {
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Config{
|
||||
// Stages: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {Name: "step_0", OnSuccess: true, Detached: true},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// buf := ioutil.NopCloser(bytes.NewBufferString(""))
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(ctx, conf)
|
||||
// mock.EXPECT().Destroy(ctx, conf)
|
||||
// mock.EXPECT().Tail(ctx, conf.Stages[0].Steps[0]).Return(buf, nil)
|
||||
// mock.EXPECT().Create(ctx, conf.Stages[0].Steps[0])
|
||||
// mock.EXPECT().Start(ctx, conf.Stages[0].Steps[0])
|
||||
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// )
|
||||
|
||||
// err := run.Run(context.Background())
|
||||
// if err != nil {
|
||||
// t.Error(err)
|
||||
// }
|
||||
// }
|
||||
|
||||
// // TestRunError verifies the runtime exits when the docker engine returns an
|
||||
// // error doing a routine operation, like waiting for a container to exit.
|
||||
// func TestRunError(t *testing.T) {
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Config{
|
||||
// Stages: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {Name: "step_0", OnSuccess: true},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// err := errors.New("dummy error")
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(ctx, conf)
|
||||
// mock.EXPECT().Destroy(ctx, conf)
|
||||
// mock.EXPECT().Tail(ctx, conf.Stages[0].Steps[0]).Return(nil, err)
|
||||
// mock.EXPECT().Create(ctx, conf.Stages[0].Steps[0])
|
||||
// mock.EXPECT().Start(ctx, conf.Stages[0].Steps[0])
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// )
|
||||
|
||||
// if got, want := run.Run(context.Background()), err; got != want {
|
||||
// t.Error("Want Engine error returned from runtime")
|
||||
// }
|
||||
// }
|
||||
|
||||
// // TestRunErrorExit verifies the runtime exits when a step returns a non-zero
|
||||
// // exit code. The runtime must return an ExitError with the container exit code.
|
||||
// func TestRunErrorExit(t *testing.T) {
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Config{
|
||||
// Stages: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {Name: "step_0", OnSuccess: true},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// buf := ioutil.NopCloser(bytes.NewBufferString(""))
|
||||
|
||||
// state := &engine.State{
|
||||
// ExitCode: 255,
|
||||
// }
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(ctx, conf)
|
||||
// mock.EXPECT().Destroy(ctx, conf)
|
||||
// mock.EXPECT().Tail(ctx, conf.Stages[0].Steps[0]).Return(buf, nil)
|
||||
// mock.EXPECT().Wait(ctx, conf.Stages[0].Steps[0]).Return(state, nil)
|
||||
// mock.EXPECT().Create(ctx, conf.Stages[0].Steps[0])
|
||||
// mock.EXPECT().Start(ctx, conf.Stages[0].Steps[0])
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// )
|
||||
|
||||
// err := run.Run(context.Background())
|
||||
// if err == nil {
|
||||
// t.Errorf("Want error returned from runtime, got nil")
|
||||
// }
|
||||
// errExit, ok := err.(*ExitError)
|
||||
// if !ok {
|
||||
// t.Errorf("Want ExitError returned from runtime")
|
||||
// return
|
||||
// }
|
||||
// if got, want := errExit.Code, state.ExitCode; got != want {
|
||||
// t.Errorf("Want exit code %d, got %d", want, got)
|
||||
// }
|
||||
// if got, want := errExit.Name, "step_0"; got != want {
|
||||
// t.Errorf("Want step name %s, got %s", want, got)
|
||||
// }
|
||||
// }
|
||||
|
||||
// // TestRunErrorOom verifies the runtime exits when a step returns with
|
||||
// // out-of-memory killed is true. The runtime must return an OomError.
|
||||
// func TestRunErrorOom(t *testing.T) {
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Config{
|
||||
// Stages: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {Name: "step_0", OnSuccess: true},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// buf := ioutil.NopCloser(bytes.NewBufferString(""))
|
||||
|
||||
// state := &engine.State{
|
||||
// OOMKilled: true,
|
||||
// ExitCode: 255,
|
||||
// }
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(ctx, conf)
|
||||
// mock.EXPECT().Destroy(ctx, conf)
|
||||
// mock.EXPECT().Tail(ctx, conf.Stages[0].Steps[0]).Return(buf, nil)
|
||||
// mock.EXPECT().Wait(ctx, conf.Stages[0].Steps[0]).Return(state, nil)
|
||||
// mock.EXPECT().Create(ctx, conf.Stages[0].Steps[0])
|
||||
// mock.EXPECT().Start(ctx, conf.Stages[0].Steps[0])
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// )
|
||||
|
||||
// err := run.Run(context.Background())
|
||||
// if err == nil {
|
||||
// t.Errorf("Want error returned from runtime, got nil")
|
||||
// }
|
||||
// errOOM, ok := err.(*OomError)
|
||||
// if !ok {
|
||||
// t.Errorf("Want OomError returned from runtime")
|
||||
// return
|
||||
// }
|
||||
// if got, want := errOOM.Name, "step_0"; got != want {
|
||||
// t.Errorf("Want step name %s, got %s", want, got)
|
||||
// }
|
||||
// }
|
||||
|
||||
// // TestRunCancel verifies the runtime exits when context.Done and returns an
|
||||
// // ErrCancel. It also verifies the runtime exits immediately and does not
|
||||
// // execute additional steps.
|
||||
// func TestRunCancel(t *testing.T) {
|
||||
// t.Skipf("this test panics when cancel() is invoked")
|
||||
// t.SkipNow()
|
||||
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Config{
|
||||
// Stages: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {Name: "step_0", OnSuccess: true},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(ctx, conf)
|
||||
// mock.EXPECT().Destroy(ctx, conf)
|
||||
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// )
|
||||
|
||||
// ctx, cancel := context.WithCancel(context.Background())
|
||||
// cancel() // cancel immediately
|
||||
|
||||
// err := run.Run(ctx)
|
||||
// if err != ErrCancel {
|
||||
// t.Errorf("Expect ErrCancel when context is cancelled, got %s", err)
|
||||
// }
|
||||
// }
|
||||
|
||||
// // TestRunStartErr verifies the runtime exits and returns an error if there
|
||||
// // is a failure to create or start a container step.
|
||||
// func TestRunStartErr(t *testing.T) {
|
||||
// c := gomock.NewController(t)
|
||||
// defer c.Finish()
|
||||
|
||||
// conf := &engine.Config{
|
||||
// Stages: []*engine.Stage{
|
||||
// {
|
||||
// Name: "stage_0",
|
||||
// Steps: []*engine.Step{
|
||||
// {Name: "step_0", OnSuccess: true},
|
||||
// },
|
||||
// },
|
||||
// },
|
||||
// }
|
||||
|
||||
// ctx := context.TODO()
|
||||
|
||||
// mock := mocks.NewMockEngine(c)
|
||||
// mock.EXPECT().Setup(gomock.Any(), conf)
|
||||
// mock.EXPECT().Destroy(gomock.Any(), conf)
|
||||
// mock.EXPECT().Create(gomock.Any(), conf.Stages[0].Steps[0]).Return(io.EOF)
|
||||
|
||||
// hookInvoked := false
|
||||
// hooks := &Hook{
|
||||
// AfterEach: func(state *State) error {
|
||||
// hookInvoked = true
|
||||
|
||||
// if state.State.ExitCode != 255 {
|
||||
// t.Errorf("Want Exit Code 255 on container error")
|
||||
// }
|
||||
// if state.State.Exited == false {
|
||||
// t.Errorf("Want Exited true on container error")
|
||||
// }
|
||||
// return nil
|
||||
// },
|
||||
// }
|
||||
|
||||
// run := New(
|
||||
// WithEngine(mock),
|
||||
// WithConfig(conf),
|
||||
// WithHooks(hooks),
|
||||
// )
|
||||
|
||||
// err := run.Run(ctx)
|
||||
// if err != io.EOF {
|
||||
// t.Errorf("Expect Exit Error")
|
||||
// }
|
||||
|
||||
// if !hookInvoked {
|
||||
// t.Errorf("Expect AfterEach hook invoked")
|
||||
// }
|
||||
// }
|
@ -0,0 +1,38 @@
|
||||
package runtime
|
||||
|
||||
import "github.com/drone/drone-runtime/engine"
|
||||
|
||||
// State defines the pipeline and process state. It is the
// value passed to every Hook callback.
type State struct {
	hook   *Hook        // callbacks inherited from the Runtime
	config *engine.Spec // pipeline specification
	engine engine.Engine

	// Global state of the runtime.
	Runtime struct {
		// Runtime time started (unix timestamp).
		Time int64

		// Runtime pipeline error state.
		Error error
	}

	// Runtime pipeline step. Nil for pipeline-level hooks
	// (Before / After).
	Step *engine.Step

	// Current process state. Nil before the step exits.
	State *engine.State
}
|
||||
|
||||
// snapshot makes a snapshot of the runtime state.
|
||||
func snapshot(r *Runtime, step *engine.Step, state *engine.State) *State {
|
||||
s := new(State)
|
||||
s.Runtime.Error = r.error
|
||||
s.Runtime.Time = r.start
|
||||
s.config = r.config
|
||||
s.hook = r.hook
|
||||
s.engine = r.engine
|
||||
s.Step = step
|
||||
s.State = state
|
||||
return s
|
||||
}
|
@ -0,0 +1,59 @@
|
||||
package term
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/drone/drone-runtime/runtime"
|
||||
)
|
||||
|
||||
// log line format strings: linePlain is used by WriteLine,
// linePretty adds ANSI color escapes for WriteLinePretty.
const (
	linePlain  = "[%s:%d] %s"
	linePretty = "\033[%s[%s:%d]\033[0m %s"
)
|
||||
|
||||
// available terminal colors, assigned to steps round-robin
// by WriteLinePretty.
var colors = []string{
	"32m", // green
	"33m", // yellow
	"34m", // blue
	"35m", // magenta
	"36m", // cyan
}
|
||||
|
||||
// WriteLineFunc defines a function responsible for writing
// individual lines of log output.
type WriteLineFunc func(*runtime.State, *runtime.Line) error
|
||||
|
||||
// WriteLine writes log lines to io.Writer w in plain text format.
|
||||
func WriteLine(w io.Writer) WriteLineFunc {
|
||||
return func(state *runtime.State, line *runtime.Line) error {
|
||||
fmt.Fprintf(w, linePlain, state.Step.Metadata.Name, line.Number, line.Message)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WriteLinePretty writes pretty-printed log lines to io.Writer w.
|
||||
func WriteLinePretty(w io.Writer) WriteLineFunc {
|
||||
var (
|
||||
mutex sync.Mutex
|
||||
steps = map[string]string{}
|
||||
)
|
||||
|
||||
return func(state *runtime.State, line *runtime.Line) error {
|
||||
mutex.Lock()
|
||||
color, ok := steps[state.Step.Metadata.Name]
|
||||
mutex.Unlock()
|
||||
|
||||
if !ok {
|
||||
color = colors[len(steps)%len(colors)]
|
||||
mutex.Lock()
|
||||
steps[state.Step.Metadata.Name] = color
|
||||
mutex.Unlock()
|
||||
}
|
||||
|
||||
fmt.Fprintf(w, linePretty, color, state.Step.Metadata.Name, line.Number, line.Message)
|
||||
return nil
|
||||
}
|
||||
}
|
@ -0,0 +1,39 @@
|
||||
package term
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"testing"
|
||||
|
||||
"github.com/drone/drone-runtime/engine"
|
||||
"github.com/drone/drone-runtime/runtime"
|
||||
)
|
||||
|
||||
func TestWriteLine(t *testing.T) {
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
step = &engine.Step{Metadata: engine.Metadata{Name: "test"}}
|
||||
line = &runtime.Line{Number: 1, Message: "hello"}
|
||||
state = &runtime.State{Step: step}
|
||||
)
|
||||
|
||||
WriteLine(&buf)(state, line)
|
||||
|
||||
if got, want := buf.String(), "[test:1] hello"; got != want {
|
||||
t.Errorf("Want line %q, got %q", want, got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWriteLinePretty(t *testing.T) {
|
||||
var (
|
||||
buf bytes.Buffer
|
||||
step = &engine.Step{Metadata: engine.Metadata{Name: "test"}}
|
||||
line = &runtime.Line{Number: 1, Message: "hello"}
|
||||
state = &runtime.State{Step: step}
|
||||
)
|
||||
|
||||
WriteLinePretty(&buf)(state, line)
|
||||
|
||||
if got, want := buf.String(), "\x1b[32m[test:1]\x1b[0m hello"; got != want {
|
||||
t.Errorf("Want line %q, got %q", want, got)
|
||||
}
|
||||
}
|
@ -0,0 +1,47 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_GPzK101Yka7Hf9JD",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "docker_test"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_gkZrU925ZbAWEDoy",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "ping"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; docker info"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "docker:18"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "dockersock",
|
||||
"path": "/var/run/docker.sock"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"volumes": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_NttbpIbQLKCyG8uI",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "dockersock"
|
||||
},
|
||||
"host": {
|
||||
"path": "/var/run/docker.sock"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -0,0 +1,30 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_AOTCIPBf3XdTFs2j",
|
||||
"namespace": "ns_nvPNAIL16QyAMVtF",
|
||||
"name": "auth_test"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_8a7IJsL9zSJCCchd",
|
||||
"namespace": "ns_nvPNAIL16QyAMVtF",
|
||||
"name": "greetings"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"echo hello world"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "bradrydzewski/private-test:latest",
|
||||
"pull_policy": "default"
|
||||
},
|
||||
"run_policy": "on-success"
|
||||
}
|
||||
],
|
||||
"files": [],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,41 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_AOTCIPBf3XdTFs2j",
|
||||
"namespace": "ns_JVzesGoyteu5koZK",
|
||||
"name": "test_hello_world"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_8a7IJsL9zSJCCchd",
|
||||
"namespace": "ns_JVzesGoyteu5koZK",
|
||||
"name": "greetings"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"/usr/local/bin/droneinit"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6",
|
||||
"pull_policy": "default"
|
||||
},
|
||||
"files": [
|
||||
{
|
||||
"name": "greetings_script",
|
||||
"path": "/usr/local/bin/droneinit",
|
||||
"mode": 511
|
||||
}
|
||||
],
|
||||
"run_policy": "on-success"
|
||||
}
|
||||
],
|
||||
"files": [
|
||||
{
|
||||
"name": "greetings_script",
|
||||
"data": "ZWNobyBoZWxsbyB3b3JsZAo="
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,48 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_W6kaKXlwtVX4gV6L",
|
||||
"namespace": "ns_gyOk2PMIgw3xJfPn",
|
||||
"name": "test_on_success",
|
||||
"labels": null
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_IGDaqvs2upuEPY7E",
|
||||
"namespace": "ns_gyOk2PMIgw3xJfPn",
|
||||
"name": "step_0"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -e; set -x; echo this step will exit the pipeline; exit 1"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"run_policy": "on-success"
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_vOqlrhPQZ5kv45iF",
|
||||
"namespace": "ns_gyOk2PMIgw3xJfPn",
|
||||
"name": "step_1"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -e; set -x; echo this step must not execute"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"run_policy": "on-success"
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,46 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_eY3eTAq6fdaARDRi",
|
||||
"namespace": "ns_C72TOXpVkqoAmojM",
|
||||
"name": "test_on_failure"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_ObSCalCTDCTASaXG",
|
||||
"namespace": "ns_C72TOXpVkqoAmojM",
|
||||
"name": "step_0"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -e; set -x; echo this step will exit the pipeline; exit 1"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"run_policy": "on-success"
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_2stvc2NEzyB2rqLf",
|
||||
"namespace": "ns_C72TOXpVkqoAmojM",
|
||||
"name": "step_1"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -e; set -x; echo this step must execute on failure"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"run_policy": "on-failure"
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,70 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_GPzK101Yka7Hf9JD",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "test_host_volume"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_cbuOsqukzBEMAG1X",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "write"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; echo 'hello world' > /tmp/greetings.txt"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "tmp",
|
||||
"path": "/tmp"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_OA3Q3LNmILpj0WKG",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "read"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; cat /tmp/greetings.txt"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "tmp",
|
||||
"path": "/tmp"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"volumes": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_NttbpIbQLKCyG8uI",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "tmp"
|
||||
},
|
||||
"host": {
|
||||
"path": "/tmp/test-drone-runtime"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -0,0 +1,68 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_GPzK101Yka7Hf9JD",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "test_temp_volume"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_cbuOsqukzBEMAG1X",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "write"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; echo 'hello world' > /tmp/greetings.txt"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "tmp",
|
||||
"path": "/tmp"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_OA3Q3LNmILpj0WKG",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "read"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; cat /tmp/greetings.txt"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "tmp",
|
||||
"path": "/tmp"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"volumes": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_NttbpIbQLKCyG8uI",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "tmp"
|
||||
},
|
||||
"temp": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -0,0 +1,48 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_GPzK101Yka7Hf9JD",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "test_temp_volume"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_cbuOsqukzBEMAG1X",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "write"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; echo 'hello world' > /tmp/greetings.txt; ls -la /tmp"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "tmp",
|
||||
"path": "/tmp"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"volumes": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_NttbpIbQLKCyG8uI",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "tmp"
|
||||
},
|
||||
"temp": {
|
||||
"medium": "memory",
|
||||
"size_limit": 1024
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -0,0 +1,39 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_GPzK101Yka7Hf9JD",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "redis_test"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_V3UjFaWH0IV9UDbI",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "redis"
|
||||
},
|
||||
"detach": true,
|
||||
"docker": {
|
||||
"image": "redis:4-alpine"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_gkZrU925ZbAWEDoy",
|
||||
"namespace": "ns_cwOiiTAoLeYVHWVv",
|
||||
"name": "ping"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; sleep 5; redis-cli -h redis ping; redis-cli -h redis set HELLO hello; redis-cli -h redis get HELLO"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "redis:4-alpine"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,49 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_Rsd12QpHu5zInGVL",
|
||||
"namespace": "ns_vshOGHFAAhv32ETm",
|
||||
"name": "redis_multi_test"
|
||||
},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_RTh2dwjZUOvz1KIn",
|
||||
"namespace": "ns_vshOGHFAAhv32ETm",
|
||||
"name": "redis_1"
|
||||
},
|
||||
"detach": true,
|
||||
"docker": {
|
||||
"image": "redis:4-alpine"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_GMQ61mzQ1zSTSwVj",
|
||||
"namespace": "ns_vshOGHFAAhv32ETm",
|
||||
"name": "redis_2"
|
||||
},
|
||||
"detach": true,
|
||||
"docker": {
|
||||
"image": "redis:4-alpine"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_bkAb4hbBJF0vC5bM",
|
||||
"namespace": "ns_vshOGHFAAhv32ETm",
|
||||
"name": "ping"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -x; set -e; sleep 5; redis-cli -h redis_1 ping; redis-cli -h redis_2 ping; redis-cli -h redis_1 set HELLO hello; redis-cli -h redis_2 set HELLO hola; redis-cli -h redis_1 get HELLO; redis-cli -h redis_2 get HELLO"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "redis:4-alpine"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,44 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_ntsKKapGMZilZ3yQ",
|
||||
"namespace": "ns_ntsKKapGMZilZ3yQ",
|
||||
"name": "postgres_test"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_Oxx4WbJUQ3Uc1g7L",
|
||||
"namespace": "ns_ntsKKapGMZilZ3yQ",
|
||||
"name": "postgres"
|
||||
},
|
||||
"detach": true,
|
||||
"docker": {
|
||||
"image": "postgres:9-alpine",
|
||||
"pull_policy": "default"
|
||||
},
|
||||
"environment": {
|
||||
"POSTGRES_DB": "test",
|
||||
"POSTGRES_USER": "postgres"
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_QKUv7AEQq67t9eTH",
|
||||
"namespace": "ns_ntsKKapGMZilZ3yQ",
|
||||
"name": "pinger"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"sleep 15 && psql -U postgres -d test -h postgres -c 'SELECT table_name FROM information_schema.tables;'"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "postgres:9-alpine"
|
||||
}
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,29 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_W6kaKXlwtVX4gV6L",
|
||||
"namespace": "ns_gyOk2PMIgw3xJfPn",
|
||||
"name": "working_dir_test"
|
||||
},
|
||||
"secrets": null,
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "uid_vOqlrhPQZ5kv45iF",
|
||||
"namespace": "ns_gyOk2PMIgw3xJfPn",
|
||||
"name": "step_0"
|
||||
},
|
||||
"docker": {
|
||||
"args": [
|
||||
"-c",
|
||||
"set -e;\nset -x;\npwd"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "alpine:3.6"
|
||||
},
|
||||
"working_dir": "/root/src"
|
||||
}
|
||||
],
|
||||
"docker": {}
|
||||
}
|
@ -0,0 +1,91 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "810r59j9lvmcflafw7g2oinvmzbrs5zr",
|
||||
"namespace": "810r59j9lvmcflafw7g2oinvmzbrs5zr",
|
||||
"name": "810r59j9lvmcflafw7g2oinvmzbrs5zr",
|
||||
"labels": {
|
||||
"io.drone.pipeline.kind": "pipeline",
|
||||
"io.drone.pipeline.name": "default",
|
||||
"io.drone.pipeline.type": ""
|
||||
}
|
||||
},
|
||||
"platform": {},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "hczqch6uzentfwtl7ocrtffjhjcz9yee",
|
||||
"namespace": "810r59j9lvmcflafw7g2oinvmzbrs5zr",
|
||||
"name": "greetings",
|
||||
"labels": {
|
||||
"io.drone.step.name": "greetings"
|
||||
}
|
||||
},
|
||||
"environment": {
|
||||
"CI_WORKSPACE": "/drone/src",
|
||||
"CI_WORKSPACE_BASE": "/drone",
|
||||
"CI_WORKSPACE_PATH": "src",
|
||||
"DRONE_BUILD_EVENT": "",
|
||||
"DRONE_COMMIT_BRANCH": "",
|
||||
"DRONE_COMMIT_REF": "",
|
||||
"DRONE_COMMIT_SHA": "",
|
||||
"DRONE_REMOTE_URL": "",
|
||||
"DRONE_WORKSPACE": "/drone/src",
|
||||
"DRONE_WORKSPACE_BASE": "/drone",
|
||||
"DRONE_WORKSPACE_PATH": "src"
|
||||
},
|
||||
"files": [
|
||||
{
|
||||
"name": "greetings",
|
||||
"path": "/usr/drone/bin/init",
|
||||
"mode": 511
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "workspace",
|
||||
"path": "/drone"
|
||||
}
|
||||
],
|
||||
"working_dir": "/drone/src",
|
||||
"docker": {
|
||||
"args": [
|
||||
"/usr/drone/bin/init"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "docker.io/library/alpine:latest"
|
||||
},
|
||||
"kubernetes": {
|
||||
"args": [
|
||||
"/usr/drone/bin/init"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"files": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "6hgg9lkszo2hwril6zd05ansk90whrkw",
|
||||
"namespace": "810r59j9lvmcflafw7g2oinvmzbrs5zr",
|
||||
"name": "greetings"
|
||||
},
|
||||
"data": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IERST05FX05FVFJDX1VTRVJOQU1FCnVuc2V0IERST05FX05FVFJDX1BBU1NXT1JECnNldCAtZQoKZWNobyArICJlY2hvIGhlbGxvIgplY2hvIGhlbGxvCgplY2hvICsgImVjaG8gd29ybGQiCmVjaG8gd29ybGQKCg=="
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"volumes": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "qm2ua64xtfc26wmwt4bk91yf982mpi06",
|
||||
"namespace": "810r59j9lvmcflafw7g2oinvmzbrs5zr",
|
||||
"name": "workspace"
|
||||
},
|
||||
"temp": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
@ -0,0 +1,192 @@
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"labels": {
|
||||
"io.drone.pipeline.kind": "pipeline",
|
||||
"io.drone.pipeline.name": "",
|
||||
"io.drone.pipeline.type": ""
|
||||
}
|
||||
},
|
||||
"platform": {},
|
||||
"steps": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "ksreb5z2ybkpa5kzey3w7ip29i8gkbt6",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "redis",
|
||||
"labels": {
|
||||
"io.drone.step.name": "redis"
|
||||
}
|
||||
},
|
||||
"detach": true,
|
||||
"environment": {
|
||||
"CI_WORKSPACE": "/drone/src",
|
||||
"CI_WORKSPACE_BASE": "/drone",
|
||||
"CI_WORKSPACE_PATH": "src",
|
||||
"DRONE_BUILD_EVENT": "",
|
||||
"DRONE_COMMIT_BRANCH": "",
|
||||
"DRONE_COMMIT_REF": "",
|
||||
"DRONE_COMMIT_SHA": "",
|
||||
"DRONE_REMOTE_URL": "",
|
||||
"DRONE_WORKSPACE": "/drone/src",
|
||||
"DRONE_WORKSPACE_BASE": "/drone",
|
||||
"DRONE_WORKSPACE_PATH": "src"
|
||||
},
|
||||
"volumes": [
|
||||
{
|
||||
"name": "workspace",
|
||||
"path": "/drone"
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"image": "docker.io/library/redis:4-alpine",
|
||||
"ports": [
|
||||
{
|
||||
"port": 6379
|
||||
}
|
||||
]
|
||||
},
|
||||
"kubernetes": {}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "tzp4ouvgm6x7lsigbhilkiekrs4gk988",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "ping",
|
||||
"labels": {
|
||||
"io.drone.step.name": "ping"
|
||||
}
|
||||
},
|
||||
"environment": {
|
||||
"CI_WORKSPACE": "/drone/src",
|
||||
"CI_WORKSPACE_BASE": "/drone",
|
||||
"CI_WORKSPACE_PATH": "src",
|
||||
"DRONE_BUILD_EVENT": "",
|
||||
"DRONE_COMMIT_BRANCH": "",
|
||||
"DRONE_COMMIT_REF": "",
|
||||
"DRONE_COMMIT_SHA": "",
|
||||
"DRONE_REMOTE_URL": "",
|
||||
"DRONE_WORKSPACE": "/drone/src",
|
||||
"DRONE_WORKSPACE_BASE": "/drone",
|
||||
"DRONE_WORKSPACE_PATH": "src"
|
||||
},
|
||||
"files": [
|
||||
{
|
||||
"name": "ping",
|
||||
"path": "/usr/drone/bin/init",
|
||||
"mode": 511
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "workspace",
|
||||
"path": "/drone"
|
||||
}
|
||||
],
|
||||
"working_dir": "/drone/src",
|
||||
"docker": {
|
||||
"args": [
|
||||
"/usr/drone/bin/init"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "docker.io/library/redis:4-alpine"
|
||||
},
|
||||
"kubernetes": {
|
||||
"args": [
|
||||
"/usr/drone/bin/init"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "7r5eivubtvgyloclxlqfii5bb04sjzqb",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "greetings",
|
||||
"labels": {
|
||||
"io.drone.step.name": "greetings"
|
||||
}
|
||||
},
|
||||
"environment": {
|
||||
"CI_WORKSPACE": "/drone/src",
|
||||
"CI_WORKSPACE_BASE": "/drone",
|
||||
"CI_WORKSPACE_PATH": "src",
|
||||
"DRONE_BUILD_EVENT": "",
|
||||
"DRONE_COMMIT_BRANCH": "",
|
||||
"DRONE_COMMIT_REF": "",
|
||||
"DRONE_COMMIT_SHA": "",
|
||||
"DRONE_REMOTE_URL": "",
|
||||
"DRONE_WORKSPACE": "/drone/src",
|
||||
"DRONE_WORKSPACE_BASE": "/drone",
|
||||
"DRONE_WORKSPACE_PATH": "src"
|
||||
},
|
||||
"files": [
|
||||
{
|
||||
"name": "greetings",
|
||||
"path": "/usr/drone/bin/init",
|
||||
"mode": 511
|
||||
}
|
||||
],
|
||||
"volumes": [
|
||||
{
|
||||
"name": "workspace",
|
||||
"path": "/drone"
|
||||
}
|
||||
],
|
||||
"working_dir": "/drone/src",
|
||||
"docker": {
|
||||
"args": [
|
||||
"/usr/drone/bin/init"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
],
|
||||
"image": "docker.io/library/golang:1.11"
|
||||
},
|
||||
"kubernetes": {
|
||||
"args": [
|
||||
"/usr/drone/bin/init"
|
||||
],
|
||||
"command": [
|
||||
"/bin/sh"
|
||||
]
|
||||
}
|
||||
}
|
||||
],
|
||||
"files": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "0ko8zg28dw0kl9j9bb1ecoek8cgfaspi",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "ping"
|
||||
},
|
||||
"data": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IERST05FX05FVFJDX1VTRVJOQU1FCnVuc2V0IERST05FX05FVFJDX1BBU1NXT1JECnNldCAtZQoKZWNobyArICJzbGVlcCA1IgpzbGVlcCA1CgplY2hvICsgImVjaG8gXCRSRURJU19TRVJWSUNFX0hPU1QiCmVjaG8gJFJFRElTX1NFUlZJQ0VfSE9TVAoKZWNobyArICJyZWRpcy1jbGkgLWggXCRSRURJU19TRVJWSUNFX0hPU1QgcGluZyIKcmVkaXMtY2xpIC1oICRSRURJU19TRVJWSUNFX0hPU1QgcGluZwoK"
|
||||
},
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "aiagbxuvgt5rbbtxsava8s0uiq8mybf2",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "greetings"
|
||||
},
|
||||
"data": "CmlmIFsgLW4gIiRDSV9ORVRSQ19NQUNISU5FIiBdOyB0aGVuCmNhdCA8PEVPRiA+ICRIT01FLy5uZXRyYwptYWNoaW5lICRDSV9ORVRSQ19NQUNISU5FCmxvZ2luICRDSV9ORVRSQ19VU0VSTkFNRQpwYXNzd29yZCAkQ0lfTkVUUkNfUEFTU1dPUkQKRU9GCmNobW9kIDA2MDAgJEhPTUUvLm5ldHJjCmZpCnVuc2V0IENJX05FVFJDX1VTRVJOQU1FCnVuc2V0IENJX05FVFJDX1BBU1NXT1JECnVuc2V0IERST05FX05FVFJDX1VTRVJOQU1FCnVuc2V0IERST05FX05FVFJDX1BBU1NXT1JECnNldCAtZQoKZWNobyArICJlY2hvIGhlbGxvIgplY2hvIGhlbGxvCgplY2hvICsgImVjaG8gd29ybGQiCmVjaG8gd29ybGQKCg=="
|
||||
}
|
||||
],
|
||||
"docker": {
|
||||
"volumes": [
|
||||
{
|
||||
"metadata": {
|
||||
"uid": "8cx1najyo07pzz5upznp2s6wx8zf81fs",
|
||||
"namespace": "lfjucpr8oj42d4raoo88mor0ueq7aipk",
|
||||
"name": "workspace"
|
||||
},
|
||||
"temp": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue