agola/internal/services/executor/driver/docker.go

// Copyright 2019 Sorint.lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
2019-07-01 09:40:20 +00:00
"agola.io/agola/internal/services/executor/registry"
"agola.io/agola/services/types"
errors "golang.org/x/xerrors"
2019-02-21 14:54:50 +00:00
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/volume"
"github.com/docker/docker/client"
"github.com/docker/docker/pkg/archive"
"github.com/docker/docker/pkg/stdcopy"
"github.com/rs/zerolog"
)
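
// DockerDriver is a driver that runs pods as containers on the local docker
// daemon.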
type DockerDriver struct {
log zerolog.Logger
client *client.Client
toolboxPath string
initImage string
initDockerConfig *registry.DockerConfig
executorID string
arch types.Arch
}
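
// NewDockerDriver returns a DockerDriver using a docker client configured
// from the environment and pinned to API version 1.26.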
func NewDockerDriver(log zerolog.Logger, executorID, toolboxPath, initImage string, initDockerConfig *registry.DockerConfig) (*DockerDriver, error) {
cli, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.26"))
if err != nil {
return nil, err
}
return &DockerDriver{
log: log,
client: cli,
toolboxPath: toolboxPath,
initImage: initImage,
initDockerConfig: initDockerConfig,
executorID: executorID,
arch: types.ArchFromString(runtime.GOARCH),
}, nil
}
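
// Setup is a noop for the docker driver since there's nothing to prepare on
// the local daemon before creating pods.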
func (d *DockerDriver) Setup(ctx context.Context) error {
return nil
}
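
// createToolboxVolume creates a volume labeled with the pod id, mounts it on
// /tmp/agola inside a temporary container created from the init image and
// copies the agola-toolbox binary for the current arch into it.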
func (d *DockerDriver) createToolboxVolume(ctx context.Context, podID string, out io.Writer) (*dockertypes.Volume, error) {
if err := d.fetchImage(ctx, d.initImage, false, d.initDockerConfig, out); err != nil {
return nil, err
}
labels := map[string]string{}
labels[agolaLabelKey] = agolaLabelValue
labels[executorIDKey] = d.executorID
labels[podIDKey] = podID
toolboxVol, err := d.client.VolumeCreate(ctx, volume.VolumeCreateBody{Driver: "local", Labels: labels})
if err != nil {
return nil, err
}
resp, err := d.client.ContainerCreate(ctx, &container.Config{
Entrypoint: []string{"cat"},
Image: d.initImage,
Tty: true,
}, &container.HostConfig{
Binds: []string{fmt.Sprintf("%s:%s", toolboxVol.Name, "/tmp/agola")},
}, nil, "")
if err != nil {
return nil, err
}
containerID := resp.ID
if err := d.client.ContainerStart(ctx, containerID, dockertypes.ContainerStartOptions{}); err != nil {
return nil, err
}
toolboxExecPath, err := toolboxExecPath(d.toolboxPath, d.arch)
if err != nil {
return nil, errors.Errorf("failed to get toolbox path for arch %q: %w", d.arch, err)
}
srcInfo, err := archive.CopyInfoSourcePath(toolboxExecPath, false)
if err != nil {
return nil, err
}
srcInfo.RebaseName = "agola-toolbox"
srcArchive, err := archive.TarResource(srcInfo)
if err != nil {
return nil, err
}
defer srcArchive.Close()
options := dockertypes.CopyToContainerOptions{
AllowOverwriteDirWithFile: false,
CopyUIDGID: false,
}
if err := d.client.CopyToContainer(ctx, containerID, "/tmp/agola", srcArchive, options); err != nil {
return nil, err
}
// ignore remove error
_ = d.client.ContainerRemove(ctx, containerID, dockertypes.ContainerRemoveOptions{Force: true})
return &toolboxVol, nil
}
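
// Archs returns the architectures this driver can run containers on.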
func (d *DockerDriver) Archs(ctx context.Context) ([]types.Arch, error) {
// since we are using the local docker driver we can return our go arch information
return []types.Arch{d.arch}, nil
}
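
// NewPod creates the pod toolbox volume and a container for every entry in
// podConfig.Containers. The container with index 0 is the main container and
// the other containers share its network namespace. The started containers
// are then fetched back by label and returned as a DockerPod.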
func (d *DockerDriver) NewPod(ctx context.Context, podConfig *PodConfig, out io.Writer) (Pod, error) {
if len(podConfig.Containers) == 0 {
return nil, errors.Errorf("empty container config")
}
toolboxVol, err := d.createToolboxVolume(ctx, podConfig.ID, out)
if err != nil {
return nil, err
}
var mainContainerID string
for cindex := range podConfig.Containers {
resp, err := d.createContainer(ctx, cindex, podConfig, mainContainerID, toolboxVol, out)
if err != nil {
return nil, err
}
containerID := resp.ID
if cindex == 0 {
// save the main container id; the other containers will be attached to its network
mainContainerID = containerID
}
if err := d.client.ContainerStart(ctx, containerID, dockertypes.ContainerStartOptions{}); err != nil {
return nil, err
}
}
searchLabels := map[string]string{}
searchLabels[agolaLabelKey] = agolaLabelValue
searchLabels[executorIDKey] = d.executorID
searchLabels[podIDKey] = podConfig.ID
searchLabels[taskIDKey] = podConfig.TaskID
args := filters.NewArgs()
for k, v := range searchLabels {
args.Add("label", fmt.Sprintf("%s=%s", k, v))
}
containers, err := d.client.ContainerList(ctx,
dockertypes.ContainerListOptions{
Filters: args,
})
if err != nil {
return nil, err
}
if len(containers) == 0 {
return nil, errors.Errorf("no container with labels %s", searchLabels)
}
pod := &DockerPod{
id: podConfig.ID,
client: d.client,
executorID: d.executorID,
containers: []*DockerContainer{},
toolboxVolumeName: toolboxVol.Name,
initVolumeDir: podConfig.InitVolumeDir,
}
count := 0
seenIndexes := map[int]struct{}{}
for _, container := range containers {
cIndexStr, ok := container.Labels[containerIndexKey]
if !ok {
// ignore container
continue
}
cIndex, err := strconv.Atoi(cIndexStr)
if err != nil {
// ignore container
continue
}
if _, ok := seenIndexes[cIndex]; ok {
return nil, errors.Errorf("duplicate container with index %d", cIndex)
}
dContainer := &DockerContainer{
Index: cIndex,
Container: container,
}
pod.containers = append(pod.containers, dContainer)
seenIndexes[cIndex] = struct{}{}
count++
}
if count != len(containers) {
return nil, errors.Errorf("expected %d containers but got %d", len(containers), count)
}
// put the containers in the right order based on their container index
sort.Sort(ContainerSlice(pod.containers))
return pod, nil
}
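
// fetchImage pulls image using the auth of its registry taken from
// registryConfig. The image is pulled when alwaysFetch is true, when its tag
// is "latest" or when it's not already available locally.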
func (d *DockerDriver) fetchImage(ctx context.Context, image string, alwaysFetch bool, registryConfig *registry.DockerConfig, out io.Writer) error {
regName, err := registry.GetRegistry(image)
if err != nil {
return err
}
var registryAuth registry.DockerConfigAuth
if registryConfig != nil {
if regauth, ok := registryConfig.Auths[regName]; ok {
registryAuth = regauth
}
}
buf, err := json.Marshal(registryAuth)
if err != nil {
return err
}
registryAuthEnc := base64.URLEncoding.EncodeToString(buf)
tag, err := registry.GetImageTagOrDigest(image)
if err != nil {
return err
}
args := filters.NewArgs()
args.Add("reference", image)
img, err := d.client.ImageList(ctx, dockertypes.ImageListOptions{Filters: args})
if err != nil {
return err
}
exists := len(img) > 0
// fetch only if forced, if the tag is "latest" or if the image doesn't exist locally
if alwaysFetch || tag == "latest" || !exists {
reader, err := d.client.ImagePull(ctx, image, dockertypes.ImagePullOptions{RegistryAuth: registryAuthEnc})
if err != nil {
return err
}
_, err = io.Copy(out, reader)
return err
}
return nil
}
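
// createContainer creates (without starting it) the container for the entry
// at index of podConfig.Containers, labeling it with the executor, pod, task
// and container index keys.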
func (d *DockerDriver) createContainer(ctx context.Context, index int, podConfig *PodConfig, maincontainerID string, toolboxVol *dockertypes.Volume, out io.Writer) (*container.ContainerCreateCreatedBody, error) {
containerConfig := podConfig.Containers[index]
// by default always try to pull the image so we are sure only authorized users can fetch them
// see https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages
if err := d.fetchImage(ctx, containerConfig.Image, true, podConfig.DockerConfig, out); err != nil {
return nil, err
}
labels := map[string]string{}
labels[agolaLabelKey] = agolaLabelValue
labels[executorIDKey] = d.executorID
labels[podIDKey] = podConfig.ID
labels[taskIDKey] = podConfig.TaskID
containerLabels := map[string]string{}
for k, v := range labels {
containerLabels[k] = v
}
containerLabels[containerIndexKey] = strconv.Itoa(index)
cliContainerConfig := &container.Config{
Entrypoint: containerConfig.Cmd,
Env: makeEnvSlice(containerConfig.Env),
WorkingDir: containerConfig.WorkingDir,
Image: containerConfig.Image,
Tty: true,
Labels: containerLabels,
}
cliHostConfig := &container.HostConfig{
Privileged: containerConfig.Privileged,
}
if index == 0 {
// main container requires the initvolume containing the toolbox
// TODO(sgotti) migrate this to cliHostConfig.Mounts
cliHostConfig.Binds = []string{fmt.Sprintf("%s:%s", toolboxVol.Name, podConfig.InitVolumeDir)}
cliHostConfig.ReadonlyPaths = []string{fmt.Sprintf("%s:%s", toolboxVol.Name, podConfig.InitVolumeDir)}
} else {
// attach other containers to maincontainer network
cliHostConfig.NetworkMode = container.NetworkMode(fmt.Sprintf("container:%s", maincontainerID))
}
var mounts []mount.Mount
for _, vol := range containerConfig.Volumes {
if vol.TmpFS != nil {
mounts = append(mounts, mount.Mount{
Type: mount.TypeTmpfs,
Target: vol.Path,
TmpfsOptions: &mount.TmpfsOptions{
SizeBytes: vol.TmpFS.Size,
},
})
} else {
return nil, errors.Errorf("missing volume config")
}
}
if mounts != nil {
cliHostConfig.Mounts = mounts
}
resp, err := d.client.ContainerCreate(ctx, cliContainerConfig, cliHostConfig, nil, "")
return &resp, err
}
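
// ExecutorGroup returns the executor group of this driver.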
func (d *DockerDriver) ExecutorGroup(ctx context.Context) (string, error) {
// use the same group as the executor id
return d.executorID, nil
}
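
// GetExecutors returns the ids of the executors in the same executor group.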
func (d *DockerDriver) GetExecutors(ctx context.Context) ([]string, error) {
return []string{d.executorID}, nil
}
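
// GetPods rebuilds the pods state from the labels of the containers and
// volumes found on the local daemon (including stopped containers when all
// is true).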
func (d *DockerDriver) GetPods(ctx context.Context, all bool) ([]Pod, error) {
args := filters.NewArgs()
containers, err := d.client.ContainerList(ctx,
dockertypes.ContainerListOptions{
Filters: args,
All: all,
})
if err != nil {
return nil, err
}
volumes, err := d.client.VolumeList(ctx, args)
if err != nil {
return nil, err
}
podsMap := map[string]*DockerPod{}
for _, container := range containers {
executorID, ok := container.Labels[executorIDKey]
if !ok || executorID != d.executorID {
// skip container
continue
}
podID, ok := container.Labels[podIDKey]
if !ok {
// skip container
continue
}
if _, ok := podsMap[podID]; !ok {
pod := &DockerPod{
id: podID,
client: d.client,
executorID: d.executorID,
containers: []*DockerContainer{},
// TODO(sgotti) initvolumeDir isn't set
}
podsMap[podID] = pod
}
}
for _, container := range containers {
executorID, ok := container.Labels[executorIDKey]
if !ok || executorID != d.executorID {
// skip container
continue
}
podID, ok := container.Labels[podIDKey]
if !ok {
// skip container
continue
}
cIndexStr, ok := container.Labels[containerIndexKey]
if !ok {
// remove pod since some of its containers don't have the right labels
delete(podsMap, podID)
continue
}
cIndex, err := strconv.Atoi(cIndexStr)
if err != nil {
// remove pod since some of its containers don't have the right labels
delete(podsMap, podID)
continue
}
pod, ok := podsMap[podID]
if !ok {
// the pod was already removed due to a container with bad labels, skip its remaining containers
continue
}
dContainer := &DockerContainer{
Index: cIndex,
Container: container,
}
pod.containers = append(pod.containers, dContainer)
// take the pod labels from the container with index 0
if cIndex == 0 {
podLabels := map[string]string{}
// keep only labels starting with our prefix
for labelName, labelValue := range container.Labels {
if strings.HasPrefix(labelName, labelPrefix) {
podLabels[labelName] = labelValue
}
}
pod.labels = podLabels
}
}
for _, vol := range volumes.Volumes {
executorID, ok := vol.Labels[executorIDKey]
if !ok || executorID != d.executorID {
// skip vol
continue
}
podID, ok := vol.Labels[podIDKey]
if !ok {
// skip vol
continue
}
pod, ok := podsMap[podID]
if !ok {
// skip vol
continue
}
pod.toolboxVolumeName = vol.Name
}
pods := make([]Pod, 0, len(podsMap))
for _, pod := range podsMap {
// put the containers in the right order based on their container index
sort.Sort(ContainerSlice(pod.containers))
pods = append(pods, pod)
}
return pods, nil
}
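
// DockerPod is a group of containers sharing the same pod id, plus the
// toolbox volume mounted in its main container.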
type DockerPod struct {
id string
client *client.Client
labels map[string]string
containers []*DockerContainer
toolboxVolumeName string
executorID string
initVolumeDir string
}
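
// DockerContainer wraps a docker container with its index inside the pod.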
type DockerContainer struct {
Index int
dockertypes.Container
}
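
// ContainerSlice implements sort.Interface to order containers by their pod
// index.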
type ContainerSlice []*DockerContainer
func (p ContainerSlice) Len() int { return len(p) }
func (p ContainerSlice) Less(i, j int) bool { return p[i].Index < p[j].Index }
func (p ContainerSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
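
// ID returns the pod id.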
func (dp *DockerPod) ID() string {
return dp.id
}
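
// ExecutorID returns the id of the executor owning this pod.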
func (dp *DockerPod) ExecutorID() string {
return dp.executorID
}
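
// TaskID returns the id of the task this pod was created for.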
func (dp *DockerPod) TaskID() string {
return dp.labels[taskIDKey]
}
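
// Stop stops all the containers of the pod, giving each one second to exit
// before being killed.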
func (dp *DockerPod) Stop(ctx context.Context) error {
d := 1 * time.Second
errs := []error{}
for _, container := range dp.containers {
if err := dp.client.ContainerStop(ctx, container.ID, &d); err != nil {
errs = append(errs, err)
}
}
if len(errs) != 0 {
return errors.Errorf("stop errors: %v", errs)
}
return nil
}
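
// Remove force removes all the containers of the pod and, if present, its
// toolbox volume.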
func (dp *DockerPod) Remove(ctx context.Context) error {
errs := []error{}
for _, container := range dp.containers {
if err := dp.client.ContainerRemove(ctx, container.ID, dockertypes.ContainerRemoveOptions{Force: true}); err != nil {
errs = append(errs, err)
}
}
if dp.toolboxVolumeName != "" {
if err := dp.client.VolumeRemove(ctx, dp.toolboxVolumeName, true); err != nil {
errs = append(errs, err)
}
}
if len(errs) != 0 {
return errors.Errorf("remove errors: %v", errs)
}
return nil
}
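
// DockerContainerExec is a handle to a command executed inside the main
// container of a pod.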
type DockerContainerExec struct {
execID string
hresp *dockertypes.HijackedResponse
client *client.Client
endCh chan error
stdin io.WriteCloser
}

// Stdin is a wrapped HijackedResponse implementing io.WriteCloser so users can
// easily close stdin. Internally it will close only the write side of the conn.
type Stdin struct {
hresp *dockertypes.HijackedResponse
}
func (s *Stdin) Write(p []byte) (int, error) {
return s.hresp.Conn.Write(p)
}
func (s *Stdin) Close() error {
return s.hresp.CloseWrite()
}
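
// Exec executes a command inside the main container of the pod going through
// the agola-toolbox exec wrapper, so Env and WorkingDir are honored also on
// old docker API versions.
//
// A minimal usage sketch (assuming an already created pod and ctx):
//
//	ce, err := pod.Exec(ctx, &ExecConfig{Cmd: []string{"ls"}, Stdout: os.Stdout})
//	if err != nil {
//		return err
//	}
//	exitCode, err := ce.Wait(ctx)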
func (dp *DockerPod) Exec(ctx context.Context, execConfig *ExecConfig) (ContainerExec, error) {
endCh := make(chan error)
// old docker versions don't support providing Env (before API 1.25) and
// WorkingDir (before API 1.35) in the exec command.
// Use a toolbox command that will set them up and then exec the real command.
envj, err := json.Marshal(execConfig.Env)
if err != nil {
return nil, err
}
cmd := []string{filepath.Join(dp.initVolumeDir, "agola-toolbox"), "exec", "-e", string(envj), "-w", execConfig.WorkingDir, "--"}
cmd = append(cmd, execConfig.Cmd...)
dockerExecConfig := dockertypes.ExecConfig{
Cmd: cmd,
Tty: execConfig.Tty,
AttachStdin: execConfig.AttachStdin,
AttachStdout: execConfig.Stdout != nil,
AttachStderr: execConfig.Stderr != nil,
User: execConfig.User,
}
response, err := dp.client.ContainerExecCreate(ctx, dp.containers[0].ID, dockerExecConfig)
if err != nil {
return nil, err
}
execStartCheck := dockertypes.ExecStartCheck{
Detach: dockerExecConfig.Detach,
Tty: dockerExecConfig.Tty,
}
hresp, err := dp.client.ContainerExecAttach(ctx, response.ID, execStartCheck)
if err != nil {
return nil, err
}
stdout := execConfig.Stdout
stderr := execConfig.Stderr
if execConfig.Stdout == nil {
stdout = ioutil.Discard
}
if execConfig.Stderr == nil {
stderr = ioutil.Discard
}
// copy stdout and stderr to the provided writers
go func() {
var err error
if execConfig.Tty {
_, err = io.Copy(stdout, hresp.Reader)
} else {
_, err = stdcopy.StdCopy(stdout, stderr, hresp.Reader)
}
endCh <- err
}()
stdin := &Stdin{
hresp: &hresp,
}
return &DockerContainerExec{
execID: response.ID,
hresp: &hresp,
client: dp.client,
stdin: stdin,
endCh: endCh,
}, nil
}
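
// Wait waits for the output copy to finish, then polls the exec until it's
// no longer running and returns its exit code.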
func (e *DockerContainerExec) Wait(ctx context.Context) (int, error) {
// ignore the error from the copy goroutine, we'll use the exit code of the exec
select {
case <-ctx.Done():
return 0, ctx.Err()
case <-e.endCh:
}
var exitCode int
for {
resp, err := e.client.ContainerExecInspect(ctx, e.execID)
if err != nil {
return -1, err
}
if !resp.Running {
exitCode = resp.ExitCode
break
}
time.Sleep(500 * time.Millisecond)
}
e.hresp.Close()
return exitCode, nil
}
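
// Stdin returns a writer connected to the exec stdin. Closing it closes only
// the write side of the connection.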
func (e *DockerContainerExec) Stdin() io.WriteCloser {
return e.stdin
}
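
// makeEnvSlice converts an env map to the "key=value" slice format expected
// by the docker API.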
func makeEnvSlice(env map[string]string) []string {
envList := make([]string, 0, len(env))
for k, v := range env {
envList = append(envList, fmt.Sprintf("%s=%s", k, v))
}
return envList
}