// Copyright 2019 Sorint.lab
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package driver

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/pkg/errors"
	"github.com/sorintlab/agola/internal/common"
	"github.com/sorintlab/agola/internal/services/runservice/executor/registry"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/stdcopy"
	"go.uber.org/zap"
)

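// DockerDriver runs pods as containers on a local Docker daemon. The daemon
// connection is configured from the standard Docker environment variables
// (see client.NewEnvClient).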
type DockerDriver struct {
	logger            *zap.Logger
	client            *client.Client
	initVolumeHostDir string
	toolboxPath       string
	executorID        string
}

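// NewDockerDriver returns a DockerDriver talking to the Docker daemon
// configured through the standard Docker environment variables.
//
// A minimal usage sketch; the executor ID, host directory and toolbox path
// below are illustrative values, not defaults:
//
//	logger, _ := zap.NewProduction()
//	d, err := NewDockerDriver(logger, "executor-01", "/tmp/agola-initvol", "/usr/local/bin/agola-toolbox")
//	if err != nil {
//		// handle error
//	}
//	if err := d.Setup(context.TODO()); err != nil {
//		// handle error
//	}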
func NewDockerDriver(logger *zap.Logger, executorID, initVolumeHostDir, toolboxPath string) (*DockerDriver, error) {
	cli, err := client.NewEnvClient()
	if err != nil {
		return nil, err
	}
	return &DockerDriver{
		logger:            logger,
		client:            cli,
		initVolumeHostDir: initVolumeHostDir,
		toolboxPath:       toolboxPath,
		executorID:        executorID,
	}, nil
}

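// Setup prepares the driver for use. Currently it only copies the
// agola-toolbox binary into the init volume host directory (see CopyToolbox).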
func (d *DockerDriver) Setup(ctx context.Context) error {
	return d.CopyToolbox(ctx)
}

// CopyToolbox is a hack needed when running the executor inside a docker
// container. It copies the agola-toolbox binary from the container to a
// host path so it can be bind mounted to the other containers.
func (d *DockerDriver) CopyToolbox(ctx context.Context) error {
	// by default always try to pull the image so we are sure only authorized users can fetch them
	// see https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages
	reader, err := d.client.ImagePull(ctx, "busybox", types.ImagePullOptions{})
	if err != nil {
		return err
	}
	io.Copy(os.Stdout, reader)

	resp, err := d.client.ContainerCreate(ctx, &container.Config{
		Entrypoint: []string{"cat"},
		Image:      "busybox",
		Tty:        true,
	}, &container.HostConfig{
		Binds: []string{fmt.Sprintf("%s:%s", d.initVolumeHostDir, "/tmp/agola")},
	}, nil, "")
	if err != nil {
		return err
	}

	containerID := resp.ID

	if err := d.client.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
		return err
	}

	srcInfo, err := archive.CopyInfoSourcePath(d.toolboxPath, false)
	if err != nil {
		return err
	}

	srcArchive, err := archive.TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer srcArchive.Close()

	options := types.CopyToContainerOptions{
		AllowOverwriteDirWithFile: false,
		CopyUIDGID:                false,
	}

	if err := d.client.CopyToContainer(ctx, containerID, "/tmp/agola", srcArchive, options); err != nil {
		return err
	}

	// ignore remove error
	d.client.ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{Force: true})

	return nil
}

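// Archs reports the architectures this driver can run containers for. Since
// it talks to the local daemon, that is just the executor's own GOARCH.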
func (d *DockerDriver) Archs(ctx context.Context) ([]common.Arch, error) {
	// since we are using the local docker driver we can return our go arch information
	return []common.Arch{common.ArchFromString(runtime.GOARCH)}, nil
}

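// NewPod creates and starts the pod described by podConfig. With the docker
// driver a pod is currently a single container (only the first entry of
// podConfig.Containers is used); image pull progress is streamed to out.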
func (d *DockerDriver) NewPod(ctx context.Context, podConfig *PodConfig, out io.Writer) (Pod, error) {
	if len(podConfig.Containers) == 0 {
		return nil, errors.Errorf("empty container config")
	}

	containerConfig := podConfig.Containers[0]

	regName, err := registry.GetRegistry(containerConfig.Image)
	if err != nil {
		return nil, err
	}
	var registryAuth registry.DockerConfigAuth
	if podConfig.DockerConfig != nil {
		if regauth, ok := podConfig.DockerConfig.Auths[regName]; ok {
			registryAuth = regauth
		}
	}
	buf, err := json.Marshal(registryAuth)
	if err != nil {
		return nil, err
	}
	registryAuthEnc := base64.URLEncoding.EncodeToString(buf)

	// by default always try to pull the image so we are sure only authorized users can fetch them
	// see https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#alwayspullimages
	reader, err := d.client.ImagePull(ctx, containerConfig.Image, types.ImagePullOptions{RegistryAuth: registryAuthEnc})
	if err != nil {
		return nil, err
	}
	io.Copy(out, reader)

	labels := map[string]string{}
	labels[agolaLabelKey] = agolaLabelValue
	labels[podIDKey] = podConfig.ID
	labels[taskIDKey] = podConfig.TaskID

	containerLabels := map[string]string{}
	for k, v := range labels {
		containerLabels[k] = v
	}
	containerLabels[containerIndexKey] = "0"

	resp, err := d.client.ContainerCreate(ctx, &container.Config{
		Entrypoint: containerConfig.Cmd,
		Env:        makeEnvSlice(containerConfig.Env),
		WorkingDir: containerConfig.WorkingDir,
		Image:      containerConfig.Image,
		Tty:        true,
		Labels:     containerLabels,
	}, &container.HostConfig{
		Binds:         []string{fmt.Sprintf("%s:%s", d.initVolumeHostDir, podConfig.InitVolumeDir)},
		ReadonlyPaths: []string{fmt.Sprintf("%s:%s", d.initVolumeHostDir, podConfig.InitVolumeDir)},
		Privileged:    containerConfig.Privileged,
	}, nil, "")
	if err != nil {
		return nil, err
	}

	containerID := resp.ID

	if err := d.client.ContainerStart(ctx, containerID, types.ContainerStartOptions{}); err != nil {
		return nil, err
	}

	args := filters.NewArgs()
	for k, v := range labels {
		args.Add("label", fmt.Sprintf("%s=%s", k, v))
	}

	containers, err := d.client.ContainerList(ctx,
		types.ContainerListOptions{
			Filters: args,
		})
	if err != nil {
		return nil, err
	}
	if len(containers) == 0 {
		return nil, errors.Errorf("no container with id %s", containerID)
	}

	return &DockerPod{
		id:         podConfig.ID,
		client:     d.client,
		containers: containers,
		executorID: d.executorID,
	}, nil
}

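// ExecutorGroup returns the executor group ID. The docker driver has no
// separate notion of a group, so the executor ID doubles as the group ID.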
func (d *DockerDriver) ExecutorGroup(ctx context.Context) (string, error) {
	// use the same group as the executor id
	return d.executorID, nil
}

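// GetExecutors returns the IDs of the executors in this executor group; for
// the docker driver that is only the current executor.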
func (d *DockerDriver) GetExecutors(ctx context.Context) ([]string, error) {
	return []string{d.executorID}, nil
}

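// GetPods returns the pods known to the local daemon, grouping containers by
// their agola pod ID label. If all is true, stopped containers are included.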
func (d *DockerDriver) GetPods(ctx context.Context, all bool) ([]Pod, error) {
	args := filters.NewArgs()

	containers, err := d.client.ContainerList(ctx,
		types.ContainerListOptions{
			Filters: args,
			All:     all,
		})
	if err != nil {
		return nil, err
	}

	podsMap := map[string]*DockerPod{}
	for _, container := range containers {
		podID, ok := container.Labels[podIDKey]
		if !ok {
			// skip container
			continue
		}
		if pod, ok := podsMap[podID]; !ok {
			pod := &DockerPod{
				id:         podID,
				client:     d.client,
				containers: []types.Container{container},
				executorID: d.executorID,
			}
			podsMap[podID] = pod
		} else {
			pod.containers = append(pod.containers, container)
		}
	}

	// Put the containers in the right order based on their containerIndexKey label value
	for _, container := range containers {
		podID, ok := container.Labels[podIDKey]
		if !ok {
			// skip container
			continue
		}
		cIndexStr, ok := container.Labels[containerIndexKey]
		if !ok {
			// remove pod since some of its containers don't have the right labels
			delete(podsMap, podID)
			continue
		}
		cIndex, err := strconv.Atoi(cIndexStr)
		if err != nil {
			// remove pod since some of its containers don't have the right labels
			delete(podsMap, podID)
			continue
		}
		pod, ok := podsMap[podID]
		if !ok {
			// pod was removed above because of a bad container, skip it
			continue
		}
		// overwrite containers with the right order
		pod.containers[cIndex] = container

		// add labels from the container with index 0
		if cIndex == 0 {
			podLabels := map[string]string{}
			// keep only labels starting with our prefix
			for labelName, labelValue := range container.Labels {
				if strings.HasPrefix(labelName, labelPrefix) {
					podLabels[labelName] = labelValue
				}
			}
			pod.labels = podLabels
		}
	}

	pods := make([]Pod, 0, len(podsMap))
	for _, pod := range podsMap {
		pods = append(pods, pod)
	}
	return pods, nil
}

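// podLabelsFromContainer extracts the pod labels from a container's labels,
// keeping only the ones managed by agola (i.e. starting with labelPrefix).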
func podLabelsFromContainer(containerLabels map[string]string) map[string]string {
	labels := map[string]string{}
	// keep only labels starting with our prefix
	for k, v := range containerLabels {
		if strings.HasPrefix(k, labelPrefix) {
			labels[k] = v
		}
	}
	return labels
}

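// DockerPod is the Pod implementation used by DockerDriver; it wraps the
// containers sharing the same pod ID label.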
type DockerPod struct {
	id         string
	client     *client.Client
	labels     map[string]string
	containers []types.Container
	executorID string
}

func (dp *DockerPod) ID() string {
	return dp.id
}

func (dp *DockerPod) ExecutorID() string {
	return dp.executorID
}

func (dp *DockerPod) TaskID() string {
	return dp.labels[taskIDKey]
}

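// Stop stops all the pod's containers, giving each one a one second timeout
// before it is killed.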
func (dp *DockerPod) Stop(ctx context.Context) error {
	d := 1 * time.Second
	errs := []error{}
	for _, container := range dp.containers {
		if err := dp.client.ContainerStop(ctx, container.ID, &d); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) != 0 {
		return errors.Errorf("stop errors: %v", errs)
	}
	return nil
}

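// Remove forcibly removes all the pod's containers.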
func (dp *DockerPod) Remove(ctx context.Context) error {
	errs := []error{}
	for _, container := range dp.containers {
		if err := dp.client.ContainerRemove(ctx, container.ID, types.ContainerRemoveOptions{Force: true}); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) != 0 {
		return errors.Errorf("remove errors: %v", errs)
	}
	return nil
}

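// DockerContainerExec is an exec session created by DockerPod.Exec; it keeps
// the hijacked connection, the stdin writer and the channel signalling the
// end of the output copy.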
type DockerContainerExec struct {
	execID string
	hresp  *types.HijackedResponse
	client *client.Client
	endCh  chan error

	stdin io.WriteCloser
}

// Stdin is a wrapped HijackedResponse implementing io.WriteCloser so users can
// easily close stdin. Internally it will close only the write side of the conn.
type Stdin struct {
	hresp *types.HijackedResponse
}

func (s *Stdin) Write(p []byte) (int, error) {
	return s.hresp.Conn.Write(p)
}

func (s *Stdin) Close() error {
	return s.hresp.CloseWrite()
}

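// Exec starts a command inside the pod's first container using the docker
// exec API and returns once the exec is attached; use Wait to wait for it to
// finish and get its exit code.
//
// A minimal usage sketch, where pod was returned by the driver's NewPod and
// the command and writers are illustrative:
//
//	ce, err := pod.Exec(ctx, &ExecConfig{
//		Cmd:    []string{"/bin/sh", "-c", "echo hello"},
//		Stdout: os.Stdout,
//		Stderr: os.Stderr,
//	})
//	if err != nil {
//		// handle error
//	}
//	exitCode, err := ce.Wait(ctx)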
func (dp *DockerPod) Exec(ctx context.Context, execConfig *ExecConfig) (ContainerExec, error) {
	endCh := make(chan error)

	dockerExecConfig := types.ExecConfig{
		Cmd:          execConfig.Cmd,
		Env:          makeEnvSlice(execConfig.Env),
		Tty:          execConfig.Tty,
		WorkingDir:   execConfig.WorkingDir,
		AttachStdin:  true,
		AttachStdout: execConfig.Stdout != nil,
		AttachStderr: execConfig.Stderr != nil,
		User:         execConfig.User,
	}

	response, err := dp.client.ContainerExecCreate(ctx, dp.containers[0].ID, dockerExecConfig)
	if err != nil {
		return nil, err
	}
	execStartCheck := types.ExecStartCheck{
		Detach: dockerExecConfig.Detach,
		Tty:    dockerExecConfig.Tty,
	}
	hresp, err := dp.client.ContainerExecAttach(ctx, response.ID, execStartCheck)
	if err != nil {
		return nil, err
	}

	stdout := execConfig.Stdout
	stderr := execConfig.Stderr
	if execConfig.Stdout == nil {
		stdout = ioutil.Discard
	}
	if execConfig.Stderr == nil {
		stderr = ioutil.Discard
	}

	// copy both stdout and stderr to the provided writers
	go func() {
		var err error
		if execConfig.Tty {
			_, err = io.Copy(stdout, hresp.Reader)
		} else {
			_, err = stdcopy.StdCopy(stdout, stderr, hresp.Reader)
		}
		endCh <- err
	}()

	stdin := &Stdin{
		hresp: &hresp,
	}

	return &DockerContainerExec{
		execID: response.ID,
		hresp:  &hresp,
		client: dp.client,
		stdin:  stdin,
		endCh:  endCh,
	}, nil
}

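// Wait waits for the exec's output copy to finish, then inspects the exec and
// returns its exit code.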
func (e *DockerContainerExec) Wait(ctx context.Context) (int, error) {
	// ignore the copy error, we'll use the exit code of the exec
	<-e.endCh

	resp, err := e.client.ContainerExecInspect(ctx, e.execID)
	if err != nil {
		return -1, err
	}
	exitCode := resp.ExitCode

	e.hresp.Close()

	return exitCode, nil
}

func (e *DockerContainerExec) Stdin() io.WriteCloser {
	return e.stdin
}

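// makeEnvSlice converts an environment map to the KEY=VALUE slice format
// expected by the docker API.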
func makeEnvSlice(env map[string]string) []string {
	envList := make([]string, 0, len(env))
	for k, v := range env {
		envList = append(envList, fmt.Sprintf("%s=%s", k, v))
	}

	return envList
}