runservice: handle multiple executor archs

An executor can handle multiple archs (e.g. an executor that talks to a k8s cluster
with multi-arch nodes). Instead of encoding the arch in a label, report the archs
in a dedicated executor field.
Simone Gotti 2019-04-25 13:30:46 +02:00
parent e0d37b08f2
commit 6f88bd3d53
6 changed files with 47 additions and 9 deletions
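
For reference, the diff below leans on the common.Arch type and the common.ArchFromString helper from internal/common, which are not shown in this commit. A minimal sketch of what they plausibly look like (an illustrative assumption, not the actual agola code):

	// Sketch of the common.Arch type and ArchFromString helper this diff
	// relies on; the real definitions live in internal/common and may differ.
	package main

	import (
		"fmt"
		"runtime"
		"strings"
	)

	type Arch string

	// ArchFromString normalizes an architecture string (runtime.GOARCH or a
	// k8s node's Status.NodeInfo.Architecture) into a canonical Arch value.
	func ArchFromString(s string) Arch {
		switch strings.ToLower(s) {
		case "x86_64", "amd64":
			return Arch("amd64")
		case "aarch64", "arm64":
			return Arch("arm64")
		default:
			return Arch(s)
		}
	}

	func main() {
		fmt.Println(ArchFromString(runtime.GOARCH)) // e.g. "amd64"
	}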

@@ -22,11 +22,13 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"runtime"
 	"strconv"
 	"strings"
 	"time"

 	"github.com/pkg/errors"
+	"github.com/sorintlab/agola/internal/common"
 	"github.com/sorintlab/agola/internal/services/runservice/executor/registry"

 	"github.com/docker/docker/api/types"
@@ -119,6 +121,11 @@ func (d *DockerDriver) CopyToolbox(ctx context.Context) error {
 	return nil
 }

+func (d *DockerDriver) Archs(ctx context.Context) ([]common.Arch, error) {
+	// since we are using the local docker driver we can return our go arch information
+	return []common.Arch{common.ArchFromString(runtime.GOARCH)}, nil
+}
+
 func (d *DockerDriver) NewPod(ctx context.Context, podConfig *PodConfig, out io.Writer) (Pod, error) {
 	if len(podConfig.Containers) == 0 {
 		return nil, errors.Errorf("empty container config")

@@ -18,6 +18,7 @@ import (
 	"context"
 	"io"

+	"github.com/sorintlab/agola/internal/common"
 	"github.com/sorintlab/agola/internal/services/runservice/executor/registry"
 )
@@ -47,6 +48,7 @@ type Driver interface {
 	GetPods(ctx context.Context, all bool) ([]Pod, error)
 	ExecutorGroup(ctx context.Context) (string, error)
 	GetExecutors(ctx context.Context) ([]string, error)
+	Archs(ctx context.Context) ([]common.Arch, error)
 }

 type Pod interface {

@@ -29,6 +29,7 @@ import (
 	"github.com/docker/docker/pkg/archive"
 	"github.com/pkg/errors"
 	uuid "github.com/satori/go.uuid"
+	"github.com/sorintlab/agola/internal/common"
 	"go.uber.org/zap"
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -198,6 +199,25 @@ func (d *K8sDriver) Setup(ctx context.Context) error {
 	return nil
 }

+func (d *K8sDriver) Archs(ctx context.Context) ([]common.Arch, error) {
+	// TODO(sgotti) use go client listers instead of querying every time
+	nodeClient := d.client.CoreV1().Nodes()
+	nodes, err := nodeClient.List(metav1.ListOptions{})
+	if err != nil {
+		return nil, err
+	}
+	archsMap := map[common.Arch]struct{}{}
+	archs := []common.Arch{}
+	for _, node := range nodes.Items {
+		archsMap[common.ArchFromString(node.Status.NodeInfo.Architecture)] = struct{}{}
+	}
+	for arch := range archsMap {
+		archs = append(archs, arch)
+	}
+
+	return archs, nil
+}
+
 func (d *K8sDriver) ExecutorGroup(ctx context.Context) (string, error) {
 	return d.executorsGroupID, nil
 }
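
The k8s driver collects archs into a set, so duplicates across nodes collapse: a cluster with two amd64 nodes and one arm64 node reports just amd64 and arm64 (in unspecified order, since Go map iteration is randomized). A standalone sketch of the same dedup idea, using plain strings instead of common.Arch:

	package main

	import "fmt"

	// dedupArchs mirrors the set-based collection in K8sDriver.Archs:
	// one entry per distinct architecture, regardless of node count.
	// Unlike the map-iteration version above, it keeps input order.
	func dedupArchs(nodeArchs []string) []string {
		seen := map[string]struct{}{}
		archs := []string{}
		for _, a := range nodeArchs {
			if _, ok := seen[a]; !ok {
				seen[a] = struct{}{}
				archs = append(archs, a)
			}
		}
		return archs
	}

	func main() {
		// e.g. a three-node cluster with mixed archs
		fmt.Println(dedupArchs([]string{"amd64", "amd64", "arm64"})) // [amd64 arm64]
	}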

@@ -27,7 +27,6 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-	"runtime"
 	"strings"
 	"sync"
 	"time"
@@ -657,9 +656,10 @@ func (e *Executor) sendExecutorStatus(ctx context.Context) error {
 	activeTasks := e.runningTasks.len()

-	// Add special labels (and override config provided ones)
-	arch := runtime.GOARCH
-	labels["arch"] = arch
+	archs, err := e.driver.Archs(ctx)
+	if err != nil {
+		return err
+	}

 	executorGroup, err := e.driver.ExecutorGroup(ctx)
 	if err != nil {
 		return err
@@ -689,6 +689,7 @@ func (e *Executor) sendExecutorStatus(ctx context.Context) error {
 	executor := &types.Executor{
 		ID:        e.id,
+		Archs:     archs,
 		ListenURL: e.listenURL,
 		Labels:    labels,
 		ActiveTasksLimit: e.c.ActiveTasksLimit,

@@ -290,15 +290,21 @@ func (s *Scheduler) chooseExecutor(ctx context.Context, rct *types.RunConfigTask
 	for _, e := range executors {
 		// if arch is not defined use any executor arch
 		if rct.Runtime.Arch != "" {
-			if e.Labels["arch"] != string(rct.Runtime.Arch) {
+			hasArch := false
+			for _, arch := range e.Archs {
+				if arch == rct.Runtime.Arch {
+					hasArch = true
+				}
+			}
+			if !hasArch {
 				continue
 			}
 		}

 		if e.ActiveTasksLimit != 0 {
 			if e.ActiveTasks >= e.ActiveTasksLimit {
 				continue
 			}
 		}

 		return e, nil
 	}
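
The scheduler's arch check above scans all of e.Archs even after finding a match; functionally it is the membership test sketched below (a hypothetical helper, not part of this commit), where an empty requested arch matches any executor:

	package main

	import "fmt"

	type Arch string

	// hasArch mirrors the scheduler check: an empty requested arch matches
	// any executor ("if arch is not defined use any executor arch"),
	// otherwise the executor must have reported that arch. Unlike the diff,
	// this returns as soon as a match is found.
	func hasArch(executorArchs []Arch, want Arch) bool {
		if want == "" {
			return true
		}
		for _, a := range executorArchs {
			if a == want {
				return true
			}
		}
		return false
	}

	func main() {
		archs := []Arch{"amd64", "arm64"} // e.g. a multi-arch k8s executor
		fmt.Println(hasArch(archs, "arm64")) // true
		fmt.Println(hasArch(archs, "s390x")) // false
		fmt.Println(hasArch(archs, ""))      // true: no arch requested
	}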

@@ -664,6 +664,8 @@ type Executor struct {
 	ID        string `json:"id,omitempty"`
 	ListenURL string `json:"listenURL,omitempty"`

+	Archs []common.Arch `json:"archs,omitempty"`
+
 	Labels map[string]string `json:"labels,omitempty"`

 	ActiveTasksLimit int `json:"active_tasks_limit,omitempty"`
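
With the new field, an executor's serialized status gains an archs array. A reduced, hypothetical sketch of the serialization, keeping only the fields touched here and their JSON tags (values invented):

	package main

	import (
		"encoding/json"
		"fmt"
	)

	type Arch string

	// reduced copy of the Executor fields shown in this hunk,
	// with the same JSON tags
	type executorStatus struct {
		ID        string `json:"id,omitempty"`
		ListenURL string `json:"listenURL,omitempty"`
		Archs     []Arch `json:"archs,omitempty"`
	}

	func main() {
		b, _ := json.Marshal(executorStatus{ // error elided for brevity
			ID:        "executor-01",           // invented example values
			ListenURL: "http://10.0.0.5:4001",
			Archs:     []Arch{"amd64", "arm64"},
		})
		fmt.Println(string(b))
		// {"id":"executor-01","listenURL":"http://10.0.0.5:4001","archs":["amd64","arm64"]}
	}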