runservice: handle multiple executor archs
An executor can handle multiple architectures (e.g. an executor that talks to a k8s cluster with multi-arch nodes). Don't use a label for the architectures; use a dedicated executor field instead.
This commit is contained in:
parent
e0d37b08f2
commit
6f88bd3d53
|
@ -22,11 +22,13 @@ import (
|
|||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sorintlab/agola/internal/common"
|
||||
"github.com/sorintlab/agola/internal/services/runservice/executor/registry"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
|
@ -119,6 +121,11 @@ func (d *DockerDriver) CopyToolbox(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *DockerDriver) Archs(ctx context.Context) ([]common.Arch, error) {
|
||||
// since we are using the local docker driver we can return our go arch information
|
||||
return []common.Arch{common.ArchFromString(runtime.GOARCH)}, nil
|
||||
}
|
||||
|
||||
func (d *DockerDriver) NewPod(ctx context.Context, podConfig *PodConfig, out io.Writer) (Pod, error) {
|
||||
if len(podConfig.Containers) == 0 {
|
||||
return nil, errors.Errorf("empty container config")
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/sorintlab/agola/internal/common"
|
||||
"github.com/sorintlab/agola/internal/services/runservice/executor/registry"
|
||||
)
|
||||
|
||||
|
@ -47,6 +48,7 @@ type Driver interface {
|
|||
GetPods(ctx context.Context, all bool) ([]Pod, error)
|
||||
ExecutorGroup(ctx context.Context) (string, error)
|
||||
GetExecutors(ctx context.Context) ([]string, error)
|
||||
Archs(ctx context.Context) ([]common.Arch, error)
|
||||
}
|
||||
|
||||
type Pod interface {
|
||||
|
|
|
@ -29,6 +29,7 @@ import (
|
|||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/pkg/errors"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"github.com/sorintlab/agola/internal/common"
|
||||
"go.uber.org/zap"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
|
@ -198,6 +199,25 @@ func (d *K8sDriver) Setup(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (d *K8sDriver) Archs(ctx context.Context) ([]common.Arch, error) {
|
||||
// TODO(sgotti) use go client listers instead of querying every time
|
||||
nodeClient := d.client.CoreV1().Nodes()
|
||||
nodes, err := nodeClient.List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
archsMap := map[common.Arch]struct{}{}
|
||||
archs := []common.Arch{}
|
||||
for _, node := range nodes.Items {
|
||||
archsMap[common.ArchFromString(node.Status.NodeInfo.Architecture)] = struct{}{}
|
||||
}
|
||||
for arch := range archsMap {
|
||||
archs = append(archs, arch)
|
||||
}
|
||||
|
||||
return archs, nil
|
||||
}
|
||||
|
||||
// ExecutorGroup returns the identifier of the executors group this driver
// belongs to (the stored executorsGroupID). The error result is always nil
// here; it exists to satisfy the Driver interface.
func (d *K8sDriver) ExecutorGroup(ctx context.Context) (string, error) {
	return d.executorsGroupID, nil
}
|
||||
|
|
|
@ -27,7 +27,6 @@ import (
|
|||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -657,9 +656,10 @@ func (e *Executor) sendExecutorStatus(ctx context.Context) error {
|
|||
|
||||
activeTasks := e.runningTasks.len()
|
||||
|
||||
// Add special labels (and override config provided ones)
|
||||
arch := runtime.GOARCH
|
||||
labels["arch"] = arch
|
||||
archs, err := e.driver.Archs(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
executorGroup, err := e.driver.ExecutorGroup(ctx)
|
||||
if err != nil {
|
||||
|
@ -689,6 +689,7 @@ func (e *Executor) sendExecutorStatus(ctx context.Context) error {
|
|||
|
||||
executor := &types.Executor{
|
||||
ID: e.id,
|
||||
Archs: archs,
|
||||
ListenURL: e.listenURL,
|
||||
Labels: labels,
|
||||
ActiveTasksLimit: e.c.ActiveTasksLimit,
|
||||
|
|
|
@ -290,13 +290,19 @@ func (s *Scheduler) chooseExecutor(ctx context.Context, rct *types.RunConfigTask
|
|||
for _, e := range executors {
|
||||
// if arch is not defined use any executor arch
|
||||
if rct.Runtime.Arch != "" {
|
||||
if e.Labels["arch"] != string(rct.Runtime.Arch) {
|
||||
hasArch := false
|
||||
for _, arch := range e.Archs {
|
||||
if arch == rct.Runtime.Arch {
|
||||
hasArch = true
|
||||
}
|
||||
}
|
||||
if !hasArch {
|
||||
continue
|
||||
}
|
||||
if e.ActiveTasksLimit != 0 {
|
||||
if e.ActiveTasks >= e.ActiveTasksLimit {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if e.ActiveTasksLimit != 0 {
|
||||
if e.ActiveTasks >= e.ActiveTasksLimit {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -664,6 +664,8 @@ type Executor struct {
|
|||
ID string `json:"id,omitempty"`
|
||||
ListenURL string `json:"listenURL,omitempty"`
|
||||
|
||||
Archs []common.Arch `json:"archs,omitempty"`
|
||||
|
||||
Labels map[string]string `json:"labels,omitempty"`
|
||||
|
||||
ActiveTasksLimit int `json:"active_tasks_limit,omitempty"`
|
||||
|
|
Loading…
Reference in New Issue