containerMode option to allow running jobs in k8s instead of docker (#1546)
* added containerMode=kubernetes env variables to the runner
* removed unused logging
* restored configs and charts
* restored makefile cert version and acceptance/run
* added workVolumeClaimTemplate in pod definition, including logic
* added claim template name based on the runner
* Apply suggestions from code review: update errors
* added concurrent cleanup before runner pod is deleted
* update manifests
* added retry after 30s if pod cleanup contains err
* added admission webhook check, made workVolumeClaimTemplate mandatory for k8s
* style changes and added comments
* added IsZero timestamp check for deleting runner-linked pods
* changed order of local variable to avoid copy if p is deleted
* removed docker from container mode k8s
* restored charts, config, makefile
* restored forked files back and not the ARC ones
* created PersistentVolume on containerMode k8s
* create pv only if storage class name is local-storage
* removed actions if storage class name is local-storage
* added service account validation if container mode kubernetes
* changed the coding style to match the rest of ARC
* added validation to the runnerdeployment webhook
* specified fields more precisely, added webhook validation to the replicaset as well
* remake manifests
* wrapped delete runner-linked-pods in kube mode
* fixed empty line
* fixed import
* makefile changes for hooks
* added cleanup secrets
* create manifests
* docs
* update access modes
* update dockerfile
* nit changes
* fixed dockerfile
* rewrite allowing reuse for runners and runnersets
* deepcopy forgot to stage
* changed privileged
* make manifests
* partly moved to finalizer, still need to apply finalizer first
* finalizer added if env variable used in container mode exists
* bump runner version
* error message moved from Error to Info on cleanup pods/secrets
* removed useless dereferencing, added transformation tests of workVolumeClaimTemplate
* Apply suggestions from code review
* Update controllers/utils_test.go

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* Update controllers/utils_test.go

Co-authored-by: Thomas Boop <52323235+thboop@users.noreply.github.com>

* add hook version to cli, update to 0.1.2
* Apply suggestions from code review
* Update controllers/utils_test.go
* Update runner/Makefile
* Fix missing secret permission and the error handling
* Fix a runnerpod reconciler finalizer to not trigger unnecessary retry

Co-authored-by: Nikola Jokic <nikola-jokic@github.com>
Co-authored-by: Nikola Jokic <97525037+nikola-jokic@users.noreply.github.com>
Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
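Before the diff itself, a minimal sketch of how the new workVolumeClaimTemplate maps onto the ephemeral "work" volume and its mount. This is illustrative only: it assumes the api/v1alpha1 package imported by the tests later in this diff is available on the import path, and all values are examples.

package main

import (
	"fmt"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Example template; in kubernetes container mode the "work" volume must come
	// from a template like this rather than from Volumes/VolumeMounts.
	tpl := v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: "local-storage", // example storage class
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
		Resources:        corev1.ResourceRequirements{},
	}

	vol := tpl.V1Volume()                       // ephemeral volume named "work"
	mount := tpl.V1VolumeMount("/runner/_work") // mounted at the runner work dir

	fmt.Println(vol.Name, mount.MountPath)
}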
@@ -9,7 +9,8 @@ const (
 const (
 	// This names requires at least one slash to work.
 	// See https://github.com/google/knative-gcp/issues/378
-	runnerPodFinalizerName = "actions.summerwind.dev/runner-pod"
+	runnerPodFinalizerName             = "actions.summerwind.dev/runner-pod"
+	runnerLinkedResourcesFinalizerName = "actions.summerwind.dev/linked-resources"
 
 	annotationKeyPrefix = "actions-runner/"
 
@@ -61,4 +62,7 @@ const (
 
 	EnvVarRunnerName  = "RUNNER_NAME"
 	EnvVarRunnerToken = "RUNNER_TOKEN"
+
+	// defaultRunnerHookPath is the path to the hook script used when "containerMode: kubernetes" is specified
+	defaultRunnerHookPath = "/runner/k8s/index.js"
 )
@@ -18,7 +18,9 @@ package controllers
 
 import (
 	"context"
+	"errors"
 	"fmt"
+	"strconv"
 	"strings"
 	"time"
 
@@ -76,6 +78,7 @@ type RunnerReconciler struct {
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/finalizers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
 // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete
 // +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
 // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
 
@@ -112,6 +115,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
 			// Pod was not found
 			return r.processRunnerDeletion(runner, ctx, log, nil)
 		}
+
 		return r.processRunnerDeletion(runner, ctx, log, &pod)
 	}
 
@@ -412,7 +416,17 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 	template.Spec.SecurityContext = runner.Spec.SecurityContext
 	template.Spec.EnableServiceLinks = runner.Spec.EnableServiceLinks
 
-	pod, err := newRunnerPod(runner.Name, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL)
+	if runner.Spec.ContainerMode == "kubernetes" {
+		workDir := runner.Spec.WorkDir
+		if workDir == "" {
+			workDir = "/runner/_work"
+		}
+		if err := applyWorkVolumeClaimTemplateToPod(&template, runner.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
+			return corev1.Pod{}, err
+		}
+	}
+
+	pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, runner.Name, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL)
 	if err != nil {
 		return pod, err
 	}
@@ -424,6 +438,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		// if operator provides a work volume mount, use that
 		isPresent, _ := workVolumeMountPresent(runnerSpec.VolumeMounts)
 		if isPresent {
+			if runnerSpec.ContainerMode == "kubernetes" {
+				return pod, errors.New("volume mount \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
+			}
 			// remove work volume since it will be provided from runnerSpec.Volumes
 			// if we don't remove it here we would get a duplicate key error, i.e. two volumes named work
 			_, index := workVolumeMountPresent(pod.Spec.Containers[0].VolumeMounts)
@@ -437,6 +454,9 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 		// if operator provides a work volume, use that
 		isPresent, _ := workVolumePresent(runnerSpec.Volumes)
 		if isPresent {
+			if runnerSpec.ContainerMode == "kubernetes" {
+				return pod, errors.New("volume \"work\" should be specified by workVolumeClaimTemplate in container mode kubernetes")
+			}
 			_, index := workVolumePresent(pod.Spec.Volumes)
 
 			// remove work volume since it will be provided from runnerSpec.Volumes
@@ -446,6 +466,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
 
 		pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
 	}
+
 	if len(runnerSpec.InitContainers) != 0 {
 		pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
 	}
@@ -530,7 +551,45 @@ func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
 	return updated
 }
 
-func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
+func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
+	isRequireSameNode, err := isRequireSameNode(pod)
+	if err != nil {
+		return nil, err
+	}
+
+	return []corev1.EnvVar{
+		{
+			Name:  "ACTIONS_RUNNER_CONTAINER_HOOKS",
+			Value: defaultRunnerHookPath,
+		},
+		{
+			Name:  "ACTIONS_RUNNER_REQUIRE_JOB_CONTAINER",
+			Value: "true",
+		},
+		{
+			Name: "ACTIONS_RUNNER_POD_NAME",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.name",
+				},
+			},
+		},
+		{
+			Name: "ACTIONS_RUNNER_JOB_NAMESPACE",
+			ValueFrom: &corev1.EnvVarSource{
+				FieldRef: &corev1.ObjectFieldSelector{
+					FieldPath: "metadata.namespace",
+				},
+			},
+		},
+		corev1.EnvVar{
+			Name:  "ACTIONS_RUNNER_REQUIRE_SAME_NODE",
+			Value: strconv.FormatBool(isRequireSameNode),
+		},
+	}, nil
+}
+
+func newRunnerPodWithContainerMode(containerMode string, runnerName string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
 	var (
 		privileged      bool = true
 		dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
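A test-style sketch of what runnerHookEnvs produces, illustrative only: it assumes it sits in the same controllers package so the unexported function is reachable, and the test name is mine. For a pod whose "work" volume allows ReadWriteMany, the same-node requirement comes out false:

package controllers

import (
	"testing"

	corev1 "k8s.io/api/core/v1"
)

func Test_runnerHookEnvs_sketch(t *testing.T) {
	// A pod that already carries the ephemeral "work" volume, roughly as
	// applyWorkVolumeClaimTemplateToPod builds it.
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Volumes: []corev1.Volume{{
				Name: "work",
				VolumeSource: corev1.VolumeSource{
					Ephemeral: &corev1.EphemeralVolumeSource{
						VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
							Spec: corev1.PersistentVolumeClaimSpec{
								AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteMany},
							},
						},
					},
				},
			}},
		},
	}

	envs, err := runnerHookEnvs(&pod)
	if err != nil {
		t.Fatal(err)
	}

	// Expect the hook path, the job-container requirement, the downward-API pod
	// name and namespace, and REQUIRE_SAME_NODE=false (ReadWriteMany allows any node).
	if len(envs) != 5 {
		t.Fatalf("expected 5 hook env vars, got %d", len(envs))
	}
}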
@@ -539,6 +598,12 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		dockerdInRunnerPrivileged bool = dockerdInRunner
 	)
 
+	if containerMode == "kubernetes" {
+		dockerdInRunner = false
+		dockerEnabled = false
+		dockerdInRunnerPrivileged = false
+	}
+
 	template = *template.DeepCopy()
 
 	// This label selector is used by default when rd.Spec.Selector is empty.
@@ -625,6 +690,17 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 		}
 	}
 
+	if containerMode == "kubernetes" {
+		if dockerdContainer != nil {
+			template.Spec.Containers = append(template.Spec.Containers[:dockerdContainerIndex], template.Spec.Containers[dockerdContainerIndex+1:]...)
+			if dockerdContainerIndex < runnerContainerIndex {
+				runnerContainerIndex--
+			}
+		}
+		dockerdContainer = nil
+		dockerdContainerIndex = -1
+	}
+
 	if runnerContainer == nil {
 		runnerContainerIndex = -1
 		runnerContainer = &corev1.Container{
@@ -655,6 +731,13 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 	}
 
 	runnerContainer.Env = append(runnerContainer.Env, env...)
+	if containerMode == "kubernetes" {
+		hookEnvs, err := runnerHookEnvs(&template)
+		if err != nil {
+			return corev1.Pod{}, err
+		}
+		runnerContainer.Env = append(runnerContainer.Env, hookEnvs...)
+	}
 
 	if runnerContainer.SecurityContext == nil {
 		runnerContainer.SecurityContext = &corev1.SecurityContext{}
@@ -879,6 +962,10 @@ func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.Ru
 	return *pod, nil
 }
 
+func newRunnerPod(runnerName string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string) (corev1.Pod, error) {
+	return newRunnerPodWithContainerMode("", runnerName, template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL)
+}
+
 func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
 	name := "runner-controller"
 	if r.Name != "" {
@@ -941,3 +1028,71 @@ func workVolumeMountPresent(items []corev1.VolumeMount) (bool, int) {
 	}
 	return false, 0
 }
+
+func applyWorkVolumeClaimTemplateToPod(pod *corev1.Pod, workVolumeClaimTemplate *v1alpha1.WorkVolumeClaimTemplate, workDir string) error {
+	if workVolumeClaimTemplate == nil {
+		return errors.New("work volume claim template must be specified in container mode kubernetes")
+	}
+	for i := range pod.Spec.Volumes {
+		if pod.Spec.Volumes[i].Name == "work" {
+			return fmt.Errorf("Work volume should not be specified in container mode kubernetes. workVolumeClaimTemplate field should be used instead.")
+		}
+	}
+	pod.Spec.Volumes = append(pod.Spec.Volumes, workVolumeClaimTemplate.V1Volume())
+
+	var runnerContainer *corev1.Container
+	for i := range pod.Spec.Containers {
+		if pod.Spec.Containers[i].Name == "runner" {
+			runnerContainer = &pod.Spec.Containers[i]
+			break
+		}
+	}
+
+	if runnerContainer == nil {
+		return fmt.Errorf("runner container is not present when applying work volume claim template")
+	}
+
+	if isPresent, _ := workVolumeMountPresent(runnerContainer.VolumeMounts); isPresent {
+		return fmt.Errorf("volume mount \"work\" should not be present on the runner container in container mode kubernetes")
+	}
+
+	runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts, workVolumeClaimTemplate.V1VolumeMount(workDir))
+
+	return nil
+}
+
+// isRequireSameNode specifies for the runner in kubernetes mode whether it should
+// schedule jobs to the same node where the runner is
+//
+// This function should only be called in containerMode: kubernetes
+func isRequireSameNode(pod *corev1.Pod) (bool, error) {
+	isPresent, index := workVolumePresent(pod.Spec.Volumes)
+	if !isPresent {
+		return true, errors.New("internal error: work volume mount must exist in containerMode: kubernetes")
+	}
+
+	if pod.Spec.Volumes[index].Ephemeral == nil || pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate == nil {
+		return true, errors.New("containerMode: kubernetes should have pod.Spec.Volumes[].Ephemeral.VolumeClaimTemplate set")
+	}
+
+	for _, accessMode := range pod.Spec.Volumes[index].Ephemeral.VolumeClaimTemplate.Spec.AccessModes {
+		switch accessMode {
+		case corev1.ReadWriteOnce:
+			return true, nil
+		case corev1.ReadWriteMany:
+		default:
+			return true, errors.New("actions-runner-controller supports ReadWriteOnce and ReadWriteMany modes only")
+		}
+	}
+	return false, nil
+}
+
+func overwriteRunnerEnv(runner *v1alpha1.Runner, key string, value string) {
+	for i := range runner.Spec.Env {
+		if runner.Spec.Env[i].Name == key {
+			runner.Spec.Env[i].Value = value
+			return
+		}
+	}
+	runner.Spec.Env = append(runner.Spec.Env, corev1.EnvVar{Name: key, Value: value})
+}
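To show the two new helpers together, another test-style sketch (again assuming the controllers package; names and values are illustrative): applying a workVolumeClaimTemplate to a bare runner container mounts the "work" volume at the work dir, and a ReadWriteOnce access mode makes isRequireSameNode report true.

package controllers

import (
	"testing"

	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
	corev1 "k8s.io/api/core/v1"
)

func Test_applyWorkVolumeClaimTemplateToPod_sketch(t *testing.T) {
	// A pod with a bare "runner" container, roughly what newPod has built
	// by the time the template is applied.
	pod := corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{Name: "runner"}},
		},
	}

	tpl := &v1alpha1.WorkVolumeClaimTemplate{
		StorageClassName: "local-storage", // example value
		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
	}

	// Adds the ephemeral "work" volume and mounts it on the runner container.
	if err := applyWorkVolumeClaimTemplateToPod(&pod, tpl, "/runner/_work"); err != nil {
		t.Fatal(err)
	}
	if got := pod.Spec.Containers[0].VolumeMounts[0].MountPath; got != "/runner/_work" {
		t.Errorf("unexpected mount path %q", got)
	}

	// ReadWriteOnce pins the job pod to the runner's node, so the hook env
	// ACTIONS_RUNNER_REQUIRE_SAME_NODE should come out true.
	sameNode, err := isRequireSameNode(&pod)
	if err != nil {
		t.Fatal(err)
	}
	if !sameNode {
		t.Error("expected same-node requirement for ReadWriteOnce")
	}
}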
@@ -20,6 +20,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
+	"sync"
 	"time"
 
 	"github.com/go-logr/logr"
@@ -50,6 +51,7 @@ type RunnerPodReconciler struct {
 }
 
 // +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;update;patch;delete
+// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch
 // +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
 
 func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
@@ -77,6 +79,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	}
 
 	var enterprise, org, repo string
+	var isContainerMode bool
 
 	for _, e := range envvars {
 		switch e.Name {
@@ -86,13 +89,20 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 			org = e.Value
 		case EnvVarRepo:
 			repo = e.Value
+		case "ACTIONS_RUNNER_CONTAINER_HOOKS":
+			isContainerMode = true
 		}
 	}
 
 	if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
 		finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
 
-		if added {
+		var cleanupFinalizersAdded bool
+		if isContainerMode {
+			finalizers, cleanupFinalizersAdded = addFinalizer(finalizers, runnerLinkedResourcesFinalizerName)
+		}
+
+		if added || cleanupFinalizersAdded {
 			newRunner := runnerPod.DeepCopy()
 			newRunner.ObjectMeta.Finalizers = finalizers
 
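addFinalizer and removeFinalizer are used here but are not part of this diff; a minimal sketch of the semantics the calls above rely on (assumed, not necessarily the project's actual implementation):

// Assumed semantics: addFinalizer returns the (possibly extended) slice plus
// whether it changed; removeFinalizer returns the filtered slice plus whether
// anything was removed.
func addFinalizer(finalizers []string, name string) ([]string, bool) {
	for _, f := range finalizers {
		if f == name {
			return finalizers, false
		}
	}
	return append(finalizers, name), true
}

func removeFinalizer(finalizers []string, name string) ([]string, bool) {
	var result []string
	removed := false
	for _, f := range finalizers {
		if f == name {
			removed = true
			continue
		}
		result = append(result, f)
	}
	return result, removed
}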
@@ -108,6 +118,27 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
 	} else {
 		log.V(2).Info("Seen deletion-timestamp is already set")
 
+		if finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerLinkedResourcesFinalizerName); removed {
+			if err := r.cleanupRunnerLinkedPods(ctx, &runnerPod, log); err != nil {
+				log.Info("Cleanup of runner-linked pods failed due to an error. If this persists, please manually remove the runner-linked pods to unblock ARC", "err", err.Error())
+				return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
+			}
+			if err := r.cleanupRunnerLinkedSecrets(ctx, &runnerPod, log); err != nil {
+				log.Info("Cleanup of runner-linked secrets failed due to an error. If this persists, please manually remove the runner-linked secrets to unblock ARC", "err", err.Error())
+				return ctrl.Result{Requeue: true, RequeueAfter: 30 * time.Second}, nil
+			}
+			patchedPod := runnerPod.DeepCopy()
+			patchedPod.ObjectMeta.Finalizers = finalizers
+
+			if err := r.Patch(ctx, patchedPod, client.MergeFrom(&runnerPod)); err != nil {
+				log.Error(err, "Failed to update runner pod for linked-resources finalizer removal")
+				return ctrl.Result{}, err
+			}
+
+			// Otherwise the subsequent patch request can revive the removed finalizer and trigger an unnecessary reconciliation
+			runnerPod = *patchedPod
+		}
+
 		finalizers, removed := removeFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
 
 		if removed {
@@ -222,3 +253,93 @@ func (r *RunnerPodReconciler) SetupWithManager(mgr ctrl.Manager) error {
 		Named(name).
 		Complete(r)
 }
+
+func (r *RunnerPodReconciler) cleanupRunnerLinkedPods(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
+	var runnerLinkedPodList corev1.PodList
+	if err := r.List(ctx, &runnerLinkedPodList, client.InNamespace(pod.Namespace), client.MatchingLabels(
+		map[string]string{
+			"runner-pod": pod.ObjectMeta.Name,
+		},
+	)); err != nil {
+		return fmt.Errorf("failed to list runner-linked pods: %w", err)
+	}
+
+	var (
+		wg   sync.WaitGroup
+		errs []error
+	)
+	for _, p := range runnerLinkedPodList.Items {
+		if !p.ObjectMeta.DeletionTimestamp.IsZero() {
+			continue
+		}
+
+		p := p
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := r.Delete(ctx, &p); err != nil {
+				if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
+					return
+				}
+				errs = append(errs, fmt.Errorf("delete pod %q error: %v", p.ObjectMeta.Name, err))
+			}
+		}()
+	}
+
+	wg.Wait()
+
+	if len(errs) > 0 {
+		for _, err := range errs {
+			log.Error(err, "failed to remove runner-linked pod")
+		}
+		return errors.New("failed to remove some runner linked pods")
+	}
+
+	return nil
+}
+
+func (r *RunnerPodReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, pod *corev1.Pod, log logr.Logger) error {
+	log.V(2).Info("Listing runner-linked secrets to be deleted", "ns", pod.Namespace)
+
+	var runnerLinkedSecretList corev1.SecretList
+	if err := r.List(ctx, &runnerLinkedSecretList, client.InNamespace(pod.Namespace), client.MatchingLabels(
+		map[string]string{
+			"runner-pod": pod.ObjectMeta.Name,
+		},
+	)); err != nil {
+		return fmt.Errorf("failed to list runner-linked secrets: %w", err)
+	}
+
+	var (
+		wg   sync.WaitGroup
+		errs []error
+	)
+	for _, s := range runnerLinkedSecretList.Items {
+		if !s.ObjectMeta.DeletionTimestamp.IsZero() {
+			continue
+		}
+
+		s := s
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			if err := r.Delete(ctx, &s); err != nil {
+				if kerrors.IsNotFound(err) || kerrors.IsGone(err) {
+					return
+				}
+				errs = append(errs, fmt.Errorf("delete secret %q error: %v", s.ObjectMeta.Name, err))
+			}
+		}()
+	}
+
+	wg.Wait()
+
+	if len(errs) > 0 {
+		for _, err := range errs {
+			log.Error(err, "failed to remove runner-linked secret")
+		}
+		return errors.New("failed to remove some runner linked secrets")
+	}
+
+	return nil
+}
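The two cleanup functions select resources by the runner-pod label. A small illustrative sketch of the shape of a runner-linked pod they would find and delete (the label key is taken from the MatchingLabels calls above; all other names are hypothetical):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	runnerPodName := "example-runner-abc12" // hypothetical runner pod name

	// A pod or secret created for this runner is expected to carry the
	// runner-pod label so the cleanup above can list and delete it.
	linked := corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      runnerPodName + "-workflow", // hypothetical name
			Namespace: "runners",                   // hypothetical namespace
			Labels:    map[string]string{"runner-pod": runnerPodName},
		},
	}

	fmt.Println(linked.Labels["runner-pod"])
}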
@@ -195,7 +195,31 @@ func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*ap
 		Spec: runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
 	}
 
-	pod, err := newRunnerPod(runnerSet.Name, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL)
+	if runnerSet.Spec.RunnerConfig.ContainerMode == "kubernetes" {
+		found := false
+		for i := range template.Spec.Containers {
+			if template.Spec.Containers[i].Name == containerName {
+				found = true
+			}
+		}
+		if !found {
+			template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
+				Name: "runner",
+			})
+		}
+
+		workDir := runnerSet.Spec.RunnerConfig.WorkDir
+		if workDir == "" {
+			workDir = "/runner/_work"
+		}
+		if err := applyWorkVolumeClaimTemplateToPod(&template, runnerSet.Spec.WorkVolumeClaimTemplate, workDir); err != nil {
+			return nil, err
+		}
+
+		template.Spec.ServiceAccountName = runnerSet.Spec.ServiceAccountName
+	}
+
+	pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, runnerSet.Name, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL)
 	if err != nil {
 		return nil, err
 	}
@@ -3,6 +3,9 @@ package controllers
 import (
 	"reflect"
 	"testing"
+
+	"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
+	corev1 "k8s.io/api/core/v1"
 )
 
 func Test_filterLabels(t *testing.T) {
@@ -32,3 +35,94 @@ func Test_filterLabels(t *testing.T) {
 		})
 	}
 }
+
+func Test_workVolumeClaimTemplateVolumeV1VolumeTransformation(t *testing.T) {
+	storageClassName := "local-storage"
+	workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
+		StorageClassName: storageClassName,
+		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
+		Resources:        corev1.ResourceRequirements{},
+	}
+	want := corev1.Volume{
+		Name: "work",
+		VolumeSource: corev1.VolumeSource{
+			Ephemeral: &corev1.EphemeralVolumeSource{
+				VolumeClaimTemplate: &corev1.PersistentVolumeClaimTemplate{
+					Spec: corev1.PersistentVolumeClaimSpec{
+						AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
+						StorageClassName: &storageClassName,
+						Resources:        corev1.ResourceRequirements{},
+					},
+				},
+			},
+		},
+	}
+
+	got := workVolumeClaimTemplate.V1Volume()
+
+	if got.Name != want.Name {
+		t.Errorf("want name %q, got %q\n", want.Name, got.Name)
+	}
+
+	if got.VolumeSource.Ephemeral == nil {
+		t.Fatal("work volume claim template should transform itself into Ephemeral volume source\n")
+	}
+
+	if got.VolumeSource.Ephemeral.VolumeClaimTemplate == nil {
+		t.Fatal("work volume claim template should have ephemeral volume claim template set\n")
+	}
+
+	gotClassName := *got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
+	wantClassName := *want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.StorageClassName
+	if gotClassName != wantClassName {
+		t.Errorf("expected storage class name %q, got %q\n", wantClassName, gotClassName)
+	}
+
+	gotAccessModes := got.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
+	wantAccessModes := want.VolumeSource.Ephemeral.VolumeClaimTemplate.Spec.AccessModes
+	if len(gotAccessModes) != len(wantAccessModes) {
+		t.Fatalf("access modes lengths mismatch: got %v, expected %v\n", gotAccessModes, wantAccessModes)
+	}
+
+	diff := make(map[corev1.PersistentVolumeAccessMode]int, len(wantAccessModes))
+	for _, am := range wantAccessModes {
+		diff[am]++
+	}
+
+	for _, am := range gotAccessModes {
+		_, ok := diff[am]
+		if !ok {
+			t.Errorf("got access mode %v that is not in the wanted access modes\n", am)
+		}
+
+		diff[am]--
+		if diff[am] == 0 {
+			delete(diff, am)
+		}
+	}
+
+	if len(diff) != 0 {
+		t.Fatalf("got access modes did not take every access mode into account\nactual: %v expected: %v\n", gotAccessModes, wantAccessModes)
+	}
+}
+
+func Test_workVolumeClaimTemplateV1VolumeMount(t *testing.T) {
+
+	workVolumeClaimTemplate := v1alpha1.WorkVolumeClaimTemplate{
+		StorageClassName: "local-storage",
+		AccessModes:      []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce, corev1.ReadWriteMany},
+		Resources:        corev1.ResourceRequirements{},
+	}
+
+	mountPath := "/test/_work"
+	want := corev1.VolumeMount{
+		MountPath: mountPath,
+		Name:      "work",
+	}
+
+	got := workVolumeClaimTemplate.V1VolumeMount(mountPath)
+
+	if want != got {
+		t.Fatalf("expected volume mount %+v, actual %+v\n", want, got)
+	}
+}
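The new transformation tests are ordinary Go tests; assuming the controllers package layout referenced by the imports above, a run such as go test ./controllers/ -run Test_workVolumeClaimTemplate exercises just these cases.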