Mirror of https://github.com/actions/actions-runner-controller.git (synced 2025-12-11 03:57:01 +00:00)
Introduce new preview auto-scaling mode for ARC. (#2153)
Co-authored-by: Cory Miller <cory-miller@github.com>
Co-authored-by: Nikola Jokic <nikola-jokic@github.com>
Co-authored-by: Ava Stancu <AvaStancu@github.com>
Co-authored-by: Ferenc Hammerl <fhammerl@github.com>
Co-authored-by: Francesco Renzi <rentziass@github.com>
Co-authored-by: Bassem Dghaidi <Link-@github.com>
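Editor's note: both reconcilers below mutate objects through a small `patch` helper that this excerpt references but does not define. A minimal sketch of what such a helper could look like, assuming controller-runtime and a merge patch computed against a deep copy of the object; the names `patcher` and the exact shape are assumptions, and the repository's actual implementation may differ:

package actionsgithubcom

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
)

// patcher is the minimal client surface the helper needs; both client.Client
// and the status writer returned by r.Status() expose a compatible Patch in
// the controller-runtime versions of this era. (Hypothetical sketch only.)
type patcher interface {
	Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error
}

// patch snapshots the object, applies the caller's mutation, and sends a
// merge patch computed against the snapshot.
func patch[T client.Object](ctx context.Context, c patcher, obj T, update func(obj T)) error {
	original := obj.DeepCopyObject().(T) // deep copy before mutating
	update(obj)                          // e.g. controllerutil.AddFinalizer(obj, ...)
	return c.Patch(ctx, obj, client.MergeFrom(original))
}

With this shape, calls such as patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) { ... }) infer T as *v1alpha1.AutoscalingListener, matching how the helper is invoked throughout the diff.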
controllers/actions.github.com/autoscalinglistener_controller.go (new file, 450 lines)
@@ -0,0 +1,450 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package actionsgithubcom

import (
	"context"
	"fmt"

	"github.com/go-logr/logr"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/handler"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	"sigs.k8s.io/controller-runtime/pkg/source"

	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	hash "github.com/actions/actions-runner-controller/hash"
	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
	autoscalingListenerOwnerKey      = ".metadata.controller"
	autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
)

// AutoscalingListenerReconciler reconciles an AutoscalingListener object
type AutoscalingListenerReconciler struct {
	client.Client
	Log    logr.Logger
	Scheme *runtime.Scheme

	resourceBuilder resourceBuilder
}

// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods/status,verbs=get
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;create;update
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=get;list;watch;create
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get;list;watch;update
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get;list;watch
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners/finalizers,verbs=update

// Reconcile an AutoscalingListener resource to meet its desired spec.
func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("autoscalinglistener", req.NamespacedName)

	autoscalingListener := new(v1alpha1.AutoscalingListener)
	if err := r.Get(ctx, req.NamespacedName, autoscalingListener); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	if !autoscalingListener.ObjectMeta.DeletionTimestamp.IsZero() {
		if controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
			log.Info("Deleting resources")
			done, err := r.cleanupResources(ctx, autoscalingListener, log)
			if err != nil {
				log.Error(err, "Failed to cleanup resources after deletion")
				return ctrl.Result{}, err
			}
			if !done {
				log.Info("Waiting for resources to be deleted before removing finalizer")
				return ctrl.Result{}, nil
			}

			log.Info("Removing finalizer")
			err = patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) {
				controllerutil.RemoveFinalizer(obj, autoscalingListenerFinalizerName)
			})
			if err != nil && !kerrors.IsNotFound(err) {
				log.Error(err, "Failed to remove finalizer")
				return ctrl.Result{}, err
			}

			log.Info("Successfully removed finalizer after cleanup")
		}
		return ctrl.Result{}, nil
	}

	if !controllerutil.ContainsFinalizer(autoscalingListener, autoscalingListenerFinalizerName) {
		log.Info("Adding finalizer")
		if err := patch(ctx, r.Client, autoscalingListener, func(obj *v1alpha1.AutoscalingListener) {
			controllerutil.AddFinalizer(obj, autoscalingListenerFinalizerName)
		}); err != nil {
			log.Error(err, "Failed to add finalizer")
			return ctrl.Result{}, err
		}

		log.Info("Successfully added finalizer")
		return ctrl.Result{}, nil
	}

	// Check if the AutoscalingRunnerSet exists
	var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.AutoscalingRunnerSetName}, &autoscalingRunnerSet); err != nil {
		log.Error(err, "Failed to find AutoscalingRunnerSet.",
			"namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
			"name", autoscalingListener.Spec.AutoscalingRunnerSetName)
		return ctrl.Result{}, err
	}

	// Check if the GitHub config secret exists
	secret := new(corev1.Secret)
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: autoscalingListener.Spec.GitHubConfigSecret}, secret); err != nil {
		log.Error(err, "Failed to find GitHub config secret.",
			"namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
			"name", autoscalingListener.Spec.GitHubConfigSecret)
		return ctrl.Result{}, err
	}

	// Create a mirror secret in the same namespace as the AutoscalingListener
	mirrorSecret := new(corev1.Secret)
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerSecretMirrorName(autoscalingListener)}, mirrorSecret); err != nil {
		if !kerrors.IsNotFound(err) {
			log.Error(err, "Unable to get listener secret mirror", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerSecretMirrorName(autoscalingListener))
			return ctrl.Result{}, err
		}

		// Create a mirror secret in the controller namespace for the listener pod to use
		log.Info("Creating a mirror listener secret for the listener pod")
		return r.createSecretsForListener(ctx, autoscalingListener, secret, log)
	}

	// Make sure the mirror secret is up to date
	mirrorSecretDataHash := mirrorSecret.Labels["secret-data-hash"]
	secretDataHash := hash.ComputeTemplateHash(secret.Data)
	if mirrorSecretDataHash != secretDataHash {
		log.Info("Updating mirror listener secret for the listener pod", "mirrorSecretDataHash", mirrorSecretDataHash, "secretDataHash", secretDataHash)
		return r.updateSecretsForListener(ctx, secret, mirrorSecret, log)
	}

	// Make sure the runner scale set listener service account is created for the listener pod in the controller namespace
	serviceAccount := new(corev1.ServiceAccount)
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Namespace, Name: scaleSetListenerServiceAccountName(autoscalingListener)}, serviceAccount); err != nil {
		if !kerrors.IsNotFound(err) {
			log.Error(err, "Unable to get listener service accounts", "namespace", autoscalingListener.Namespace, "name", scaleSetListenerServiceAccountName(autoscalingListener))
			return ctrl.Result{}, err
		}

		// Create a service account for the listener pod in the controller namespace
		log.Info("Creating a service account for the listener pod")
		return r.createServiceAccountForListener(ctx, autoscalingListener, log)
	}

	// TODO: make sure the service account is up to date

	// Make sure the runner scale set listener role is created in the AutoscalingRunnerSet namespace
	listenerRole := new(rbacv1.Role)
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRole); err != nil {
		if !kerrors.IsNotFound(err) {
			log.Error(err, "Unable to get listener role", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener))
			return ctrl.Result{}, err
		}

		// Create a role for the listener pod in the AutoscalingRunnerSet namespace
		log.Info("Creating a role for the listener pod")
		return r.createRoleForListener(ctx, autoscalingListener, log)
	}

	// Make sure the listener role has the up-to-date rules
	existingRuleHash := listenerRole.Labels["role-policy-rules-hash"]
	desiredRules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
	desiredRulesHash := hash.ComputeTemplateHash(&desiredRules)
	if existingRuleHash != desiredRulesHash {
		log.Info("Updating the listener role with the up-to-date rules")
		return r.updateRoleForListener(ctx, listenerRole, desiredRules, desiredRulesHash, log)
	}

	// Make sure the runner scale set listener role binding is created
	listenerRoleBinding := new(rbacv1.RoleBinding)
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace, Name: scaleSetListenerRoleName(autoscalingListener)}, listenerRoleBinding); err != nil {
		if !kerrors.IsNotFound(err) {
			log.Error(err, "Unable to get listener role binding", "namespace", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, "name", scaleSetListenerRoleName(autoscalingListener))
			return ctrl.Result{}, err
		}

		// Create a role binding for the listener pod in the AutoscalingRunnerSet namespace
		log.Info("Creating a role binding for the service account and role")
		return r.createRoleBindingForListener(ctx, autoscalingListener, listenerRole, serviceAccount, log)
	}

	// TODO: make sure the role binding has the up-to-date role and service account

	listenerPod := new(corev1.Pod)
	if err := r.Get(ctx, client.ObjectKey{Namespace: autoscalingListener.Namespace, Name: autoscalingListener.Name}, listenerPod); err != nil {
		if !kerrors.IsNotFound(err) {
			log.Error(err, "Unable to get listener pod", "namespace", autoscalingListener.Namespace, "name", autoscalingListener.Name)
			return ctrl.Result{}, err
		}

		// Create a listener pod in the controller namespace
		log.Info("Creating a listener pod")
		return r.createListenerPod(ctx, &autoscalingRunnerSet, autoscalingListener, serviceAccount, mirrorSecret, log)
	}

	// A failed listener pod might mean the mirror secret is out of date.
	// Delete the listener pod and re-create it to make sure the mirror secret is up to date.
	if listenerPod.Status.Phase == corev1.PodFailed && listenerPod.DeletionTimestamp.IsZero() {
		log.Info("Listener pod failed, deleting it and re-creating it", "namespace", listenerPod.Namespace, "name", listenerPod.Name, "reason", listenerPod.Status.Reason, "message", listenerPod.Status.Message)
		if err := r.Delete(ctx, listenerPod); err != nil && !kerrors.IsNotFound(err) {
			log.Error(err, "Unable to delete the listener pod", "namespace", listenerPod.Namespace, "name", listenerPod.Name)
			return ctrl.Result{}, err
		}
	}

	return ctrl.Result{}, nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	groupVersionIndexer := func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()
		owner := metav1.GetControllerOf(rawObj)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
		var requests []reconcile.Request
		labels := obj.GetLabels()
		namespace, ok := labels["auto-scaling-listener-namespace"]
		if !ok {
			return nil
		}

		name, ok := labels["auto-scaling-listener-name"]
		if !ok {
			return nil
		}
		requests = append(requests,
			reconcile.Request{
				NamespacedName: types.NamespacedName{
					Name:      name,
					Namespace: namespace,
				},
			},
		)
		return requests
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.AutoscalingListener{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.ServiceAccount{}).
		Owns(&corev1.Secret{}).
		Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}

func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
	logger.Info("Cleaning up the listener pod")
	listenerPod := new(corev1.Pod)
	err = r.Get(ctx, types.NamespacedName{Name: autoscalingListener.Name, Namespace: autoscalingListener.Namespace}, listenerPod)
	switch {
	case err == nil:
		if listenerPod.ObjectMeta.DeletionTimestamp.IsZero() {
			logger.Info("Deleting the listener pod")
			if err := r.Delete(ctx, listenerPod); err != nil {
				return false, fmt.Errorf("failed to delete listener pod: %v", err)
			}
		}
		return false, nil
	case err != nil && !kerrors.IsNotFound(err):
		return false, fmt.Errorf("failed to get listener pods: %v", err)
	}
	logger.Info("Listener pod is deleted")

	logger.Info("Cleaning up the listener service account")
	listenerSa := new(corev1.ServiceAccount)
	err = r.Get(ctx, types.NamespacedName{Name: scaleSetListenerServiceAccountName(autoscalingListener), Namespace: autoscalingListener.Namespace}, listenerSa)
	switch {
	case err == nil:
		if listenerSa.ObjectMeta.DeletionTimestamp.IsZero() {
			logger.Info("Deleting the listener service account")
			if err := r.Delete(ctx, listenerSa); err != nil {
				return false, fmt.Errorf("failed to delete listener service account: %v", err)
			}
		}
		return false, nil
	case err != nil && !kerrors.IsNotFound(err):
		return false, fmt.Errorf("failed to get listener service account: %v", err)
	}
	logger.Info("Listener service account is deleted")

	return true, nil
}

func (r *AutoscalingListenerReconciler) createServiceAccountForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
	newServiceAccount := r.resourceBuilder.newScaleSetListenerServiceAccount(autoscalingListener)

	if err := ctrl.SetControllerReference(autoscalingListener, newServiceAccount, r.Scheme); err != nil {
		return ctrl.Result{}, err
	}

	logger.Info("Creating listener service accounts", "namespace", newServiceAccount.Namespace, "name", newServiceAccount.Name)
	if err := r.Create(ctx, newServiceAccount); err != nil {
		logger.Error(err, "Unable to create listener service accounts", "namespace", newServiceAccount.Namespace, "name", newServiceAccount.Name)
		return ctrl.Result{}, err
	}

	logger.Info("Created listener service accounts", "namespace", newServiceAccount.Namespace, "name", newServiceAccount.Name)
	return ctrl.Result{}, nil
}

func (r *AutoscalingListenerReconciler) createListenerPod(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
	newPod := r.resourceBuilder.newScaleSetListenerPod(autoscalingListener, serviceAccount, secret)

	if err := ctrl.SetControllerReference(autoscalingListener, newPod, r.Scheme); err != nil {
		return ctrl.Result{}, err
	}

	logger.Info("Creating listener pod", "namespace", newPod.Namespace, "name", newPod.Name)
	if err := r.Create(ctx, newPod); err != nil {
		logger.Error(err, "Unable to create listener pod", "namespace", newPod.Namespace, "name", newPod.Name)
		return ctrl.Result{}, err
	}

	logger.Info("Created listener pod", "namespace", newPod.Namespace, "name", newPod.Name)
	return ctrl.Result{}, nil
}

func (r *AutoscalingListenerReconciler) createSecretsForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
	newListenerSecret := r.resourceBuilder.newScaleSetListenerSecretMirror(autoscalingListener, secret)

	if err := ctrl.SetControllerReference(autoscalingListener, newListenerSecret, r.Scheme); err != nil {
		return ctrl.Result{}, err
	}

	logger.Info("Creating listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
	if err := r.Create(ctx, newListenerSecret); err != nil {
		logger.Error(err, "Unable to create listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
		return ctrl.Result{}, err
	}

	logger.Info("Created listener secret", "namespace", newListenerSecret.Namespace, "name", newListenerSecret.Name)
	return ctrl.Result{}, nil
}

func (r *AutoscalingListenerReconciler) updateSecretsForListener(ctx context.Context, secret *corev1.Secret, mirrorSecret *corev1.Secret, logger logr.Logger) (ctrl.Result, error) {
	dataHash := hash.ComputeTemplateHash(secret.Data)
	updatedMirrorSecret := mirrorSecret.DeepCopy()
	updatedMirrorSecret.Labels["secret-data-hash"] = dataHash
	updatedMirrorSecret.Data = secret.Data

	logger.Info("Updating listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
	if err := r.Update(ctx, updatedMirrorSecret); err != nil {
		logger.Error(err, "Unable to update listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name)
		return ctrl.Result{}, err
	}

	logger.Info("Updated listener mirror secret", "namespace", updatedMirrorSecret.Namespace, "name", updatedMirrorSecret.Name, "hash", dataHash)
	return ctrl.Result{}, nil
}

func (r *AutoscalingListenerReconciler) createRoleForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (ctrl.Result, error) {
	newRole := r.resourceBuilder.newScaleSetListenerRole(autoscalingListener)

	logger.Info("Creating listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
	if err := r.Create(ctx, newRole); err != nil {
		logger.Error(err, "Unable to create listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
		return ctrl.Result{}, err
	}

	logger.Info("Created listener role", "namespace", newRole.Namespace, "name", newRole.Name, "rules", newRole.Rules)
	return ctrl.Result{Requeue: true}, nil
}

func (r *AutoscalingListenerReconciler) updateRoleForListener(ctx context.Context, listenerRole *rbacv1.Role, desiredRules []rbacv1.PolicyRule, desiredRulesHash string, logger logr.Logger) (ctrl.Result, error) {
	updatedPatchRole := listenerRole.DeepCopy()
	updatedPatchRole.Labels["role-policy-rules-hash"] = desiredRulesHash
	updatedPatchRole.Rules = desiredRules

	logger.Info("Updating listener role in namespace to have the right permission", "namespace", updatedPatchRole.Namespace, "name", updatedPatchRole.Name, "oldRules", listenerRole.Rules, "newRules", updatedPatchRole.Rules)
	if err := r.Update(ctx, updatedPatchRole); err != nil {
		logger.Error(err, "Unable to update listener role", "namespace", updatedPatchRole.Namespace, "name", updatedPatchRole.Name, "rules", updatedPatchRole.Rules)
		return ctrl.Result{}, err
	}

	logger.Info("Updated listener role in namespace to have the right permission", "namespace", updatedPatchRole.Namespace, "name", updatedPatchRole.Name, "rules", updatedPatchRole.Rules)
	return ctrl.Result{Requeue: true}, nil
}

func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount, logger logr.Logger) (ctrl.Result, error) {
	newRoleBinding := r.resourceBuilder.newScaleSetListenerRoleBinding(autoscalingListener, listenerRole, serviceAccount)

	logger.Info("Creating listener role binding",
		"namespace", newRoleBinding.Namespace,
		"name", newRoleBinding.Name,
		"role", listenerRole.Name,
		"serviceAccountNamespace", serviceAccount.Namespace,
		"serviceAccount", serviceAccount.Name)
	if err := r.Create(ctx, newRoleBinding); err != nil {
		logger.Error(err, "Unable to create listener role binding",
			"namespace", newRoleBinding.Namespace,
			"name", newRoleBinding.Name,
			"role", listenerRole.Name,
			"serviceAccountNamespace", serviceAccount.Namespace,
			"serviceAccount", serviceAccount.Name)
		return ctrl.Result{}, err
	}

	logger.Info("Created listener role binding",
		"namespace", newRoleBinding.Namespace,
		"name", newRoleBinding.Name,
		"role", listenerRole.Name,
		"serviceAccountNamespace", serviceAccount.Namespace,
		"serviceAccount", serviceAccount.Name)
	return ctrl.Result{Requeue: true}, nil
}
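The reconciler above detects drift between the GitHub config secret and its mirror by comparing the hash stored in the mirror's secret-data-hash label against a freshly computed hash of the source data. hash.ComputeTemplateHash is not part of this diff; the self-contained sketch below uses a hypothetical FNV-based stand-in to illustrate the comparison (any deterministic digest works here, since the controller only ever checks equality):

package main

import (
	"fmt"
	"hash/fnv"
)

// computeDataHash is a hypothetical stand-in for hash.ComputeTemplateHash.
func computeDataHash(data map[string][]byte) string {
	h := fnv.New32a()
	// fmt prints map keys in sorted order (Go 1.12+), so this is deterministic.
	fmt.Fprintf(h, "%v", data)
	return fmt.Sprint(h.Sum32())
}

func main() {
	current := map[string][]byte{"github_token": []byte("gh_token")}
	rotated := map[string][]byte{"github_token": []byte("gh_token_rotated")}

	// Mirrors the controller's check: unequal hashes trigger updateSecretsForListener.
	fmt.Println("drift detected:", computeDataHash(current) != computeDataHash(rotated)) // true
}

Storing the hash in a label rather than diffing the data directly keeps the per-reconcile check cheap and makes drift visible from the outside, e.g. with kubectl get secret --show-labels.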
@@ -0,0 +1,393 @@
package actionsgithubcom

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
)

const (
	autoScalingListenerTestTimeout     = time.Second * 5
	autoScalingListenerTestInterval    = time.Millisecond * 250
	autoScalingListenerTestGitHubToken = "gh_token"
)

var _ = Describe("Test AutoScalingListener controller", func() {
	var ctx context.Context
	var cancel context.CancelFunc
	autoScalingNS := new(corev1.Namespace)
	autoScalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet)
	configSecret := new(corev1.Secret)
	autoScalingListener := new(actionsv1alpha1.AutoscalingListener)

	BeforeEach(func() {
		ctx, cancel = context.WithCancel(context.TODO())
		autoScalingNS = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-listener" + RandStringRunes(5)},
		}

		err := k8sClient.Create(ctx, autoScalingNS)
		Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet")

		configSecret = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "github-config-secret",
				Namespace: autoScalingNS.Name,
			},
			Data: map[string][]byte{
				"github_token": []byte(autoScalingListenerTestGitHubToken),
			},
		}

		err = k8sClient.Create(ctx, configSecret)
		Expect(err).NotTo(HaveOccurred(), "failed to create config secret")

		mgr, err := ctrl.NewManager(cfg, ctrl.Options{
			Namespace:          autoScalingNS.Name,
			MetricsBindAddress: "0",
		})
		Expect(err).NotTo(HaveOccurred(), "failed to create manager")

		controller := &AutoscalingListenerReconciler{
			Client: mgr.GetClient(),
			Scheme: mgr.GetScheme(),
			Log:    logf.Log,
		}
		err = controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		min := 1
		max := 10
		autoScalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoScalingNS.Name,
			},
			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
				GitHubConfigUrl:    "https://github.com/owner/repo",
				GitHubConfigSecret: configSecret.Name,
				MaxRunners:         &max,
				MinRunners:         &min,
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Name:  "runner",
								Image: "ghcr.io/actions/runner",
							},
						},
					},
				},
			},
		}

		err = k8sClient.Create(ctx, autoScalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")

		autoScalingListener = &actionsv1alpha1.AutoscalingListener{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asl",
				Namespace: autoScalingNS.Name,
			},
			Spec: actionsv1alpha1.AutoscalingListenerSpec{
				GitHubConfigUrl:               "https://github.com/owner/repo",
				GitHubConfigSecret:            configSecret.Name,
				RunnerScaleSetId:              1,
				AutoscalingRunnerSetNamespace: autoScalingRunnerSet.Namespace,
				AutoscalingRunnerSetName:      autoScalingRunnerSet.Name,
				EphemeralRunnerSetName:        "test-ers",
				MaxRunners:                    10,
				MinRunners:                    1,
				Image:                         "ghcr.io/owner/repo",
			},
		}

		err = k8sClient.Create(ctx, autoScalingListener)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingListener")

		go func() {
			defer GinkgoRecover()

			err := mgr.Start(ctx)
			Expect(err).NotTo(HaveOccurred(), "failed to start manager")
		}()
	})

	AfterEach(func() {
		defer cancel()

		err := k8sClient.Delete(ctx, autoScalingNS)
		Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet")
	})

	Context("When creating a new AutoScalingListener", func() {
		It("It should create/add all required resources for a new AutoScalingListener (finalizer, secret, service account, role, rolebinding, pod)", func() {
			// Check if the finalizer is added
			created := new(actionsv1alpha1.AutoscalingListener)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, created)
					if err != nil {
						return "", err
					}
					if len(created.Finalizers) == 0 {
						return "", nil
					}
					return created.Finalizers[0], nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(autoscalingListenerFinalizerName), "AutoScalingListener should have a finalizer")

			// Check if the secret is created
			mirrorSecret := new(corev1.Secret)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoScalingListener), Namespace: autoScalingListener.Namespace}, mirrorSecret)
					if err != nil {
						return "", err
					}
					return string(mirrorSecret.Data["github_token"]), nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListenerTestGitHubToken), "Mirror secret should be created")

			// Check if the service account is created
			serviceAccount := new(corev1.ServiceAccount)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerServiceAccountName(autoScalingListener), Namespace: autoScalingListener.Namespace}, serviceAccount)
					if err != nil {
						return "", err
					}
					return serviceAccount.Name, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerServiceAccountName(autoScalingListener)), "Service account should be created")

			// Check if the role is created
			role := new(rbacv1.Role)
			Eventually(
				func() ([]rbacv1.PolicyRule, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
					if err != nil {
						return nil, err
					}

					return role.Rules, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{autoScalingListener.Spec.EphemeralRunnerSetName})), "Role should be created")

			// Check if the rolebinding is created
			roleBinding := new(rbacv1.RoleBinding)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, roleBinding)
					if err != nil {
						return "", err
					}

					return roleBinding.RoleRef.Name, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(scaleSetListenerRoleName(autoScalingListener)), "Rolebinding should be created")

			// Check if the pod is created
			pod := new(corev1.Pod)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod)
					if err != nil {
						return "", err
					}

					return pod.Name, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created")
		})
	})

	Context("When deleting a new AutoScalingListener", func() {
		It("It should cleanup all resources for a deleting AutoScalingListener before removing it", func() {
			// Wait for the pod to be created
			pod := new(corev1.Pod)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod)
					if err != nil {
						return "", err
					}

					return pod.Name, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created")

			// Delete the AutoScalingListener
			err := k8sClient.Delete(ctx, autoScalingListener)
			Expect(err).NotTo(HaveOccurred(), "failed to delete test AutoScalingListener")

			// Cleanup the listener pod
			Eventually(
				func() error {
					podList := new(corev1.PodList)
					err := k8sClient.List(ctx, podList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoScalingListener.Name})
					if err != nil {
						return err
					}

					if len(podList.Items) > 0 {
						return fmt.Errorf("pod still exists")
					}

					return nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(Succeed(), "failed to delete pod")

			// Cleanup the listener service account
			Eventually(
				func() error {
					serviceAccountList := new(corev1.ServiceAccountList)
					err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoScalingListener.Name})
					if err != nil {
						return err
					}

					if len(serviceAccountList.Items) > 0 {
						return fmt.Errorf("service account still exists")
					}

					return nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(Succeed(), "failed to delete service account")

			// The AutoScalingListener should be deleted
			Eventually(
				func() error {
					listenerList := new(actionsv1alpha1.AutoscalingListenerList)
					err := k8sClient.List(ctx, listenerList, client.InNamespace(autoScalingListener.Namespace), client.MatchingFields{".metadata.name": autoScalingListener.Name})
					if err != nil {
						return err
					}

					if len(listenerList.Items) > 0 {
						return fmt.Errorf("AutoScalingListener still exists")
					}
					return nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(Succeed(), "failed to delete AutoScalingListener")
		})
	})

	Context("React to changes in the AutoScalingListener", func() {
		It("It should update role to match EphemeralRunnerSet", func() {
			// Wait for the pod to be created
			pod := new(corev1.Pod)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod)
					if err != nil {
						return "", err
					}

					return pod.Name, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created")

			// Update the AutoScalingListener
			updated := autoScalingListener.DeepCopy()
			updated.Spec.EphemeralRunnerSetName = "test-ers-updated"
			err := k8sClient.Patch(ctx, updated, client.MergeFrom(autoScalingListener))
			Expect(err).NotTo(HaveOccurred(), "failed to update test AutoScalingListener")

			// Check if the role is updated with the right rules
			role := new(rbacv1.Role)
			Eventually(
				func() ([]rbacv1.PolicyRule, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerRoleName(autoScalingListener), Namespace: autoScalingListener.Spec.AutoscalingRunnerSetNamespace}, role)
					if err != nil {
						return nil, err
					}

					return role.Rules, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(rulesForListenerRole([]string{updated.Spec.EphemeralRunnerSetName})), "Role should be updated")
		})

		It("It should update mirror secrets to match secret used by AutoScalingRunnerSet", func() {
			// Wait for the pod to be created
			pod := new(corev1.Pod)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, pod)
					if err != nil {
						return "", err
					}

					return pod.Name, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(autoScalingListener.Name), "Pod should be created")

			// Update the secret
			updatedSecret := configSecret.DeepCopy()
			updatedSecret.Data["github_token"] = []byte(autoScalingListenerTestGitHubToken + "_updated")
			err := k8sClient.Update(ctx, updatedSecret)
			Expect(err).NotTo(HaveOccurred(), "failed to update test secret")

			updatedPod := pod.DeepCopy()
			updatedPod.Status.Phase = corev1.PodFailed
			err = k8sClient.Status().Update(ctx, updatedPod)
			Expect(err).NotTo(HaveOccurred(), "failed to update test pod to failed")

			// Check if the mirror secret is updated with the right data
			mirrorSecret := new(corev1.Secret)
			Eventually(
				func() (map[string][]byte, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerSecretMirrorName(autoScalingListener), Namespace: autoScalingListener.Namespace}, mirrorSecret)
					if err != nil {
						return nil, err
					}

					return mirrorSecret.Data, nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(BeEquivalentTo(updatedSecret.Data), "Mirror secret should be updated")

			// Check if we re-created a new pod
			Eventually(
				func() error {
					latestPod := new(corev1.Pod)
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingListener.Name, Namespace: autoScalingListener.Namespace}, latestPod)
					if err != nil {
						return err
					}
					if latestPod.UID == pod.UID {
						return fmt.Errorf("Pod should be recreated")
					}

					return nil
				},
				autoScalingListenerTestTimeout,
				autoScalingListenerTestInterval).Should(Succeed(), "Pod should be recreated")
		})
	})
})
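All of the assertions in the spec above follow the same Gomega polling idiom: a zero-argument getter, a timeout, and a poll interval, where a non-nil error keeps Eventually polling. A self-contained sketch of that idiom outside envtest, assuming a Gomega version that exposes NewGomega (the fail handler, durations, and polled function here are illustrative only):

package main

import (
	"fmt"
	"time"

	"github.com/onsi/gomega"
)

func main() {
	// A fail handler that prints lets this run as a plain program instead of a test.
	g := gomega.NewGomega(func(message string, _ ...int) {
		fmt.Println("assertion failed:", message)
	})

	start := time.Now()
	g.Eventually(func() (string, error) {
		// Stand-in for k8sClient.Get: "not ready" until the controller converges.
		if time.Since(start) < 500*time.Millisecond {
			return "", fmt.Errorf("not ready yet") // non-nil error => keep polling
		}
		return "ready", nil
	}, 5*time.Second, 250*time.Millisecond).Should(gomega.BeEquivalentTo("ready"), "value should converge")

	fmt.Println("converged")
}

Returning (value, error) from the polled function is the same shape the specs use: Gomega treats a non-nil trailing error as a failed attempt and retries until the timeout expires.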
@@ -0,0 +1,506 @@
|
||||
/*
|
||||
Copyright 2020 The actions-runner-controller authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package actionsgithubcom
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
"github.com/go-logr/logr"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
|
||||
"sigs.k8s.io/controller-runtime/pkg/handler"
|
||||
"sigs.k8s.io/controller-runtime/pkg/predicate"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO: Replace with shared image.
|
||||
name = "autoscaler"
|
||||
autoscalingRunnerSetOwnerKey = ".metadata.controller"
|
||||
LabelKeyRunnerSpecHash = "runner-spec-hash"
|
||||
LabelKeyAutoScaleRunnerSetName = "auto-scale-runner-set-name"
|
||||
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
|
||||
runnerScaleSetIdKey = "runner-scale-set-id"
|
||||
|
||||
// scaleSetListenerLabel is the key of pod.meta.labels to label
|
||||
// that the pod is a listener application
|
||||
scaleSetListenerLabel = "runner-scale-set-listener"
|
||||
)
|
||||
|
||||
// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
|
||||
type AutoscalingRunnerSetReconciler struct {
|
||||
client.Client
|
||||
Log logr.Logger
|
||||
Scheme *runtime.Scheme
|
||||
ControllerNamespace string
|
||||
DefaultRunnerScaleSetListenerImage string
|
||||
DefaultRunnerScaleSetListenerImagePullSecrets []string
|
||||
ActionsClient actions.MultiClient
|
||||
|
||||
resourceBuilder resourceBuilder
|
||||
}
|
||||
|
||||
// +kubebuilder:rbac:groups=core,resources=namespaces;pods,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=core,resources=namespaces/status;pods/status,verbs=get
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalingrunnersets/finalizers,verbs=update
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners,verbs=get;list;watch;create;update;patch;delete
|
||||
// +kubebuilder:rbac:groups=actions.github.com,resources=autoscalinglisteners/status,verbs=get;update;patch
|
||||
|
||||
// Reconcile a AutoscalingRunnerSet resource to meet its desired spec.
|
||||
func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
|
||||
log := r.Log.WithValues("autoscalingrunnerset", req.NamespacedName)
|
||||
|
||||
autoscalingRunnerSet := new(v1alpha1.AutoscalingRunnerSet)
|
||||
if err := r.Get(ctx, req.NamespacedName, autoscalingRunnerSet); err != nil {
|
||||
return ctrl.Result{}, client.IgnoreNotFound(err)
|
||||
}
|
||||
|
||||
if !autoscalingRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
if controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
|
||||
log.Info("Deleting resources")
|
||||
done, err := r.cleanupListener(ctx, autoscalingRunnerSet, log)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to clean up listener")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if !done {
|
||||
// we are going to get notified anyway to proceed with rest of the
|
||||
// cleanup. No need to re-queue
|
||||
log.Info("Waiting for listener to be deleted")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
done, err = r.cleanupEphemeralRunnerSets(ctx, autoscalingRunnerSet, log)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to clean up ephemeral runner sets")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
if !done {
|
||||
log.Info("Waiting for ephemeral runner sets to be deleted")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
log.Info("Removing finalizer")
|
||||
err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
|
||||
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
|
||||
})
|
||||
if err != nil && !kerrors.IsNotFound(err) {
|
||||
log.Error(err, "Failed to update autoscaling runner set without finalizer")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("Successfully removed finalizer after cleanup")
|
||||
}
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
if !controllerutil.ContainsFinalizer(autoscalingRunnerSet, autoscalingRunnerSetFinalizerName) {
|
||||
log.Info("Adding finalizer")
|
||||
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
|
||||
controllerutil.AddFinalizer(obj, autoscalingRunnerSetFinalizerName)
|
||||
}); err != nil {
|
||||
log.Error(err, "Failed to update autoscaling runner set with finalizer added")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("Successfully added finalizer")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
scaleSetIdRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdKey]
|
||||
if !ok {
|
||||
// Need to create a new runner scale set on Actions service
|
||||
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
|
||||
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
|
||||
}
|
||||
|
||||
if id, err := strconv.Atoi(scaleSetIdRaw); err != nil || id <= 0 {
|
||||
log.Info("Runner scale set id annotation is not an id, or is <= 0. Creating a new runner scale set.")
|
||||
// something modified the scaleSetId. Try to create one
|
||||
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
|
||||
}
|
||||
|
||||
secret := new(corev1.Secret)
|
||||
if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, secret); err != nil {
|
||||
log.Error(err, "Failed to find GitHub config secret.",
|
||||
"namespace", autoscalingRunnerSet.Namespace,
|
||||
"name", autoscalingRunnerSet.Spec.GitHubConfigSecret)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to list existing ephemeral runner sets")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
latestRunnerSet := existingRunnerSets.latest()
|
||||
if latestRunnerSet == nil {
|
||||
log.Info("Latest runner set does not exist. Creating a new runner set.")
|
||||
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
|
||||
}
|
||||
|
||||
desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
|
||||
for _, runnerSet := range existingRunnerSets.all() {
|
||||
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash])
|
||||
}
|
||||
|
||||
if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] {
|
||||
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set ")
|
||||
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
|
||||
}
|
||||
|
||||
oldRunnerSets := existingRunnerSets.old()
|
||||
if len(oldRunnerSets) > 0 {
|
||||
log.Info("Cleanup old ephemeral runner sets", "count", len(oldRunnerSets))
|
||||
err := r.deleteEphemeralRunnerSets(ctx, oldRunnerSets, log)
|
||||
if err != nil {
|
||||
log.Error(err, "Failed to clean up old runner sets")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure the AutoscalingListener is up and running in the controller namespace
|
||||
listener := new(v1alpha1.AutoscalingListener)
|
||||
if err := r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, listener); err != nil {
|
||||
if kerrors.IsNotFound(err) {
|
||||
// We don't have a listener
|
||||
log.Info("Creating a new AutoscalingListener for the runner set", "ephemeralRunnerSetName", latestRunnerSet.Name)
|
||||
return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log)
|
||||
}
|
||||
log.Error(err, "Failed to get AutoscalingListener resource")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
// Our listener pod is out of date, so we need to delete it to get a new recreate.
|
||||
if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
|
||||
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
|
||||
if err := r.Delete(ctx, listener); err != nil {
|
||||
if kerrors.IsNotFound(err) {
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
log.Error(err, "Failed to delete AutoscalingListener resource")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
log.Info("Deleted RunnerScaleSetListener since existing one is out of date")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
// Update the status of autoscaling runner set.
|
||||
if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
|
||||
if err := patch(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
|
||||
obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas
|
||||
}); err != nil {
|
||||
log.Error(err, "Failed to update autoscaling runner set status with current runner count")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) cleanupListener(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
|
||||
logger.Info("Cleaning up the listener")
|
||||
var listener v1alpha1.AutoscalingListener
|
||||
err = r.Get(ctx, client.ObjectKey{Namespace: r.ControllerNamespace, Name: scaleSetListenerName(autoscalingRunnerSet)}, &listener)
|
||||
switch {
|
||||
case err == nil:
|
||||
if listener.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
logger.Info("Deleting the listener")
|
||||
if err := r.Delete(ctx, &listener); err != nil {
|
||||
return false, fmt.Errorf("failed to delete listener: %v", err)
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
case err != nil && !kerrors.IsNotFound(err):
|
||||
return false, fmt.Errorf("failed to get listener: %v", err)
|
||||
}
|
||||
|
||||
logger.Info("Listener is deleted")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) cleanupEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (done bool, err error) {
|
||||
logger.Info("Cleaning up ephemeral runner sets")
|
||||
runnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
|
||||
}
|
||||
if runnerSets.empty() {
|
||||
logger.Info("All ephemeral runner sets are deleted")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
logger.Info("Deleting all ephemeral runner sets", "count", runnerSets.count())
|
||||
if err := r.deleteEphemeralRunnerSets(ctx, runnerSets.all(), logger); err != nil {
|
||||
return false, fmt.Errorf("failed to delete ephemeral runner sets: %v", err)
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.Context, oldRunnerSets []v1alpha1.EphemeralRunnerSet, logger logr.Logger) error {
|
||||
for i := range oldRunnerSets {
|
||||
rs := &oldRunnerSets[i]
|
||||
// already deleted but contains finalizer so it still exists
|
||||
if !rs.ObjectMeta.DeletionTimestamp.IsZero() {
|
||||
logger.Info("Skip ephemeral runner set since it is already marked for deletion", "name", rs.Name)
|
||||
continue
|
||||
}
|
||||
logger.Info("Deleting ephemeral runner set", "name", rs.Name)
|
||||
if err := r.Delete(ctx, rs); err != nil {
|
||||
return fmt.Errorf("failed to delete EphemeralRunnerSet resource: %v", err)
|
||||
}
|
||||
logger.Info("Deleted ephemeral runner set", "name", rs.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
|
||||
logger.Info("Creating a new runner scale set")
|
||||
actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to initialize Actions service client for creating a new runner scale set")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
runnerScaleSet, err := actionsClient.GetRunnerScaleSet(ctx, autoscalingRunnerSet.Name)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to get runner scale set from Actions service")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
if runnerScaleSet == nil {
|
||||
runnerGroupId := 1
|
||||
if len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 {
|
||||
runnerGroup, err := actionsClient.GetRunnerGroupByName(ctx, autoscalingRunnerSet.Spec.RunnerGroup)
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to get runner group by name", "runnerGroup", autoscalingRunnerSet.Spec.RunnerGroup)
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
runnerGroupId = int(runnerGroup.ID)
|
||||
}
|
||||
|
||||
runnerScaleSet, err = actionsClient.CreateRunnerScaleSet(
|
||||
ctx,
|
||||
&actions.RunnerScaleSet{
|
||||
Name: autoscalingRunnerSet.Name,
|
||||
RunnerGroupId: runnerGroupId,
|
||||
Labels: []actions.Label{
|
||||
{
|
||||
Name: autoscalingRunnerSet.Name,
|
||||
Type: "System",
|
||||
},
|
||||
},
|
||||
RunnerSetting: actions.RunnerSetting{
|
||||
Ephemeral: true,
|
||||
DisableUpdate: true,
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error(err, "Failed to create a new runner scale set on Actions service")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("Created/Reused a runner scale set", "id", runnerScaleSet.Id)
|
||||
if autoscalingRunnerSet.Annotations == nil {
|
||||
autoscalingRunnerSet.Annotations = map[string]string{}
|
||||
}
|
||||
|
||||
autoscalingRunnerSet.Annotations[runnerScaleSetIdKey] = strconv.Itoa(runnerScaleSet.Id)
|
||||
logger.Info("Adding runner scale set ID as an annotation")
|
||||
if err := r.Update(ctx, autoscalingRunnerSet); err != nil {
|
||||
logger.Error(err, "Failed to add runner scale set ID")
|
||||
return ctrl.Result{}, err
|
||||
}
|
||||
|
||||
logger.Info("Updated with runner scale set ID as an annotation")
|
||||
return ctrl.Result{}, nil
|
||||
}
|
||||
|
||||
func (r *AutoscalingRunnerSetReconciler) createEphemeralRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, log logr.Logger) (ctrl.Result, error) {
	desiredRunnerSet, err := r.resourceBuilder.newEphemeralRunnerSet(autoscalingRunnerSet)
	if err != nil {
		log.Error(err, "Could not create EphemeralRunnerSet")
		return ctrl.Result{}, err
	}

	if err := ctrl.SetControllerReference(autoscalingRunnerSet, desiredRunnerSet, r.Scheme); err != nil {
		log.Error(err, "Failed to set controller reference to a new EphemeralRunnerSet")
		return ctrl.Result{}, err
	}

	log.Info("Creating a new EphemeralRunnerSet resource", "name", desiredRunnerSet.Name)
	if err := r.Create(ctx, desiredRunnerSet); err != nil {
		log.Error(err, "Failed to create EphemeralRunnerSet resource")
		return ctrl.Result{}, err
	}

	log.Info("Created a new EphemeralRunnerSet resource", "name", desiredRunnerSet.Name)
	return ctrl.Result{}, nil
}

func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (ctrl.Result, error) {
	var imagePullSecrets []corev1.LocalObjectReference
	for _, imagePullSecret := range r.DefaultRunnerScaleSetListenerImagePullSecrets {
		imagePullSecrets = append(imagePullSecrets, corev1.LocalObjectReference{
			Name: imagePullSecret,
		})
	}

	autoscalingListener, err := r.resourceBuilder.newAutoScalingListener(autoscalingRunnerSet, ephemeralRunnerSet, r.ControllerNamespace, r.DefaultRunnerScaleSetListenerImage, imagePullSecrets)
	if err != nil {
		log.Error(err, "Could not create AutoscalingListener spec")
		return ctrl.Result{}, err
	}

	log.Info("Creating a new AutoscalingListener resource", "name", autoscalingListener.Name, "namespace", autoscalingListener.Namespace)
	if err := r.Create(ctx, autoscalingListener); err != nil {
		log.Error(err, "Failed to create AutoscalingListener resource")
		return ctrl.Result{}, err
	}

	log.Info("Created a new AutoscalingListener resource", "name", autoscalingListener.Name, "namespace", autoscalingListener.Namespace)
	return ctrl.Result{}, nil
}

func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
	list := new(v1alpha1.EphemeralRunnerSetList)
	if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil {
		return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
	}

	return &EphemeralRunnerSets{list: list}, nil
}

func (r *AutoscalingRunnerSetReconciler) actionsClientFor(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (actions.ActionsService, error) {
	var configSecret corev1.Secret
	if err := r.Get(ctx, types.NamespacedName{Namespace: autoscalingRunnerSet.Namespace, Name: autoscalingRunnerSet.Spec.GitHubConfigSecret}, &configSecret); err != nil {
		return nil, fmt.Errorf("failed to find GitHub config secret: %w", err)
	}

	return r.ActionsClient.GetClientFromSecret(ctx, autoscalingRunnerSet.Spec.GitHubConfigUrl, autoscalingRunnerSet.Namespace, configSecret.Data)
}

// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
	groupVersionIndexer := func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()
		owner := metav1.GetControllerOf(rawObj)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingRunnerSet" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}

	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil {
		return err
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.AutoscalingRunnerSet{}).
		Owns(&v1alpha1.EphemeralRunnerSet{}).
		Watches(&source.Kind{Type: &v1alpha1.AutoscalingListener{}}, handler.EnqueueRequestsFromMapFunc(
			func(o client.Object) []reconcile.Request {
				autoscalingListener := o.(*v1alpha1.AutoscalingListener)
				return []reconcile.Request{
					{
						NamespacedName: types.NamespacedName{
							Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
							Name:      autoscalingListener.Spec.AutoscalingRunnerSetName,
						},
					},
				}
			},
		)).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}

// NOTE: if this logic should be reused for other resources,
// consider using generics
type EphemeralRunnerSets struct {
	list   *v1alpha1.EphemeralRunnerSetList
	sorted bool
}

func (rs *EphemeralRunnerSets) latest() *v1alpha1.EphemeralRunnerSet {
	if rs.empty() {
		return nil
	}
	if !rs.sorted {
		rs.sort()
	}
	return rs.list.Items[0].DeepCopy()
}

func (rs *EphemeralRunnerSets) old() []v1alpha1.EphemeralRunnerSet {
	if rs.empty() {
		return nil
	}
	if !rs.sorted {
		rs.sort()
	}
	copy := rs.list.DeepCopy()
	return copy.Items[1:]
}

func (rs *EphemeralRunnerSets) all() []v1alpha1.EphemeralRunnerSet {
	if rs.empty() {
		return nil
	}
	copy := rs.list.DeepCopy()
	return copy.Items
}

func (rs *EphemeralRunnerSets) empty() bool {
	return rs.list == nil || len(rs.list.Items) == 0
}

func (rs *EphemeralRunnerSets) sort() {
	sort.Slice(rs.list.Items, func(i, j int) bool {
		return rs.list.Items[i].GetCreationTimestamp().After(rs.list.Items[j].GetCreationTimestamp().Time)
	})
}

func (rs *EphemeralRunnerSets) count() int {
	return len(rs.list.Items)
}
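
// Minimal usage sketch (an illustration, not part of the original source; the
// actual call site is outside this diff): the helpers above sort newest-first
// by creation timestamp, so a rolling update would presumably keep latest()
// and delete everything returned by old():
//
//	sets, _ := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
//	newest := sets.latest() // deep copy of the most recent EphemeralRunnerSet
//	for _, outdated := range sets.old() {
//		_ = r.Delete(ctx, &outdated) // hypothetical cleanup of superseded sets
//	}
//	_ = newest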
@@ -0,0 +1,367 @@
package actionsgithubcom

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/github/actions/fake"
)

const (
	autoScalingRunnerSetTestTimeout     = time.Second * 5
	autoScalingRunnerSetTestInterval    = time.Millisecond * 250
	autoScalingRunnerSetTestGitHubToken = "gh_token"
)

var _ = Describe("Test AutoScalingRunnerSet controller", func() {
	var ctx context.Context
	var cancel context.CancelFunc
	autoScalingNS := new(corev1.Namespace)
	autoScalingRunnerSet := new(actionsv1alpha1.AutoscalingRunnerSet)
	configSecret := new(corev1.Secret)

	BeforeEach(func() {
		ctx, cancel = context.WithCancel(context.TODO())
		autoScalingNS = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling" + RandStringRunes(5)},
		}

		err := k8sClient.Create(ctx, autoScalingNS)
		Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for AutoScalingRunnerSet")

		configSecret = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "github-config-secret",
				Namespace: autoScalingNS.Name,
			},
			Data: map[string][]byte{
				"github_token": []byte(autoScalingRunnerSetTestGitHubToken),
			},
		}

		err = k8sClient.Create(ctx, configSecret)
		Expect(err).NotTo(HaveOccurred(), "failed to create config secret")

		mgr, err := ctrl.NewManager(cfg, ctrl.Options{
			Namespace:          autoScalingNS.Name,
			MetricsBindAddress: "0",
		})
		Expect(err).NotTo(HaveOccurred(), "failed to create manager")

		controller := &AutoscalingRunnerSetReconciler{
			Client:                             mgr.GetClient(),
			Scheme:                             mgr.GetScheme(),
			Log:                                logf.Log,
			ControllerNamespace:                autoScalingNS.Name,
			DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
			ActionsClient:                      fake.NewMultiClient(),
		}
		err = controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		min := 1
		max := 10
		autoScalingRunnerSet = &actionsv1alpha1.AutoscalingRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoScalingNS.Name,
			},
			Spec: actionsv1alpha1.AutoscalingRunnerSetSpec{
				GitHubConfigUrl:    "https://github.com/owner/repo",
				GitHubConfigSecret: configSecret.Name,
				MaxRunners:         &max,
				MinRunners:         &min,
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Name:  "runner",
								Image: "ghcr.io/actions/runner",
							},
						},
					},
				},
			},
		}

		err = k8sClient.Create(ctx, autoScalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")

		go func() {
			defer GinkgoRecover()

			err := mgr.Start(ctx)
			Expect(err).NotTo(HaveOccurred(), "failed to start manager")
		}()
	})

	AfterEach(func() {
		defer cancel()

		err := k8sClient.Delete(ctx, autoScalingNS)
		Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for AutoScalingRunnerSet")
	})

	Context("When creating a new AutoScalingRunnerSet", func() {
		It("It should create/add all required resources for a new AutoScalingRunnerSet (finalizer, runnerscaleset, ephemeralrunnerset, listener)", func() {
			// Check if finalizer is added
			created := new(actionsv1alpha1.AutoscalingRunnerSet)
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, created)
					if err != nil {
						return "", err
					}
					if len(created.Finalizers) == 0 {
						return "", nil
					}
					return created.Finalizers[0], nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")

			// Check if runner scale set is created on service
			Eventually(
				func() (string, error) {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, created)
					if err != nil {
						return "", err
					}

					if _, ok := created.Annotations[runnerScaleSetIdKey]; !ok {
						return "", nil
					}

					return created.Annotations[runnerScaleSetIdKey], nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(BeEquivalentTo("1"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation")

			// Check if ephemeral runner set is created
			Eventually(
				func() (int, error) {
					runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList)
					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
					if err != nil {
						return 0, err
					}

					return len(runnerSetList.Items), nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created")

			// Check if listener is created
			Eventually(
				func() error {
					return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener))
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created")

			// Check if status is updated
			runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList)
			err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
			Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
			Expect(len(runnerSetList.Items)).To(BeEquivalentTo(1), "Only one EphemeralRunnerSet should be created")
			runnerSet := runnerSetList.Items[0]
			statusUpdate := runnerSet.DeepCopy()
			statusUpdate.Status.CurrentReplicas = 100
			err = k8sClient.Status().Patch(ctx, statusUpdate, client.MergeFrom(&runnerSet))
			Expect(err).NotTo(HaveOccurred(), "failed to patch EphemeralRunnerSet status")

			Eventually(
				func() (int, error) {
					updated := new(actionsv1alpha1.AutoscalingRunnerSet)
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, updated)
					if err != nil {
						return 0, fmt.Errorf("failed to get AutoScalingRunnerSet: %w", err)
					}
					return updated.Status.CurrentRunners, nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(100), "AutoScalingRunnerSet status should be updated")
		})
	})

	Context("When deleting a new AutoScalingRunnerSet", func() {
		It("It should cleanup all resources for a deleting AutoScalingRunnerSet before removing it", func() {
			// Wait till the listener is created
			Eventually(
				func() error {
					return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener))
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created")

			// Delete the AutoScalingRunnerSet
			err := k8sClient.Delete(ctx, autoScalingRunnerSet)
			Expect(err).NotTo(HaveOccurred(), "failed to delete AutoScalingRunnerSet")

			// Check if the listener is deleted
			Eventually(
				func() error {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingListener))
					if err != nil && errors.IsNotFound(err) {
						return nil
					}

					return fmt.Errorf("listener is not deleted")
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be deleted")

			// Check if all the EphemeralRunnerSets are deleted
			Eventually(
				func() error {
					runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList)
					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
					if err != nil {
						return err
					}

					if len(runnerSetList.Items) != 0 {
						return fmt.Errorf("EphemeralRunnerSet is not deleted, count=%v", len(runnerSetList.Items))
					}

					return nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(Succeed(), "All EphemeralRunnerSet should be deleted")

			// Check if the AutoScalingRunnerSet is deleted
			Eventually(
				func() error {
					err := k8sClient.Get(ctx, client.ObjectKey{Name: autoScalingRunnerSet.Name, Namespace: autoScalingRunnerSet.Namespace}, new(actionsv1alpha1.AutoscalingRunnerSet))
					if err != nil && errors.IsNotFound(err) {
						return nil
					}

					return fmt.Errorf("AutoScalingRunnerSet is not deleted")
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(Succeed(), "AutoScalingRunnerSet should be deleted")
		})
	})

	Context("When updating a new AutoScalingRunnerSet", func() {
		It("It should re-create EphemeralRunnerSet and Listener as needed when updating AutoScalingRunnerSet", func() {
			// Wait till the listener is created
			listener := new(actionsv1alpha1.AutoscalingListener)
			Eventually(
				func() error {
					return k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener)
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(Succeed(), "Listener should be created")

			runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList)
			err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
			Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
			Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet")
			runnerSet := runnerSetList.Items[0]

			// Update the AutoScalingRunnerSet.Spec.Template
			// This should trigger re-creation of EphemeralRunnerSet and Listener
			patched := autoScalingRunnerSet.DeepCopy()
			patched.Spec.Template.Spec.PriorityClassName = "test-priority-class"
			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoScalingRunnerSet))
			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")
			autoScalingRunnerSet = patched.DeepCopy()

			// We should create a new EphemeralRunnerSet and delete the old one; eventually we will have only one EphemeralRunnerSet
			Eventually(
				func() (string, error) {
					runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList)
					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
					if err != nil {
						return "", err
					}

					if len(runnerSetList.Items) != 1 {
						return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
					}

					return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")

			// We should create a new listener
			Eventually(
				func() (string, error) {
					listener := new(actionsv1alpha1.AutoscalingListener)
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener)
					if err != nil {
						return "", err
					}

					return listener.Spec.EphemeralRunnerSetName, nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Name), "New Listener should be created")

			// Only update the Spec for the AutoScalingListener
			// This should trigger re-creation of the Listener only
			runnerSetList = new(actionsv1alpha1.EphemeralRunnerSetList)
			err = k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
			Expect(err).NotTo(HaveOccurred(), "failed to list EphemeralRunnerSet")
			Expect(len(runnerSetList.Items)).To(Equal(1), "There should be 1 EphemeralRunnerSet")
			runnerSet = runnerSetList.Items[0]

			listener = new(actionsv1alpha1.AutoscalingListener)
			err = k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener)
			Expect(err).NotTo(HaveOccurred(), "failed to get Listener")

			patched = autoScalingRunnerSet.DeepCopy()
			min := 10
			patched.Spec.MinRunners = &min
			err = k8sClient.Patch(ctx, patched, client.MergeFrom(autoScalingRunnerSet))
			Expect(err).NotTo(HaveOccurred(), "failed to patch AutoScalingRunnerSet")

			// We should not re-create a new EphemeralRunnerSet
			Consistently(
				func() (string, error) {
					runnerSetList := new(actionsv1alpha1.EphemeralRunnerSetList)
					err := k8sClient.List(ctx, runnerSetList, client.InNamespace(autoScalingRunnerSet.Namespace))
					if err != nil {
						return "", err
					}

					if len(runnerSetList.Items) != 1 {
						return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
					}

					return string(runnerSetList.Items[0].UID), nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).Should(BeEquivalentTo(string(runnerSet.UID)), "New EphemeralRunnerSet should not be created")

			// We should only re-create a new listener
			Eventually(
				func() (string, error) {
					listener := new(actionsv1alpha1.AutoscalingListener)
					err := k8sClient.Get(ctx, client.ObjectKey{Name: scaleSetListenerName(autoScalingRunnerSet), Namespace: autoScalingRunnerSet.Namespace}, listener)
					if err != nil {
						return "", err
					}

					return string(listener.UID), nil
				},
				autoScalingRunnerSetTestTimeout,
				autoScalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(string(listener.UID)), "New Listener should be created")
		})
	})
})
22
controllers/actions.github.com/clientutil.go
Normal file
@@ -0,0 +1,22 @@
package actionsgithubcom

import (
	"context"

	kclient "sigs.k8s.io/controller-runtime/pkg/client"
)

type object[T kclient.Object] interface {
	kclient.Object
	DeepCopy() T
}

type patcher interface {
	Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.PatchOption) error
}

func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(obj T)) error {
	original := obj.DeepCopy()
	update(obj)
	return client.Patch(ctx, obj, kclient.MergeFrom(original))
}
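
// Usage sketch (a minimal illustration; this exact pattern appears in the
// ephemeral runner controller later in this commit): mutate the object inside
// the closure and patch() submits a merge patch against the pre-mutation copy:
//
//	err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
//		controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
//	})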
10
controllers/actions.github.com/constants.go
Normal file
@@ -0,0 +1,10 @@
package actionsgithubcom

const (
	LabelKeyRunnerTemplateHash = "runner-template-hash"
	LabelKeyPodTemplateHash    = "pod-template-hash"
)

const (
	EnvVarRunnerJITConfig = "ACTIONS_RUNNER_INPUT_JITCONFIG"
)
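
// Hedged sketch of how EnvVarRunnerJITConfig is presumably wired into the
// runner container by the pod builder (the builder itself is not shown in this
// section; every name here other than the constant is an assumption):
//
//	corev1.EnvVar{
//		Name: EnvVarRunnerJITConfig,
//		ValueFrom: &corev1.EnvVarSource{
//			SecretKeyRef: &corev1.SecretKeySelector{
//				LocalObjectReference: corev1.LocalObjectReference{Name: runner.Name}, // assumed: per-runner jit secret
//				Key:                  jitTokenKey,                                    // key checked by the tests below
//			},
//		},
//	}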
645
controllers/actions.github.com/ephemeralrunner_controller.go
Normal file
@@ -0,0 +1,645 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package actionsgithubcom

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"strings"
	"time"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
	"go.uber.org/multierr"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

const (
	// EphemeralRunnerContainerName is the name of the runner container.
	// It represents the name of the container running the self-hosted runner image.
	EphemeralRunnerContainerName = "runner"

	ephemeralRunnerFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
)

// EphemeralRunnerReconciler reconciles an EphemeralRunner object
type EphemeralRunnerReconciler struct {
	client.Client
	Log             logr.Logger
	Scheme          *runtime.Scheme
	ActionsClient   actions.MultiClient
	resourceBuilder resourceBuilder
}

// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=pods,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=secrets,verbs=get;list;watch;delete
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=core,resources=serviceaccounts,verbs=create;delete;get
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=roles,verbs=create;delete;get
// +kubebuilder:rbac:groups=rbac.authorization.k8s.io,resources=rolebindings,verbs=create;delete;get

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// For more details, check Reconcile and its Result here:
// - https://pkg.go.dev/sigs.k8s.io/controller-runtime@v0.6.4/pkg/reconcile
func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("ephemeralrunner", req.NamespacedName)

	ephemeralRunner := new(v1alpha1.EphemeralRunner)
	if err := r.Get(ctx, req.NamespacedName, ephemeralRunner); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	if !ephemeralRunner.ObjectMeta.DeletionTimestamp.IsZero() {
		if controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
			log.Info("Finalizing ephemeral runner")
			done, err := r.cleanupResources(ctx, ephemeralRunner, log)
			if err != nil {
				log.Error(err, "Failed to clean up ephemeral runner owned resources")
				return ctrl.Result{}, err
			}
			if !done {
				log.Info("Waiting for ephemeral runner owned resources to be deleted")
				return ctrl.Result{}, nil
			}

			done, err = r.cleanupContainerHooksResources(ctx, ephemeralRunner, log)
			if err != nil {
				log.Error(err, "Failed to clean up container hooks resources")
				return ctrl.Result{}, err
			}
			if !done {
				log.Info("Waiting for container hooks resources to be deleted")
				return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
			}

			log.Info("Removing finalizer")
			err = patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
				controllerutil.RemoveFinalizer(obj, ephemeralRunnerFinalizerName)
			})
			if err != nil && !kerrors.IsNotFound(err) {
				log.Error(err, "Failed to update ephemeral runner without the finalizer")
				return ctrl.Result{}, err
			}

			log.Info("Successfully removed finalizer after cleanup")
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, nil
	}

	if !controllerutil.ContainsFinalizer(ephemeralRunner, ephemeralRunnerFinalizerName) {
		log.Info("Adding finalizer")
		if err := patch(ctx, r.Client, ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
			controllerutil.AddFinalizer(obj, ephemeralRunnerFinalizerName)
		}); err != nil {
			log.Error(err, "Failed to update with finalizer set")
			return ctrl.Result{}, err
		}

		log.Info("Successfully added finalizer")
		return ctrl.Result{}, nil
	}

	if ephemeralRunner.Status.Phase == corev1.PodSucceeded || ephemeralRunner.Status.Phase == corev1.PodFailed {
		// Stop reconciling on this object.
		// The EphemeralRunnerSet is responsible for cleaning it up.
		log.Info("EphemeralRunner has already finished. Stopping reconciliation and waiting for EphemeralRunnerSet to clean it up", "phase", ephemeralRunner.Status.Phase)
		return ctrl.Result{}, nil
	}

	if ephemeralRunner.Status.RunnerId == 0 {
		log.Info("Creating new ephemeral runner registration and updating status with runner config")
		return r.updateStatusWithRunnerConfig(ctx, ephemeralRunner, log)
	}

	secret := new(corev1.Secret)
	if err := r.Get(ctx, req.NamespacedName, secret); err != nil {
		if !kerrors.IsNotFound(err) {
			log.Error(err, "Failed to fetch secret")
			return ctrl.Result{}, err
		}
		// create secret if not created
		log.Info("Creating new ephemeral runner secret for jitconfig.")
		return r.createSecret(ctx, ephemeralRunner, log)
	}

	pod := new(corev1.Pod)
	if err := r.Get(ctx, req.NamespacedName, pod); err != nil {
		switch {
		case !kerrors.IsNotFound(err):
			log.Error(err, "Failed to fetch the pod")
			return ctrl.Result{}, err

		case len(ephemeralRunner.Status.Failures) > 5:
			log.Info("EphemeralRunner has failed more than 5 times. Marking it as failed")
			if err := r.markAsFailed(ctx, ephemeralRunner, log); err != nil {
				log.Error(err, "Failed to set ephemeral runner to phase Failed")
				return ctrl.Result{}, err
			}
			return ctrl.Result{}, nil

		default:
			// Pod was not found. Create if the pod has never been created
			log.Info("Creating new EphemeralRunner pod.")
			return r.createPod(ctx, ephemeralRunner, secret, log)
		}
	}

	cs := runnerContainerStatus(pod)
	switch {
	case cs == nil:
		// starting, no container state yet
		log.Info("Waiting for runner container status to be available")
		return ctrl.Result{}, nil

	case cs.State.Terminated == nil: // still running or evicted
		if pod.Status.Phase == corev1.PodFailed && pod.Status.Reason == "Evicted" {
			log.Info("Pod set the termination phase, but container state is not terminated. Deleting pod",
				"PodPhase", pod.Status.Phase,
				"PodReason", pod.Status.Reason,
				"PodMessage", pod.Status.Message,
			)

			if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
				log.Error(err, "failed to delete pod as failed on pod.Status.Phase: Failed")
				return ctrl.Result{}, err
			}
			return ctrl.Result{}, nil
		}

		log.Info("Ephemeral runner container is still running")
		if err := r.updateRunStatusFromPod(ctx, ephemeralRunner, pod, log); err != nil {
			log.Info("Failed to update ephemeral runner status. Requeue to not miss this event")
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil

	case cs.State.Terminated.ExitCode != 0: // failed
		log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
		if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
			log.Error(err, "Failed to delete runner pod on failure")
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil

	default:
		// Pod succeeded. We double-check with the service if the runner exists.
		// The reason is that the image can potentially exit with status 0 without
		// ever picking up a job.
		existsInService, err := r.runnerRegisteredWithService(ctx, ephemeralRunner.DeepCopy(), log)
		if err != nil {
			log.Error(err, "Failed to check if runner is registered with the service")
			return ctrl.Result{}, err
		}
		if !existsInService {
			// the runner does not exist in the service, so it must be done
			log.Info("Ephemeral runner has finished since it does not exist in the service anymore")
			if err := r.markAsFinished(ctx, ephemeralRunner, log); err != nil {
				log.Error(err, "Failed to mark ephemeral runner as finished")
				return ctrl.Result{}, err
			}
			return ctrl.Result{}, nil
		}

		// The runner still exists. This can happen if the pod exited with 0 but the
		// runner failed to start properly.
		log.Info("Ephemeral runner pod has finished, but the runner still exists in the service. Deleting the pod to restart it.")
		if err := r.deletePodAsFailed(ctx, ephemeralRunner, pod, log); err != nil {
			log.Error(err, "failed to delete a pod that still exists in the service")
			return ctrl.Result{}, err
		}
		return ctrl.Result{}, nil
	}
}

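// cleanupResources deletes the runner pod and the per-runner jitconfig secret
// owned by this EphemeralRunner. It returns deleted=false until both objects
// are confirmed gone, so the finalizer path in Reconcile calls it again on
// subsequent reconciliations.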
func (r *EphemeralRunnerReconciler) cleanupResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (deleted bool, err error) {
	log.Info("Cleaning up the runner pod")
	pod := new(corev1.Pod)
	err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, pod)
	switch {
	case err == nil:
		if pod.ObjectMeta.DeletionTimestamp.IsZero() {
			log.Info("Deleting the runner pod")
			if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
				return false, fmt.Errorf("failed to delete pod: %v", err)
			}
		}
		return false, nil
	case err != nil && !kerrors.IsNotFound(err):
		return false, err
	}
	log.Info("Pod is deleted")

	log.Info("Cleaning up the runner jitconfig secret")
	secret := new(corev1.Secret)
	err = r.Get(ctx, types.NamespacedName{Namespace: ephemeralRunner.Namespace, Name: ephemeralRunner.Name}, secret)
	switch {
	case err == nil:
		if secret.ObjectMeta.DeletionTimestamp.IsZero() {
			log.Info("Deleting the jitconfig secret")
			if err := r.Delete(ctx, secret); err != nil && !kerrors.IsNotFound(err) {
				return false, fmt.Errorf("failed to delete secret: %v", err)
			}
		}
		return false, nil
	case err != nil && !kerrors.IsNotFound(err):
		return false, err
	}
	log.Info("Secret is deleted")

	return true, nil
}

func (r *EphemeralRunnerReconciler) cleanupContainerHooksResources(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
	log.Info("Cleaning up runner linked pods")
	done, err = r.cleanupRunnerLinkedPods(ctx, ephemeralRunner, log)
	if err != nil {
		return false, fmt.Errorf("failed to clean up runner linked pods: %v", err)
	}

	if !done {
		return false, nil
	}

	log.Info("Cleaning up runner linked secrets")
	done, err = r.cleanupRunnerLinkedSecrets(ctx, ephemeralRunner, log)
	if err != nil {
		return false, err
	}

	return done, nil
}

func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedPods(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
	runnerLinkedLabels := client.MatchingLabels(
		map[string]string{
			"runner-pod": ephemeralRunner.Name,
		},
	)
	var runnerLinkedPodList corev1.PodList
	err = r.List(ctx, &runnerLinkedPodList, client.InNamespace(ephemeralRunner.Namespace), runnerLinkedLabels)
	if err != nil {
		return false, fmt.Errorf("failed to list runner-linked pods: %v", err)
	}

	if len(runnerLinkedPodList.Items) == 0 {
		return true, nil
	}

	log.Info("Deleting container hooks runner-linked pods", "count", len(runnerLinkedPodList.Items))

	var errs []error
	for i := range runnerLinkedPodList.Items {
		linkedPod := &runnerLinkedPodList.Items[i]
		if !linkedPod.ObjectMeta.DeletionTimestamp.IsZero() {
			continue
		}

		log.Info("Deleting container hooks runner-linked pod", "name", linkedPod.Name)
		if err := r.Delete(ctx, linkedPod); err != nil && !kerrors.IsNotFound(err) {
			errs = append(errs, fmt.Errorf("failed to delete runner linked pod %q: %v", linkedPod.Name, err))
		}
	}

	return false, multierr.Combine(errs...)
}

func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (done bool, err error) {
	runnerLinkedLabels := client.MatchingLabels(
		map[string]string{
			"runner-pod": ephemeralRunner.ObjectMeta.Name,
		},
	)
	var runnerLinkedSecretList corev1.SecretList
	err = r.List(ctx, &runnerLinkedSecretList, client.InNamespace(ephemeralRunner.Namespace), runnerLinkedLabels)
	if err != nil {
		return false, fmt.Errorf("failed to list runner-linked secrets: %w", err)
	}

	if len(runnerLinkedSecretList.Items) == 0 {
		return true, nil
	}

	log.Info("Deleting container hooks runner-linked secrets", "count", len(runnerLinkedSecretList.Items))

	var errs []error
	for i := range runnerLinkedSecretList.Items {
		s := &runnerLinkedSecretList.Items[i]
		if !s.ObjectMeta.DeletionTimestamp.IsZero() {
			continue
		}

		log.Info("Deleting container hooks runner-linked secret", "name", s.Name)
		if err := r.Delete(ctx, s); err != nil && !kerrors.IsNotFound(err) {
			errs = append(errs, fmt.Errorf("failed to delete runner linked secret %q: %v", s.Name, err))
		}
	}

	return false, multierr.Combine(errs...)
}

func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
	log.Info("Updating ephemeral runner status to Failed")
	if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
		obj.Status.Phase = corev1.PodFailed
		obj.Status.Reason = "TooManyPodFailures"
		obj.Status.Message = "Pod has failed to start more than 5 times"
	}); err != nil {
		return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %v", err)
	}

	log.Info("Removing the runner from the service")
	if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
		return fmt.Errorf("failed to remove the runner from service: %v", err)
	}

	log.Info("EphemeralRunner is marked as Failed and deleted from the service")
	return nil
}

func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
	log.Info("Updating ephemeral runner status to Finished")
	if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
		obj.Status.Phase = corev1.PodSucceeded
	}); err != nil {
		return fmt.Errorf("failed to update ephemeral runner with status finished: %v", err)
	}

	log.Info("EphemeralRunner status is marked as Finished")
	return nil
}

// deletePodAsFailed is responsible for deleting the pod and updating .Status.Failures to track the failure count.
// It should not be responsible for setting the status to Failed.
func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
	if pod.ObjectMeta.DeletionTimestamp.IsZero() {
		log.Info("Deleting the ephemeral runner pod", "podId", pod.UID)
		if err := r.Delete(ctx, pod); err != nil && !kerrors.IsNotFound(err) {
			return fmt.Errorf("failed to delete pod with status failed: %v", err)
		}
	}

	log.Info("Updating ephemeral runner status to track the failure count")
	if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
		if obj.Status.Failures == nil {
			obj.Status.Failures = make(map[string]bool)
		}
		obj.Status.Failures[string(pod.UID)] = true
		obj.Status.Ready = false
		obj.Status.Reason = pod.Status.Reason
		obj.Status.Message = pod.Status.Message
	}); err != nil {
		return fmt.Errorf("failed to update ephemeral runner status: failed attempts: %v", err)
	}

	log.Info("EphemeralRunner pod is deleted and status is updated with failure count")
	return nil
}

// updateStatusWithRunnerConfig fetches the runtime configuration needed by the runner.
// This method should always set .status.runnerId and .status.runnerJITConfig.
func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
	// Runner is not registered with the service. We need to register it first
	log.Info("Creating ephemeral runner JIT config")
	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunner)
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to get actions client for generating JIT config: %v", err)
	}

	jitSettings := &actions.RunnerScaleSetJitRunnerSetting{
		Name: ephemeralRunner.Name,
	}
	jitConfig, err := actionsClient.GenerateJitRunnerConfig(ctx, jitSettings, ephemeralRunner.Spec.RunnerScaleSetId)
	if err != nil {
		actionsError := &actions.ActionsError{}
		if !errors.As(err, &actionsError) {
			return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with generic error: %v", err)
		}

		if actionsError.StatusCode != http.StatusConflict ||
			!strings.Contains(actionsError.ExceptionName, "AgentExistsException") {
			return ctrl.Result{}, fmt.Errorf("failed to generate JIT config with Actions service error: %v", err)
		}

		// If a runner with the name we want already exists, it means either:
		// - We have a name collision.
		// - Our previous reconciliation loop failed to update the
		//   status with the runnerId and runnerJITConfig after `GenerateJitRunnerConfig`
		//   created the runner registration on the service.
		// We will try to get the runner and see if it belongs to this AutoScalingRunnerSet.
		// If so, we can simply delete the runner registration and create a new one.
		log.Info("Getting runner jit config failed with conflict error, trying to get the runner by name", "runnerName", ephemeralRunner.Name)
		existingRunner, err := actionsClient.GetRunnerByName(ctx, ephemeralRunner.Name)
		if err != nil {
			return ctrl.Result{}, fmt.Errorf("failed to get runner by name: %v", err)
		}

		if existingRunner == nil {
			log.Info("Runner with the same name does not exist, re-queuing the reconciliation")
			return ctrl.Result{Requeue: true}, nil
		}

		log.Info("Found the runner with the same name", "runnerId", existingRunner.Id, "runnerScaleSetId", existingRunner.RunnerScaleSetId)
		if existingRunner.RunnerScaleSetId == ephemeralRunner.Spec.RunnerScaleSetId {
			log.Info("Removing the runner with the same name")
			err := actionsClient.RemoveRunner(ctx, int64(existingRunner.Id))
			if err != nil {
				return ctrl.Result{}, fmt.Errorf("failed to remove runner from the service: %v", err)
			}

			log.Info("Removed the runner with the same name, re-queuing the reconciliation")
			return ctrl.Result{Requeue: true}, nil
		}

		// TODO: Do we want to mark the ephemeral runner as failed, and let the EphemeralRunnerSet clean it up, so we can recover from this situation?
		// The situation is that the EphemeralRunner's name is already used by something else to register a runner, and we can't take control back.
		return ctrl.Result{}, fmt.Errorf("a runner with the same name exists but doesn't belong to this RunnerScaleSet: %v", err)
	}
	log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)

	log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
	err = patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
		obj.Status.RunnerId = jitConfig.Runner.Id
		obj.Status.RunnerName = jitConfig.Runner.Name
		obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
	})
	if err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %v", err)
	}

	log.Info("Updated ephemeral runner status with runnerId and runnerJITConfig")
	return ctrl.Result{}, nil
}

func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret, log logr.Logger) (ctrl.Result, error) {
	log.Info("Creating new pod for ephemeral runner")
	newPod := r.resourceBuilder.newEphemeralRunnerPod(ctx, runner, secret)

	if err := ctrl.SetControllerReference(runner, newPod, r.Scheme); err != nil {
		log.Error(err, "Failed to set controller reference to a new pod")
		return ctrl.Result{}, err
	}

	log.Info("Created new pod spec for ephemeral runner")
	if err := r.Create(ctx, newPod); err != nil {
		log.Error(err, "Failed to create pod resource for ephemeral runner.")
		return ctrl.Result{}, err
	}

	log.Info("Created ephemeral runner pod",
		"runnerScaleSetId", runner.Spec.RunnerScaleSetId,
		"runnerName", runner.Status.RunnerName,
		"runnerId", runner.Status.RunnerId,
		"configUrl", runner.Spec.GitHubConfigUrl,
		"podName", newPod.Name)

	return ctrl.Result{}, nil
}

func (r *EphemeralRunnerReconciler) createSecret(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (ctrl.Result, error) {
	log.Info("Creating new secret for ephemeral runner")
	jitSecret := r.resourceBuilder.newEphemeralRunnerJitSecret(runner)

	if err := ctrl.SetControllerReference(runner, jitSecret, r.Scheme); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to set controller reference: %v", err)
	}

	log.Info("Created new secret spec for ephemeral runner")
	if err := r.Create(ctx, jitSecret); err != nil {
		return ctrl.Result{}, fmt.Errorf("failed to create jit secret: %v", err)
	}

	log.Info("Created ephemeral runner secret", "secretName", jitSecret.Name)
	return ctrl.Result{}, nil
}

// updateRunStatusFromPod is responsible for updating non-terminal statuses.
// It should never update the phase to Failed or Succeeded.
//
// The event should not be re-queued since the termination status should be set
// before proceeding with reconciliation logic
func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, pod *corev1.Pod, log logr.Logger) error {
	if pod.Status.Phase == corev1.PodSucceeded || pod.Status.Phase == corev1.PodFailed {
		return nil
	}
	if ephemeralRunner.Status.Phase == pod.Status.Phase {
		return nil
	}

	log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message)
	err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
		obj.Status.Phase = pod.Status.Phase
		obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning)
		obj.Status.Reason = pod.Status.Reason
		obj.Status.Message = pod.Status.Message
	})
	if err != nil {
		return fmt.Errorf("failed to update runner status for Phase/Reason/Message: %v", err)
	}

	log.Info("Updated ephemeral runner status with pod phase")
	return nil
}

func (r *EphemeralRunnerReconciler) actionsClientFor(ctx context.Context, runner *v1alpha1.EphemeralRunner) (actions.ActionsService, error) {
	secret := new(corev1.Secret)
	if err := r.Get(ctx, types.NamespacedName{Namespace: runner.Namespace, Name: runner.Spec.GitHubConfigSecret}, secret); err != nil {
		return nil, fmt.Errorf("failed to get secret: %w", err)
	}

	return r.ActionsClient.GetClientFromSecret(ctx, runner.Spec.GitHubConfigUrl, runner.Namespace, secret.Data)
}

// runnerRegisteredWithService checks if the runner is still registered with the service.
// It returns found=false and err=nil if the ephemeral runner does not exist in the GitHub service and should be deleted.
func (r EphemeralRunnerReconciler) runnerRegisteredWithService(ctx context.Context, runner *v1alpha1.EphemeralRunner, log logr.Logger) (found bool, err error) {
	actionsClient, err := r.actionsClientFor(ctx, runner)
	if err != nil {
		return false, fmt.Errorf("failed to get Actions client for ScaleSet: %w", err)
	}

	log.Info("Checking if runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
	_, err = actionsClient.GetRunner(ctx, int64(runner.Status.RunnerId))
	if err != nil {
		actionsError := &actions.ActionsError{}
		if !errors.As(err, &actionsError) {
			return false, err
		}

		if actionsError.StatusCode != http.StatusNotFound ||
			!strings.Contains(actionsError.ExceptionName, "AgentNotFoundException") {
			return false, fmt.Errorf("failed to check if runner exists in GitHub service: %v", err)
		}

		log.Info("Runner does not exist in GitHub service", "runnerId", runner.Status.RunnerId)
		return false, nil
	}

	log.Info("Runner exists in GitHub service", "runnerId", runner.Status.RunnerId)
	return true, nil
}

func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
	client, err := r.actionsClientFor(ctx, ephemeralRunner)
	if err != nil {
		return fmt.Errorf("failed to get actions client for runner: %v", err)
	}

	log.Info("Removing runner from the service", "runnerId", ephemeralRunner.Status.RunnerId)
	err = client.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId))
	if err != nil {
		return fmt.Errorf("failed to remove runner from the service: %v", err)
	}

	log.Info("Removed runner from the service", "runnerId", ephemeralRunner.Status.RunnerId)
	return nil
}

// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// TODO(nikola-jokic): Add indexing and filtering fields on corev1.Pod{}
	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.EphemeralRunner{}).
		Owns(&corev1.Pod{}).
		Owns(&corev1.Secret{}).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Named("ephemeral-runner-controller").
		Complete(r)
}

func runnerContainerStatus(pod *corev1.Pod) *corev1.ContainerStatus {
	for i := range pod.Status.ContainerStatuses {
		cs := &pod.Status.ContainerStatuses[i]
		if cs.Name == EphemeralRunnerContainerName {
			return cs
		}
	}
	return nil
}

@@ -0,0 +1,769 @@
|
||||
package actionsgithubcom
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
|
||||
"github.com/actions/actions-runner-controller/github/actions"
|
||||
|
||||
"github.com/actions/actions-runner-controller/github/actions/fake"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
kerrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
ctrl "sigs.k8s.io/controller-runtime"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
logf "sigs.k8s.io/controller-runtime/pkg/log"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
)
|
||||
|
||||
const (
|
||||
gh_token = "gh_token"
|
||||
timeout = time.Second * 30
|
||||
interval = time.Millisecond * 250
|
||||
runnerImage = "ghcr.io/actions/actions-runner:latest"
|
||||
)
|
||||
|
||||
func newExampleRunner(name, namespace, configSecretName string) *v1alpha1.EphemeralRunner {
|
||||
return &v1alpha1.EphemeralRunner{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
Spec: v1alpha1.EphemeralRunnerSpec{
|
||||
GitHubConfigUrl: "https://github.com/owner/repo",
|
||||
GitHubConfigSecret: configSecretName,
|
||||
RunnerScaleSetId: 1,
|
||||
PodTemplateSpec: corev1.PodTemplateSpec{
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
Image: runnerImage,
|
||||
Command: []string{"/runner/run.sh"},
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
Name: "runner",
|
||||
MountPath: "/runner",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
InitContainers: []corev1.Container{
|
||||
{
|
||||
Name: "setup",
|
||||
Image: runnerImage,
|
||||
Command: []string{"sh", "-c", "cp -r /actions-runner/* /runner/"},
|
||||
VolumeMounts: []corev1.VolumeMount{
|
||||
{
|
||||
Name: "runner",
|
||||
MountPath: "/runner",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []corev1.Volume{
|
||||
{
|
||||
Name: "runner",
|
||||
VolumeSource: corev1.VolumeSource{
|
||||
EmptyDir: &corev1.EmptyDirVolumeSource{},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
var _ = Describe("EphemeralRunner", func() {
|
||||
|
||||
Describe("Resource manipulation", func() {
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
|
||||
autoScalingNS := new(corev1.Namespace)
|
||||
configSecret := new(corev1.Secret)
|
||||
|
||||
controller := new(EphemeralRunnerReconciler)
|
||||
ephemeralRunner := new(v1alpha1.EphemeralRunner)
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
autoScalingNS = &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testns-autoscaling-runner" + RandStringRunes(5),
|
||||
},
|
||||
}
|
||||
err := k8sClient.Create(ctx, autoScalingNS)
|
||||
Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner")
|
||||
|
||||
configSecret = &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "github-config-secret",
|
||||
Namespace: autoScalingNS.Name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"github_token": []byte(gh_token),
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, configSecret)
|
||||
Expect(err).To(BeNil(), "failed to create config secret")
|
||||
|
||||
mgr, err := ctrl.NewManager(cfg, ctrl.Options{
|
||||
Namespace: autoScalingNS.Name,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
Expect(err).To(BeNil(), "failed to create manager")
|
||||
|
||||
controller = &EphemeralRunnerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ActionsClient: fake.NewMultiClient(),
|
||||
}
|
||||
|
||||
err = controller.SetupWithManager(mgr)
|
||||
Expect(err).To(BeNil(), "failed to setup controller")
|
||||
|
||||
ephemeralRunner = newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name)
|
||||
err = k8sClient.Create(ctx, ephemeralRunner)
|
||||
Expect(err).To(BeNil(), "failed to create ephemeral runner")
|
||||
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
|
||||
err := mgr.Start(ctx)
|
||||
Expect(err).To(BeNil(), "failed to start manager")
|
||||
}()
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
defer cancel()
|
||||
|
||||
err := k8sClient.Delete(ctx, autoScalingNS)
|
||||
Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner")
|
||||
})
|
||||
|
||||
It("It should create/add all required resources for EphemeralRunner (finalizer, jit secret)", func() {
|
||||
created := new(v1alpha1.EphemeralRunner)
|
||||
// Check if finalizer is added
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, created)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(created.Finalizers) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return created.Finalizers[0], nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(ephemeralRunnerFinalizerName))
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
secret := new(corev1.Secret)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, secret); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
_, ok := secret.Data[jitTokenKey]
|
||||
return ok, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
pod := new(corev1.Pod)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return pod.Name, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(ephemeralRunner.Name))
|
||||
})
|
||||
|
||||
It("It should re-create pod on failure", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}).Should(BeEquivalentTo(true))
|
||||
|
||||
err := k8sClient.Delete(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to delete pod")
|
||||
|
||||
pod = new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should clean up resources when deleted", func() {
|
||||
// wait for pod to be created
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}).Should(BeEquivalentTo(true))
|
||||
|
||||
// create runner-linked pod
|
||||
runnerLinkedPod := &corev1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-runner-linked-pod",
|
||||
Namespace: ephemeralRunner.Namespace,
|
||||
Labels: map[string]string{
|
||||
"runner-pod": ephemeralRunner.Name,
|
||||
},
|
||||
},
|
||||
Spec: corev1.PodSpec{
|
||||
Containers: []corev1.Container{
|
||||
{
|
||||
Name: "runner-linked-container",
|
||||
Image: "ubuntu:latest",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
err := k8sClient.Create(ctx, runnerLinkedPod)
|
||||
Expect(err).To(BeNil(), "failed to create runner linked pod")
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedPod.Name, Namespace: runnerLinkedPod.Namespace}, pod); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
// create runner linked secret
|
||||
runnerLinkedSecret := &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-runner-linked-secret",
|
||||
Namespace: ephemeralRunner.Namespace,
|
||||
Labels: map[string]string{
|
||||
"runner-pod": ephemeralRunner.Name,
|
||||
},
|
||||
},
|
||||
Data: map[string][]byte{"test": []byte("test")},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, runnerLinkedSecret)
|
||||
Expect(err).To(BeNil(), "failed to create runner linked secret")
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
secret := new(corev1.Secret)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedSecret.Name, Namespace: runnerLinkedSecret.Namespace}, secret); err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
err = k8sClient.Delete(ctx, ephemeralRunner)
|
||||
Expect(err).To(BeNil(), "failed to delete ephemeral runner")
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
secret := new(corev1.Secret)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, secret)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedPod.Name, Namespace: runnerLinkedPod.Namespace}, pod)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
secret := new(corev1.Secret)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: runnerLinkedSecret.Name, Namespace: runnerLinkedSecret.Namespace}, secret)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
secret := new(corev1.Secret)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, secret)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
})
|
||||
|
||||
It("It should eventually have runner id set", func() {
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
updatedEphemeralRunner := new(v1alpha1.EphemeralRunner)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updatedEphemeralRunner)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return updatedEphemeralRunner.Status.RunnerId, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeNumerically(">", 0))
|
||||
})
|
||||
|
||||
It("It should patch the ephemeral runner non terminating status", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
for _, phase := range []corev1.PodPhase{corev1.PodRunning, corev1.PodPending} {
|
||||
podCopy := pod.DeepCopy()
|
||||
pod.Status.Phase = phase
|
||||
// set container state to force status update
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{},
|
||||
})
|
||||
err := k8sClient.Status().Patch(ctx, pod, client.MergeFrom(podCopy))
|
||||
Expect(err).To(BeNil(), "failed to patch pod status")
|
||||
|
||||
Eventually(
|
||||
func() (corev1.PodPhase, error) {
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(phase))
|
||||
}
|
||||
})
|
||||
|
||||
It("It should not update phase if container state does not exist", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to patch pod status")
|
||||
|
||||
Consistently(
|
||||
func() (corev1.PodPhase, error) {
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
|
||||
return corev1.PodUnknown, err
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
).Should(BeEquivalentTo(""))
|
||||
})
|
||||
|
||||
It("It should not re-create pod indefinitely", func() {
|
||||
pod := new(corev1.Pod)
|
||||
failures := 0
|
||||
for i := 0; i < 6; i++ {
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 1,
|
||||
},
|
||||
},
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "Failed to update pod status")
|
||||
|
||||
failures++
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == failures, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
}
|
||||
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err == nil {
|
||||
return false, nil
|
||||
}
|
||||
return kerrors.IsNotFound(err), nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should re-create pod on eviction", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.Phase = corev1.PodFailed
|
||||
pod.Status.Reason = "Evicted"
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{},
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to patch pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == 1, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
|
||||
// should re-create after failure
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
})
|
||||
|
||||
It("It should re-create pod on exit status 0, but runner exists within the service", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to update pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (bool, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return len(updated.Status.Failures) == 1, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
|
||||
// should re-create after failure
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
pod := new(corev1.Pod)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
})
|
||||
|
||||
It("It should not set the phase to succeeded without pod termination status", func() {
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(
|
||||
func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(true))
|
||||
|
||||
// first set phase to running
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Running: &corev1.ContainerStateRunning{
|
||||
StartedAt: metav1.Now(),
|
||||
},
|
||||
},
|
||||
})
|
||||
pod.Status.Phase = corev1.PodRunning
|
||||
err := k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
Eventually(
|
||||
func() (corev1.PodPhase, error) {
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
interval,
|
||||
).Should(BeEquivalentTo(corev1.PodRunning))
|
||||
|
||||
// set phase to succeeded
|
||||
pod.Status.Phase = corev1.PodSucceeded
|
||||
err = k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
Consistently(
|
||||
func() (corev1.PodPhase, error) {
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
},
|
||||
timeout,
|
||||
).Should(BeEquivalentTo(corev1.PodRunning))
|
||||
})
|
||||
})
|
||||
|
||||
Describe("Checking the API", func() {
|
||||
var ctx context.Context
|
||||
var cancel context.CancelFunc
|
||||
|
||||
autoScalingNS := new(corev1.Namespace)
|
||||
configSecret := new(corev1.Secret)
|
||||
|
||||
var mgr manager.Manager
|
||||
|
||||
BeforeEach(func() {
|
||||
ctx, cancel = context.WithCancel(context.Background())
|
||||
autoScalingNS = &corev1.Namespace{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "testns-autoscaling-runner" + RandStringRunes(5),
|
||||
},
|
||||
}
|
||||
err := k8sClient.Create(ctx, autoScalingNS)
|
||||
Expect(err).To(BeNil(), "failed to create test namespace for EphemeralRunner")
|
||||
|
||||
configSecret = &corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "github-config-secret",
|
||||
Namespace: autoScalingNS.Name,
|
||||
},
|
||||
Data: map[string][]byte{
|
||||
"github_token": []byte(gh_token),
|
||||
},
|
||||
}
|
||||
|
||||
err = k8sClient.Create(ctx, configSecret)
|
||||
Expect(err).To(BeNil(), "failed to create config secret")
|
||||
|
||||
mgr, err = ctrl.NewManager(cfg, ctrl.Options{
|
||||
Namespace: autoScalingNS.Name,
|
||||
MetricsBindAddress: "0",
|
||||
})
|
||||
Expect(err).To(BeNil(), "failed to create manager")
|
||||
|
||||
})
|
||||
|
||||
AfterEach(func() {
|
||||
defer cancel()
|
||||
|
||||
err := k8sClient.Delete(ctx, autoScalingNS)
|
||||
Expect(err).To(BeNil(), "failed to delete test namespace for EphemeralRunner")
|
||||
})
|
||||
|
||||
It("It should set the Phase to Succeeded", func() {
|
||||
controller := &EphemeralRunnerReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
Scheme: mgr.GetScheme(),
|
||||
Log: logf.Log,
|
||||
ActionsClient: fake.NewMultiClient(
|
||||
fake.WithDefaultClient(
|
||||
fake.NewFakeClient(
|
||||
fake.WithGetRunner(
|
||||
nil,
|
||||
&actions.ActionsError{
|
||||
StatusCode: http.StatusNotFound,
|
||||
ExceptionName: "AgentNotFoundException",
|
||||
},
|
||||
),
|
||||
),
|
||||
nil,
|
||||
),
|
||||
),
|
||||
}
|
||||
|
||||
err := controller.SetupWithManager(mgr)
|
||||
Expect(err).To(BeNil(), "failed to setup controller")
|
||||
|
||||
go func() {
|
||||
defer GinkgoRecover()
|
||||
|
||||
err := mgr.Start(ctx)
|
||||
Expect(err).To(BeNil(), "failed to start manager")
|
||||
}()
|
||||
|
||||
ephemeralRunner := newExampleRunner("test-runner", autoScalingNS.Name, configSecret.Name)
|
||||
|
||||
err = k8sClient.Create(ctx, ephemeralRunner)
|
||||
Expect(err).To(BeNil())
|
||||
|
||||
pod := new(corev1.Pod)
|
||||
Eventually(func() (bool, error) {
|
||||
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, pod); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return true, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(true))
|
||||
|
||||
pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, corev1.ContainerStatus{
|
||||
Name: EphemeralRunnerContainerName,
|
||||
State: corev1.ContainerState{
|
||||
Terminated: &corev1.ContainerStateTerminated{
|
||||
ExitCode: 0,
|
||||
},
|
||||
},
|
||||
})
|
||||
err = k8sClient.Status().Update(ctx, pod)
|
||||
Expect(err).To(BeNil(), "failed to update pod status")
|
||||
|
||||
updated := new(v1alpha1.EphemeralRunner)
|
||||
Eventually(func() (corev1.PodPhase, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
|
||||
if err != nil {
|
||||
return "", nil
|
||||
}
|
||||
return updated.Status.Phase, nil
|
||||
}, timeout, interval).Should(BeEquivalentTo(corev1.PodSucceeded))
|
||||
})
|
||||
})
|
||||
})
|
||||
463
controllers/actions.github.com/ephemeralrunnerset_controller.go
Normal file
@@ -0,0 +1,463 @@
/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

	http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package actionsgithubcom

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"sort"
	"strings"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/github/actions"
	"github.com/go-logr/logr"
	"go.uber.org/multierr"
	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
	"sigs.k8s.io/controller-runtime/pkg/predicate"
)

const (
	ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller"
	ephemeralRunnerSetFinalizerName      = "ephemeralrunner.actions.github.com/finalizer"
)

// EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object
type EphemeralRunnerSetReconciler struct {
	client.Client
	Log           logr.Logger
	Scheme        *runtime.Scheme
	ActionsClient actions.MultiClient

	resourceBuilder resourceBuilder
}

//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch

// Reconcile is part of the main kubernetes reconciliation loop which aims to
// move the current state of the cluster closer to the desired state.
//
// The responsibility of this controller is to bring the state to the desired one, but it should
// avoid patching itself, because of the frequent patches that the listener is doing.
// The safe point where we can patch the resource is when we are reacting to the finalizer.
// The listener should then be deleted first, to allow the controller to clean up resources without interruption.
//
// The resource should be created with the finalizer. If we left it to this controller to add it, we would
// risk the same issue of patching the status. The responsibility of this controller should only
// be to bring the count of EphemeralRunners to the desired one, not to patch this resource
// until it is safe to do so.
func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	log := r.Log.WithValues("ephemeralrunnerset", req.NamespacedName)

	ephemeralRunnerSet := new(v1alpha1.EphemeralRunnerSet)
	if err := r.Get(ctx, req.NamespacedName, ephemeralRunnerSet); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	// Requested deletion does not need to be reconciled.
	if !ephemeralRunnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
		if controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
			log.Info("Deleting resources")
			done, err := r.cleanUpEphemeralRunners(ctx, ephemeralRunnerSet, log)
			if err != nil {
				log.Error(err, "Failed to clean up EphemeralRunners")
				return ctrl.Result{}, err
			}
			if !done {
				log.Info("Waiting for resources to be deleted")
				return ctrl.Result{}, nil
			}

			log.Info("Removing finalizer")
			if err := patch(ctx, r.Client, ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
				controllerutil.RemoveFinalizer(obj, ephemeralRunnerSetFinalizerName)
			}); err != nil && !kerrors.IsNotFound(err) {
				log.Error(err, "Failed to update ephemeral runner set with removed finalizer")
				return ctrl.Result{}, err
			}

			log.Info("Successfully removed finalizer after cleanup")
			return ctrl.Result{}, nil
		}
		return ctrl.Result{}, nil
	}

	// Add finalizer if not present
	if !controllerutil.ContainsFinalizer(ephemeralRunnerSet, ephemeralRunnerSetFinalizerName) {
		log.Info("Adding finalizer")
		if err := patch(ctx, r.Client, ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
			controllerutil.AddFinalizer(obj, ephemeralRunnerSetFinalizerName)
		}); err != nil {
			log.Error(err, "Failed to update ephemeral runner set with finalizer added")
			return ctrl.Result{}, err
		}

		log.Info("Successfully added finalizer")
		return ctrl.Result{}, nil
	}

	// Find all EphemeralRunners in the matching namespace owned by this EphemeralRunnerSet.
	ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
	err := r.List(
		ctx,
		ephemeralRunnerList,
		client.InNamespace(req.Namespace),
		client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name},
	)
	if err != nil {
		log.Error(err, "Unable to list child ephemeral runners")
		return ctrl.Result{}, err
	}

	pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList)

	log.Info("Ephemeral runner counts",
		"pending", len(pendingEphemeralRunners),
		"running", len(runningEphemeralRunners),
		"finished", len(finishedEphemeralRunners),
		"failed", len(failedEphemeralRunners),
		"deleting", len(deletingEphemeralRunners),
	)

	// cleanup finished runners and proceed
	var errs []error
	for i := range finishedEphemeralRunners {
		log.Info("Deleting finished ephemeral runner", "name", finishedEphemeralRunners[i].Name)
		if err := r.Delete(ctx, finishedEphemeralRunners[i]); err != nil {
			if !kerrors.IsNotFound(err) {
				errs = append(errs, err)
			}
		}
	}

	if len(errs) > 0 {
		mergedErrs := multierr.Combine(errs...)
		log.Error(mergedErrs, "Failed to delete finished ephemeral runners")
		return ctrl.Result{}, mergedErrs
	}

	total := len(pendingEphemeralRunners) + len(runningEphemeralRunners) + len(failedEphemeralRunners)
	log.Info("Scaling comparison", "current", total, "desired", ephemeralRunnerSet.Spec.Replicas)
	switch {
	case total < ephemeralRunnerSet.Spec.Replicas: // Handle scale up
		count := ephemeralRunnerSet.Spec.Replicas - total
		log.Info("Creating new ephemeral runners (scale up)", "count", count)
		if err := r.createEphemeralRunners(ctx, ephemeralRunnerSet, count, log); err != nil {
			log.Error(err, "failed to create ephemeral runners")
			return ctrl.Result{}, err
		}

	case total > ephemeralRunnerSet.Spec.Replicas: // Handle scale down scenario.
		count := total - ephemeralRunnerSet.Spec.Replicas
		log.Info("Deleting ephemeral runners (scale down)", "count", count)
		if err := r.deleteIdleEphemeralRunners(ctx, ephemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners, count, log); err != nil {
			log.Error(err, "failed to delete idle runners")
			return ctrl.Result{}, err
		}
	}

	// Update the status if needed.
	if ephemeralRunnerSet.Status.CurrentReplicas != total {
		log.Info("Updating status with current runners count", "count", total)
		if err := patch(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
			obj.Status.CurrentReplicas = total
		}); err != nil {
			log.Error(err, "Failed to update status with current runners count")
			return ctrl.Result{}, err
		}
	}

	return ctrl.Result{}, nil
}

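// cleanUpEphemeralRunners deletes finished and failed runners right away,
// unregisters pending and running runners from the Actions service before
// deleting them, and reports done only once no child EphemeralRunner is left.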
func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (done bool, err error) {
	ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
	err = r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name})
	if err != nil {
		return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
	}

	// only if there are no ephemeral runners left, return true
	if len(ephemeralRunnerList.Items) == 0 {
		log.Info("All ephemeral runners are deleted")
		return true, nil
	}

	pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners := categorizeEphemeralRunners(ephemeralRunnerList)

	log.Info("Clean up runner counts",
		"pending", len(pendingEphemeralRunners),
		"running", len(runningEphemeralRunners),
		"finished", len(finishedEphemeralRunners),
		"failed", len(failedEphemeralRunners),
		"deleting", len(deletingEphemeralRunners),
	)

	log.Info("Cleanup finished or failed ephemeral runners")
	var errs []error
	for _, ephemeralRunner := range append(finishedEphemeralRunners, failedEphemeralRunners...) {
		log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name)
		if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
			errs = append(errs, err)
		}
	}

	if len(errs) > 0 {
		mergedErrs := multierr.Combine(errs...)
		log.Error(mergedErrs, "Failed to delete ephemeral runners")
		return false, mergedErrs
	}

	// avoid fetching the client if we have nothing left to do
	if len(runningEphemeralRunners) == 0 && len(pendingEphemeralRunners) == 0 {
		return false, nil
	}

	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
	if err != nil {
		return false, err
	}

	log.Info("Cleanup pending or running ephemeral runners")
	errs = errs[0:0]
	for _, ephemeralRunner := range append(pendingEphemeralRunners, runningEphemeralRunners...) {
		log.Info("Removing the ephemeral runner from the service", "name", ephemeralRunner.Name)
		_, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log)
		if err != nil {
			errs = append(errs, err)
		}
	}

	if len(errs) > 0 {
		mergedErrs := multierr.Combine(errs...)
		log.Error(mergedErrs, "Failed to remove ephemeral runners from the service")
		return false, mergedErrs
	}

	return false, nil
}

// createEphemeralRunners provisions `count` new v1alpha1.EphemeralRunner resources in the cluster.
func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error {
	// Track multiple errors at once and return the bundle.
	errs := make([]error, 0)
	for i := 0; i < count; i++ {
		ephemeralRunner := r.resourceBuilder.newEphemeralRunner(runnerSet)

		// Make sure that we own the resource we create.
		if err := ctrl.SetControllerReference(runnerSet, ephemeralRunner, r.Scheme); err != nil {
			log.Error(err, "failed to set controller reference on ephemeral runner")
			errs = append(errs, err)
			continue
		}

		log.Info("Creating new ephemeral runner", "progress", i+1, "total", count)
		if err := r.Create(ctx, ephemeralRunner); err != nil {
			log.Error(err, "failed to create ephemeral runner")
			errs = append(errs, err)
			continue
		}

		log.Info("Created new ephemeral runner", "runner", ephemeralRunner.Name)
	}

	return multierr.Combine(errs...)
}

// deleteIdleEphemeralRunners tries to delete `count` v1alpha1.EphemeralRunner resources in the cluster.
// It will only delete a `v1alpha1.EphemeralRunner` that has registered with the Actions service,
// i.e. one that has `v1alpha1.EphemeralRunner.Status.RunnerId` set.
// So it is possible that this function deletes fewer ephemeral runners than requested
// if not enough of them have registered with the Actions service.
// When this happens, the next reconcile loop will try to delete the remaining ephemeral runners
// after we get notified by any of the `v1alpha1.EphemeralRunner.Status` updates.
func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, pendingEphemeralRunners, runningEphemeralRunners []*v1alpha1.EphemeralRunner, count int, log logr.Logger) error {
	runners := newEphemeralRunnerStepper(pendingEphemeralRunners, runningEphemeralRunners)
	if runners.len() == 0 {
		log.Info("No pending or running ephemeral runners available for scale down at this time")
		return nil
	}
	actionsClient, err := r.actionsClientFor(ctx, ephemeralRunnerSet)
	if err != nil {
		return fmt.Errorf("failed to create actions client for ephemeral runner replica set: %v", err)
	}
	var errs []error
	deletedCount := 0
	for runners.next() {
		ephemeralRunner := runners.object()
		if ephemeralRunner.Status.RunnerId == 0 {
			log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name)
			continue
		}

		if ephemeralRunner.Status.JobRequestId > 0 {
			log.Info("Skipping ephemeral runner since it is running a job", "name", ephemeralRunner.Name, "jobRequestId", ephemeralRunner.Status.JobRequestId)
			continue
		}

		log.Info("Removing the idle ephemeral runner", "name", ephemeralRunner.Name)
		ok, err := r.deleteEphemeralRunnerWithActionsClient(ctx, ephemeralRunner, actionsClient, log)
		if err != nil {
			errs = append(errs, err)
		}
		if !ok {
			continue
		}

		deletedCount++
		if deletedCount == count {
			break
		}
	}

	return multierr.Combine(errs...)
}

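// deleteEphemeralRunnerWithActionsClient removes the runner from the Actions
// service and then deletes the EphemeralRunner resource. It returns false with
// no error when the service reports the runner is still running a job.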
func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient actions.ActionsService, log logr.Logger) (bool, error) {
	if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
		actionsError := &actions.ActionsError{}
		if errors.As(err, &actionsError) &&
			actionsError.StatusCode == http.StatusBadRequest &&
			strings.Contains(actionsError.ExceptionName, "JobStillRunningException") {
			// Runner is still running a job, proceed with the next one
			return false, nil
		}

		return false, err
	}

	log.Info("Deleting ephemeral runner after removing from the service", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
	if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
		return false, err
	}

	log.Info("Deleted ephemeral runner", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
	return true, nil
}

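// actionsClientFor resolves the GitHub config secret referenced by the
// EphemeralRunnerSet and returns an Actions service client for its configured URL.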
func (r *EphemeralRunnerSetReconciler) actionsClientFor(ctx context.Context, rs *v1alpha1.EphemeralRunnerSet) (actions.ActionsService, error) {
	secret := new(corev1.Secret)
	if err := r.Get(ctx, types.NamespacedName{Namespace: rs.Namespace, Name: rs.Spec.EphemeralRunnerSpec.GitHubConfigSecret}, secret); err != nil {
		return nil, fmt.Errorf("failed to get secret: %w", err)
	}

	return r.ActionsClient.GetClientFromSecret(ctx, rs.Spec.EphemeralRunnerSpec.GitHubConfigUrl, rs.Namespace, secret.Data)
}

// SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
	// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
	if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string {
		groupVersion := v1alpha1.GroupVersion.String()

		// grab the EphemeralRunner object, extract the owner...
		ephemeralRunner := rawObj.(*v1alpha1.EphemeralRunner)
		owner := metav1.GetControllerOf(ephemeralRunner)
		if owner == nil {
			return nil
		}

		// ...make sure it is owned by this controller
		if owner.APIVersion != groupVersion || owner.Kind != "EphemeralRunnerSet" {
			return nil
		}

		// ...and if so, return it
		return []string{owner.Name}
	}); err != nil {
		return err
	}

	return ctrl.NewControllerManagedBy(mgr).
		For(&v1alpha1.EphemeralRunnerSet{}).
		Owns(&v1alpha1.EphemeralRunner{}).
		WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
		Complete(r)
}

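// ephemeralRunnerStepper iterates over pending runners followed by running
// runners, each ordered oldest-first, so scale-down considers the
// longest-waiting idle runners before more recently created ones.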
type ephemeralRunnerStepper struct {
	items []*v1alpha1.EphemeralRunner
	index int
}

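// newEphemeralRunnerStepper sorts both slices by creation time (oldest first)
// and concatenates the pending runners ahead of the running ones.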
func newEphemeralRunnerStepper(pending, running []*v1alpha1.EphemeralRunner) *ephemeralRunnerStepper {
	sort.Slice(pending, func(i, j int) bool {
		return pending[i].GetCreationTimestamp().Time.Before(pending[j].GetCreationTimestamp().Time)
	})
	sort.Slice(running, func(i, j int) bool {
		return running[i].GetCreationTimestamp().Time.Before(running[j].GetCreationTimestamp().Time)
	})

	return &ephemeralRunnerStepper{
		items: append(pending, running...),
		index: -1,
	}
}

func (s *ephemeralRunnerStepper) next() bool {
	if s.index+1 < len(s.items) {
		s.index++
		return true
	}
	return false
}

func (s *ephemeralRunnerStepper) object() *v1alpha1.EphemeralRunner {
	if s.index >= 0 && s.index < len(s.items) {
		return s.items[s.index]
	}
	return nil
}

func (s *ephemeralRunnerStepper) len() int {
	return len(s.items)
}

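// categorizeEphemeralRunners buckets the child runners by deletion timestamp
// and Status.Phase; runners without a phase yet are counted as pending.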
func categorizeEphemeralRunners(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) (pendingEphemeralRunners, runningEphemeralRunners, finishedEphemeralRunners, failedEphemeralRunners, deletingEphemeralRunners []*v1alpha1.EphemeralRunner) {
	for i := range ephemeralRunnerList.Items {
		r := &ephemeralRunnerList.Items[i]
		if !r.ObjectMeta.DeletionTimestamp.IsZero() {
			deletingEphemeralRunners = append(deletingEphemeralRunners, r)
			continue
		}

		switch r.Status.Phase {
		case corev1.PodRunning:
			runningEphemeralRunners = append(runningEphemeralRunners, r)
		case corev1.PodSucceeded:
			finishedEphemeralRunners = append(finishedEphemeralRunners, r)
		case corev1.PodFailed:
			failedEphemeralRunners = append(failedEphemeralRunners, r)
		default:
			// Pending or no phase should be considered as pending.
			//
			// If the field is not set, the EphemeralRunner did not yet have a
			// chance to update its Status.Phase field.
			pendingEphemeralRunners = append(pendingEphemeralRunners, r)
		}
	}
	return
}
@@ -0,0 +1,445 @@
package actionsgithubcom

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	kerrors "k8s.io/apimachinery/pkg/api/errors"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/github/actions/fake"
)

const (
	ephemeralRunnerSetTestTimeout     = time.Second * 5
	ephemeralRunnerSetTestInterval    = time.Millisecond * 250
	ephemeralRunnerSetTestGitHubToken = "gh_token"
)

var _ = Describe("Test EphemeralRunnerSet controller", func() {
	var ctx context.Context
	var cancel context.CancelFunc
	autoScalingNS := new(corev1.Namespace)
	ephemeralRunnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
	configSecret := new(corev1.Secret)

	BeforeEach(func() {
		ctx, cancel = context.WithCancel(context.TODO())
		autoScalingNS = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: "testns-autoscaling-runnerset" + RandStringRunes(5)},
		}

		err := k8sClient.Create(ctx, autoScalingNS)
		Expect(err).NotTo(HaveOccurred(), "failed to create test namespace for EphemeralRunnerSet")

		configSecret = &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "github-config-secret",
				Namespace: autoScalingNS.Name,
			},
			Data: map[string][]byte{
				"github_token": []byte(ephemeralRunnerSetTestGitHubToken),
			},
		}

		err = k8sClient.Create(ctx, configSecret)
		Expect(err).NotTo(HaveOccurred(), "failed to create config secret")

		mgr, err := ctrl.NewManager(cfg, ctrl.Options{
			Namespace:          autoScalingNS.Name,
			MetricsBindAddress: "0",
		})
		Expect(err).NotTo(HaveOccurred(), "failed to create manager")

		controller := &EphemeralRunnerSetReconciler{
			Client:        mgr.GetClient(),
			Scheme:        mgr.GetScheme(),
			Log:           logf.Log,
			ActionsClient: fake.NewMultiClient(),
		}
		err = controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		ephemeralRunnerSet = &actionsv1alpha1.EphemeralRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoScalingNS.Name,
			},
			Spec: actionsv1alpha1.EphemeralRunnerSetSpec{
				EphemeralRunnerSpec: actionsv1alpha1.EphemeralRunnerSpec{
					GitHubConfigUrl:    "https://github.com/owner/repo",
					GitHubConfigSecret: configSecret.Name,
					RunnerScaleSetId:   100,
					PodTemplateSpec: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							Containers: []corev1.Container{
								{
									Name:  "runner",
									Image: "ghcr.io/actions/runner",
								},
							},
						},
					},
				},
			},
		}

		err = k8sClient.Create(ctx, ephemeralRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create EphemeralRunnerSet")

		go func() {
			defer GinkgoRecover()

			err := mgr.Start(ctx)
			Expect(err).NotTo(HaveOccurred(), "failed to start manager")
		}()
	})

	AfterEach(func() {
		defer cancel()

		err := k8sClient.Delete(ctx, autoScalingNS)
		Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace for EphemeralRunnerSet")
	})

Context("When creating a new EphemeralRunnerSet", func() {
|
||||
It("It should create/add all required resources for a new EphemeralRunnerSet (finalizer)", func() {
|
||||
// Check if finalizer is added
|
||||
created := new(actionsv1alpha1.EphemeralRunnerSet)
|
||||
Eventually(
|
||||
func() (string, error) {
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(created.Finalizers) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
return created.Finalizers[0], nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(ephemeralRunnerSetFinalizerName), "EphemeralRunnerSet should have a finalizer")
|
||||
|
||||
// Check if the number of ephemeral runners are stay 0
|
||||
Consistently(
|
||||
func() (int, error) {
|
||||
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return len(runnerList.Items), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "No EphemeralRunner should be created")
|
||||
|
||||
// Check if the status stay 0
|
||||
Consistently(
|
||||
func() (int, error) {
|
||||
runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return int(runnerSet.Status.CurrentReplicas), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "EphemeralRunnerSet status should be 0")
|
||||
|
||||
// Scaling up the EphemeralRunnerSet
|
||||
updated := created.DeepCopy()
|
||||
updated.Spec.Replicas = 5
|
||||
err := k8sClient.Update(ctx, updated)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
|
||||
|
||||
// Check if the number of ephemeral runners are created
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return len(runnerList.Items), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
|
||||
|
||||
// Check if the status is updated
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
runnerSet := new(actionsv1alpha1.EphemeralRunnerSet)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, runnerSet)
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return int(runnerSet.Status.CurrentReplicas), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "EphemeralRunnerSet status should be 5")
|
||||
})
|
||||
})
|
||||
|
||||
Context("When deleting a new EphemeralRunnerSet", func() {
|
||||
It("It should cleanup all resources for a deleting EphemeralRunnerSet before removing it", func() {
|
||||
created := new(actionsv1alpha1.EphemeralRunnerSet)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
|
||||
|
||||
// Scale up the EphemeralRunnerSet
|
||||
updated := created.DeepCopy()
|
||||
updated.Spec.Replicas = 5
|
||||
err = k8sClient.Update(ctx, updated)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
|
||||
|
||||
// Wait for the EphemeralRunnerSet to be scaled up
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return len(runnerList.Items), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
|
||||
|
||||
// Delete the EphemeralRunnerSet
|
||||
err = k8sClient.Delete(ctx, created)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunnerSet")
|
||||
|
||||
// Check if all ephemeral runners are deleted
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return len(runnerList.Items), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "All EphemeralRunner should be deleted")
|
||||
|
||||
// Check if the EphemeralRunnerSet is deleted
|
||||
Eventually(
|
||||
func() error {
|
||||
deleted := new(actionsv1alpha1.EphemeralRunnerSet)
|
||||
err = k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, deleted)
|
||||
if err != nil {
|
||||
if kerrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return fmt.Errorf("EphemeralRunnerSet is not deleted")
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(Succeed(), "EphemeralRunnerSet should be deleted")
|
||||
})
|
||||
})
|
||||
|
||||
Context("When a new EphemeralRunnerSet scale up and down", func() {
|
||||
It("It should delete finished EphemeralRunner and create new EphemeralRunner", func() {
|
||||
created := new(actionsv1alpha1.EphemeralRunnerSet)
|
||||
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunnerSet.Name, Namespace: ephemeralRunnerSet.Namespace}, created)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to get EphemeralRunnerSet")
|
||||
|
||||
// Scale up the EphemeralRunnerSet
|
||||
updated := created.DeepCopy()
|
||||
updated.Spec.Replicas = 5
|
||||
err = k8sClient.Update(ctx, updated)
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")
|
||||
|
||||
// Wait for the EphemeralRunnerSet to be scaled up
|
||||
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
return len(runnerList.Items), nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunner should be created")
|
||||
|
||||
// Set status to simulate a configured EphemeralRunner
|
||||
for i, runner := range runnerList.Items {
|
||||
updatedRunner := runner.DeepCopy()
|
||||
updatedRunner.Status.Phase = corev1.PodRunning
|
||||
updatedRunner.Status.RunnerId = i + 100
|
||||
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
|
||||
}
|
||||
|
||||
// Mark one of the EphemeralRunner as finished
|
||||
finishedRunner := runnerList.Items[4].DeepCopy()
|
||||
finishedRunner.Status.Phase = corev1.PodSucceeded
|
||||
err = k8sClient.Status().Patch(ctx, finishedRunner, client.MergeFrom(&runnerList.Items[4]))
|
||||
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
|
||||
|
||||
// Wait for the finished EphemeralRunner to be deleted
|
||||
Eventually(
|
||||
func() error {
|
||||
runnerList := new(actionsv1alpha1.EphemeralRunnerList)
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, runner := range runnerList.Items {
|
||||
if runner.Name == finishedRunner.Name {
|
||||
return fmt.Errorf("EphemeralRunner is not deleted")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
ephemeralRunnerSetTestTimeout,
|
||||
ephemeralRunnerSetTestInterval).Should(Succeed(), "Finished EphemeralRunner should be deleted")
|
||||
|
||||
// We should still have the EphemeralRunnerSet scale up
|
||||
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
|
||||
Eventually(
|
||||
func() (int, error) {
|
||||
err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
|
||||
if err != nil {
|
||||
return -1, err
|
||||
}
|
||||
|
||||
        return len(runnerList.Items), nil
    },
    ephemeralRunnerSetTestTimeout,
    ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(5), "5 EphemeralRunners should be created")

// Set status to simulate a configured EphemeralRunner
for i, runner := range runnerList.Items {
    updatedRunner := runner.DeepCopy()
    updatedRunner.Status.Phase = corev1.PodRunning
    updatedRunner.Status.RunnerId = i + 100
    err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
    Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
}

// Scale down the EphemeralRunnerSet
updated = created.DeepCopy()
updated.Spec.Replicas = 3
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")

// Wait for the EphemeralRunnerSet to be scaled down
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
    func() (int, error) {
        err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
        if err != nil {
            return -1, err
        }

        return len(runnerList.Items), nil
    },
    ephemeralRunnerSetTestTimeout,
    ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(3), "3 EphemeralRunners should remain after scaling down")

// We should not scale down runners that are running jobs
runningRunner := runnerList.Items[0].DeepCopy()
runningRunner.Status.JobRequestId = 1000
err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")

runningRunner = runnerList.Items[1].DeepCopy()
runningRunner.Status.JobRequestId = 1001
err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")

// Scale down to 1
updated = created.DeepCopy()
updated.Spec.Replicas = 1
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")

// Wait for the EphemeralRunnerSet to be scaled down to 2, since we still have 2 runners running jobs
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
    func() (int, error) {
        err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
        if err != nil {
            return -1, err
        }

        return len(runnerList.Items), nil
    },
    ephemeralRunnerSetTestTimeout,
    ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunners should remain")

// We should not scale down failed runners
failedRunner := runnerList.Items[0].DeepCopy()
failedRunner.Status.Phase = corev1.PodFailed
err = k8sClient.Status().Patch(ctx, failedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")

// Scale down to 0
updated = created.DeepCopy()
updated.Spec.Replicas = 0
err = k8sClient.Patch(ctx, updated, client.MergeFrom(created))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunnerSet")

// We should not scale down the EphemeralRunnerSet since we still have 1 runner running a job and 1 failed runner
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Consistently(
    func() (int, error) {
        err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
        if err != nil {
            return -1, err
        }

        return len(runnerList.Items), nil
    },
    ephemeralRunnerSetTestTimeout,
    ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(2), "2 EphemeralRunners should remain")

// We will scale down to 0 once the running job completes and the failed runner is deleted
runningRunner = runnerList.Items[1].DeepCopy()
runningRunner.Status.Phase = corev1.PodSucceeded
err = k8sClient.Status().Patch(ctx, runningRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")

err = k8sClient.Delete(ctx, &runnerList.Items[0])
Expect(err).NotTo(HaveOccurred(), "failed to delete EphemeralRunner")

// Wait for the EphemeralRunnerSet to be scaled down to 0
runnerList = new(actionsv1alpha1.EphemeralRunnerList)
Eventually(
    func() (int, error) {
        err := k8sClient.List(ctx, runnerList, client.InNamespace(ephemeralRunnerSet.Namespace))
        if err != nil {
            return -1, err
        }

        return len(runnerList.Items), nil
    },
    ephemeralRunnerSetTestTimeout,
    ephemeralRunnerSetTestInterval).Should(BeEquivalentTo(0), "no EphemeralRunners should remain")
})
})
})
437
controllers/actions.github.com/resourcebuilder.go
Normal file
@@ -0,0 +1,437 @@

package actionsgithubcom

import (
    "context"
    "fmt"
    "math"
    "strconv"

    "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
    "github.com/actions/actions-runner-controller/hash"
    corev1 "k8s.io/api/core/v1"
    rbacv1 "k8s.io/api/rbac/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const (
    jitTokenKey = "jitToken"
)

type resourceBuilder struct{}

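// newScaleSetListenerPod builds the pod that runs the scale set listener
// binary (/github-runnerscaleset-listener). GitHub configuration is passed
// via environment variables, and whichever credential keys are present in
// the mirrored secret (PAT or GitHub App) are wired in as secret-backed env vars.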
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret) *corev1.Pod {
    newLabels := map[string]string{}
    newLabels[scaleSetListenerLabel] = fmt.Sprintf("%v-%v", autoscalingListener.Spec.AutoscalingRunnerSetNamespace, autoscalingListener.Spec.AutoscalingRunnerSetName)

    listenerEnv := []corev1.EnvVar{
        {
            Name:  "GITHUB_CONFIGURE_URL",
            Value: autoscalingListener.Spec.GitHubConfigUrl,
        },
        {
            Name:  "GITHUB_EPHEMERAL_RUNNER_SET_NAMESPACE",
            Value: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
        },
        {
            Name:  "GITHUB_EPHEMERAL_RUNNER_SET_NAME",
            Value: autoscalingListener.Spec.EphemeralRunnerSetName,
        },
        {
            Name:  "GITHUB_MAX_RUNNERS",
            Value: strconv.Itoa(autoscalingListener.Spec.MaxRunners),
        },
        {
            Name:  "GITHUB_MIN_RUNNERS",
            Value: strconv.Itoa(autoscalingListener.Spec.MinRunners),
        },
        {
            Name:  "GITHUB_RUNNER_SCALE_SET_ID",
            Value: strconv.Itoa(autoscalingListener.Spec.RunnerScaleSetId),
        },
    }

    if _, ok := secret.Data["github_token"]; ok {
        listenerEnv = append(listenerEnv, corev1.EnvVar{
            Name: "GITHUB_TOKEN",
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{
                        Name: secret.Name,
                    },
                    Key: "github_token",
                },
            },
        })
    }

    if _, ok := secret.Data["github_app_id"]; ok {
        listenerEnv = append(listenerEnv, corev1.EnvVar{
            Name: "GITHUB_APP_ID",
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{
                        Name: secret.Name,
                    },
                    Key: "github_app_id",
                },
            },
        })
    }

    if _, ok := secret.Data["github_app_installation_id"]; ok {
        listenerEnv = append(listenerEnv, corev1.EnvVar{
            Name: "GITHUB_APP_INSTALLATION_ID",
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{
                        Name: secret.Name,
                    },
                    Key: "github_app_installation_id",
                },
            },
        })
    }

    if _, ok := secret.Data["github_app_private_key"]; ok {
        listenerEnv = append(listenerEnv, corev1.EnvVar{
            Name: "GITHUB_APP_PRIVATE_KEY",
            ValueFrom: &corev1.EnvVarSource{
                SecretKeyRef: &corev1.SecretKeySelector{
                    LocalObjectReference: corev1.LocalObjectReference{
                        Name: secret.Name,
                    },
                    Key: "github_app_private_key",
                },
            },
        })
    }

    podSpec := corev1.PodSpec{
        ServiceAccountName: serviceAccount.Name,
        Containers: []corev1.Container{
            {
                Name:            name,
                Image:           autoscalingListener.Spec.Image,
                Env:             listenerEnv,
                ImagePullPolicy: corev1.PullIfNotPresent,
                Command: []string{
                    "/github-runnerscaleset-listener",
                },
            },
        },
        ImagePullSecrets: autoscalingListener.Spec.ImagePullSecrets,
        RestartPolicy:    corev1.RestartPolicyNever,
    }

    newRunnerScaleSetListenerPod := &corev1.Pod{
        TypeMeta: metav1.TypeMeta{
            Kind:       "Pod",
            APIVersion: "v1",
        },
        ObjectMeta: metav1.ObjectMeta{
            Name:      autoscalingListener.Name,
            Namespace: autoscalingListener.Namespace,
            Labels:    newLabels,
        },
        Spec: podSpec,
    }

    return newRunnerScaleSetListenerPod
}

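// newEphemeralRunnerSet derives an EphemeralRunnerSet from an
// AutoscalingRunnerSet: it copies the GitHub configuration and pod template,
// labels the set with the runner spec hash, and starts it at zero replicas.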
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
    runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
    if err != nil {
        return nil, err
    }
    runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()

    newLabels := map[string]string{}
    newLabels[LabelKeyRunnerSpecHash] = runnerSpecHash

    newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
            Namespace:    autoscalingRunnerSet.ObjectMeta.Namespace,
            Labels:       newLabels,
        },
        Spec: v1alpha1.EphemeralRunnerSetSpec{
            Replicas: 0,
            EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
                RunnerScaleSetId:   runnerScaleSetId,
                GitHubConfigUrl:    autoscalingRunnerSet.Spec.GitHubConfigUrl,
                GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
                Proxy:              autoscalingRunnerSet.Spec.Proxy,
                GitHubServerTLS:    autoscalingRunnerSet.Spec.GitHubServerTLS,
                PodTemplateSpec:    autoscalingRunnerSet.Spec.Template,
            },
        },
    }

    return newEphemeralRunnerSet, nil
}

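// newScaleSetListenerServiceAccount builds the service account the listener
// pod runs as, labeled with the owning AutoscalingRunnerSet's name and namespace.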
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
    return &corev1.ServiceAccount{
        ObjectMeta: metav1.ObjectMeta{
            Name:      scaleSetListenerServiceAccountName(autoscalingListener),
            Namespace: autoscalingListener.Namespace,
            Labels: map[string]string{
                "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
                "auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
            },
        },
    }
}

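// newScaleSetListenerRole builds the namespaced Role carrying the minimal
// rules from rulesForListenerRole; the rules hash label lets the controller
// cheaply detect when the policy needs to be re-applied.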
func (b *resourceBuilder) newScaleSetListenerRole(autoscalingListener *v1alpha1.AutoscalingListener) *rbacv1.Role {
    rules := rulesForListenerRole([]string{autoscalingListener.Spec.EphemeralRunnerSetName})
    rulesHash := hash.ComputeTemplateHash(&rules)
    newRole := &rbacv1.Role{
        ObjectMeta: metav1.ObjectMeta{
            Name:      scaleSetListenerRoleName(autoscalingListener),
            Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
            Labels: map[string]string{
                "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
                "auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
                "auto-scaling-listener-namespace":   autoscalingListener.Namespace,
                "auto-scaling-listener-name":        autoscalingListener.Name,
                "role-policy-rules-hash":            rulesHash,
            },
        },
        Rules: rules,
    }

    return newRole
}

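// newScaleSetListenerRoleBinding binds the listener Role to the listener's
// service account; the role-ref and subject hash labels serve the same
// drift-detection purpose as the rules hash on the Role.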
func (b *resourceBuilder) newScaleSetListenerRoleBinding(autoscalingListener *v1alpha1.AutoscalingListener, listenerRole *rbacv1.Role, serviceAccount *corev1.ServiceAccount) *rbacv1.RoleBinding {
    roleRef := rbacv1.RoleRef{
        Kind: "Role",
        Name: listenerRole.Name,
    }
    roleRefHash := hash.ComputeTemplateHash(&roleRef)

    subjects := []rbacv1.Subject{
        {
            Kind:      "ServiceAccount",
            Namespace: serviceAccount.Namespace,
            Name:      serviceAccount.Name,
        },
    }
    subjectHash := hash.ComputeTemplateHash(&subjects)

    newRoleBinding := &rbacv1.RoleBinding{
        ObjectMeta: metav1.ObjectMeta{
            Name:      scaleSetListenerRoleName(autoscalingListener),
            Namespace: autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
            Labels: map[string]string{
                "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
                "auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
                "auto-scaling-listener-namespace":   autoscalingListener.Namespace,
                "auto-scaling-listener-name":        autoscalingListener.Name,
                "role-binding-role-ref-hash":        roleRefHash,
                "role-binding-subject-hash":         subjectHash,
            },
        },
        RoleRef:  roleRef,
        Subjects: subjects,
    }

    return newRoleBinding
}

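// newScaleSetListenerSecretMirror copies the GitHub credentials secret into
// the listener's namespace; the secret-data hash label makes it possible to
// tell when the source secret has changed and the mirror is stale.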
func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v1alpha1.AutoscalingListener, secret *corev1.Secret) *corev1.Secret {
    dataHash := hash.ComputeTemplateHash(&secret.Data)

    newListenerSecret := &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name:      scaleSetListenerSecretMirrorName(autoscalingListener),
            Namespace: autoscalingListener.Namespace,
            Labels: map[string]string{
                "auto-scaling-runner-set-namespace": autoscalingListener.Spec.AutoscalingRunnerSetNamespace,
                "auto-scaling-runner-set-name":      autoscalingListener.Spec.AutoscalingRunnerSetName,
                "secret-data-hash":                  dataHash,
            },
        },
        Data: secret.DeepCopy().Data,
    }

    return newListenerSecret
}

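// newAutoScalingListener builds the AutoscalingListener for an
// AutoscalingRunnerSet, defaulting MinRunners to 0 and MaxRunners to
// math.MaxInt32 (effectively unbounded) when the spec leaves them unset.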
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
    runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdKey])
    if err != nil {
        return nil, err
    }

    effectiveMinRunners := 0
    effectiveMaxRunners := math.MaxInt32
    if autoscalingRunnerSet.Spec.MaxRunners != nil {
        effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
    }
    if autoscalingRunnerSet.Spec.MinRunners != nil {
        effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
    }

    autoscalingListener := &v1alpha1.AutoscalingListener{
        ObjectMeta: metav1.ObjectMeta{
            Name:      scaleSetListenerName(autoscalingRunnerSet),
            Namespace: namespace,
            Labels: map[string]string{
                "auto-scaling-runner-set-namespace": autoscalingRunnerSet.Namespace,
                "auto-scaling-runner-set-name":      autoscalingRunnerSet.Name,
                LabelKeyRunnerSpecHash:              autoscalingRunnerSet.ListenerSpecHash(),
            },
        },
        Spec: v1alpha1.AutoscalingListenerSpec{
            GitHubConfigUrl:               autoscalingRunnerSet.Spec.GitHubConfigUrl,
            GitHubConfigSecret:            autoscalingRunnerSet.Spec.GitHubConfigSecret,
            RunnerScaleSetId:              runnerScaleSetId,
            AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
            AutoscalingRunnerSetName:      autoscalingRunnerSet.Name,
            EphemeralRunnerSetName:        ephemeralRunnerSet.Name,
            MinRunners:                    effectiveMinRunners,
            MaxRunners:                    effectiveMaxRunners,
            Image:                         image,
            ImagePullSecrets:              imagePullSecrets,
        },
    }

    return autoscalingListener, nil
}

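// newEphemeralRunner stamps out a single EphemeralRunner from the set's
// runner spec, relying on GenerateName for a unique per-runner name.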
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {
    return &v1alpha1.EphemeralRunner{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
            GenerateName: ephemeralRunnerSet.Name + "-runner-",
            Namespace:    ephemeralRunnerSet.Namespace,
        },
        Spec: ephemeralRunnerSet.Spec.EphemeralRunnerSpec,
    }
}

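// newEphemeralRunnerPod renders the runner pod from the EphemeralRunner's pod
// template: labels and annotations from the runner object and the template are
// merged, a pod template hash is computed for change detection, and the JIT
// config secret is injected into the runner container's environment.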
func (b *resourceBuilder) newEphemeralRunnerPod(ctx context.Context, runner *v1alpha1.EphemeralRunner, secret *corev1.Secret) *corev1.Pod {
    var newPod corev1.Pod

    labels := map[string]string{}
    annotations := map[string]string{}

    for k, v := range runner.ObjectMeta.Labels {
        labels[k] = v
    }
    for k, v := range runner.Spec.PodTemplateSpec.Labels {
        labels[k] = v
    }

    for k, v := range runner.ObjectMeta.Annotations {
        annotations[k] = v
    }
    for k, v := range runner.Spec.PodTemplateSpec.Annotations {
        annotations[k] = v
    }

    labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
        FilterLabels(labels, LabelKeyRunnerTemplateHash),
        annotations,
        runner.Spec,
        runner.Status.RunnerJITConfig,
    )

    labels["actions-ephemeral-runner"] = string(corev1.ConditionTrue)

    objectMeta := metav1.ObjectMeta{
        Name:        runner.ObjectMeta.Name,
        Namespace:   runner.ObjectMeta.Namespace,
        Labels:      labels,
        Annotations: annotations,
    }

    newPod.ObjectMeta = objectMeta
    newPod.Spec = runner.Spec.PodTemplateSpec.Spec
    newPod.Spec.Containers = make([]corev1.Container, 0, len(runner.Spec.PodTemplateSpec.Spec.Containers))

    for _, c := range runner.Spec.PodTemplateSpec.Spec.Containers {
        if c.Name == EphemeralRunnerContainerName {
            c.Env = append(c.Env, corev1.EnvVar{
                Name: EnvVarRunnerJITConfig,
                ValueFrom: &corev1.EnvVarSource{
                    SecretKeyRef: &corev1.SecretKeySelector{
                        LocalObjectReference: corev1.LocalObjectReference{
                            Name: secret.Name,
                        },
                        Key: jitTokenKey,
                    },
                },
            })
        }

        newPod.Spec.Containers = append(newPod.Spec.Containers, c)
    }

    return &newPod
}

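// newEphemeralRunnerJitSecret stores the runner's just-in-time (JIT) runner
// configuration in a secret under jitTokenKey, which newEphemeralRunnerPod
// references as a secret-backed environment variable.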
func (b *resourceBuilder) newEphemeralRunnerJitSecret(ephemeralRunner *v1alpha1.EphemeralRunner) *corev1.Secret {
    return &corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{
            Name:      ephemeralRunner.Name,
            Namespace: ephemeralRunner.Namespace,
        },
        Data: map[string][]byte{
            jitTokenKey: []byte(ephemeralRunner.Status.RunnerJITConfig),
        },
    }
}

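// The name helpers below build listener resource names as
// "<runner-set-name>-<hash>-listener", where the hash is an FNV hash of the
// runner set's namespace truncated to 8 characters; this keeps names unique
// across namespaces without exceeding Kubernetes name length limits.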
func scaleSetListenerName(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) string {
    namespaceHash := hash.FNVHashString(autoscalingRunnerSet.Namespace)
    if len(namespaceHash) > 8 {
        namespaceHash = namespaceHash[:8]
    }
    return fmt.Sprintf("%v-%v-listener", autoscalingRunnerSet.Name, namespaceHash)
}

func scaleSetListenerServiceAccountName(autoscalingListener *v1alpha1.AutoscalingListener) string {
    namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
    if len(namespaceHash) > 8 {
        namespaceHash = namespaceHash[:8]
    }
    return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}

func scaleSetListenerRoleName(autoscalingListener *v1alpha1.AutoscalingListener) string {
    namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
    if len(namespaceHash) > 8 {
        namespaceHash = namespaceHash[:8]
    }
    return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}

func scaleSetListenerSecretMirrorName(autoscalingListener *v1alpha1.AutoscalingListener) string {
    namespaceHash := hash.FNVHashString(autoscalingListener.Spec.AutoscalingRunnerSetNamespace)
    if len(namespaceHash) > 8 {
        namespaceHash = namespaceHash[:8]
    }
    return fmt.Sprintf("%v-%v-listener", autoscalingListener.Spec.AutoscalingRunnerSetName, namespaceHash)
}

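// rulesForListenerRole returns the least-privilege policy for the listener:
// patch on the named EphemeralRunnerSets only, plus patch on EphemeralRunners
// and their status subresource in the Role's namespace.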
func rulesForListenerRole(resourceNames []string) []rbacv1.PolicyRule {
    return []rbacv1.PolicyRule{
        {
            APIGroups:     []string{"actions.github.com"},
            Resources:     []string{"ephemeralrunnersets"},
            ResourceNames: resourceNames,
            Verbs:         []string{"patch"},
        },
        {
            APIGroups: []string{"actions.github.com"},
            Resources: []string{"ephemeralrunners", "ephemeralrunners/status"},
            Verbs:     []string{"patch"},
        },
    }
}

91
controllers/actions.github.com/suite_test.go
Normal file
@@ -0,0 +1,91 @@

/*
Copyright 2020 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package actionsgithubcom

import (
    "os"
    "path/filepath"
    "testing"

    "github.com/onsi/ginkgo/config"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
    "k8s.io/client-go/kubernetes/scheme"
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
    // +kubebuilder:scaffold:imports
)

// These tests use Ginkgo (BDD-style Go testing framework). Refer to
// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.

var cfg *rest.Config
var k8sClient client.Client
var testEnv *envtest.Environment

func TestAPIs(t *testing.T) {
    RegisterFailHandler(Fail)

    config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))

    RunSpecsWithDefaultAndCustomReporters(t,
        "Controller Suite",
        []Reporter{printer.NewlineReporter{}})
}

var _ = BeforeSuite(func(done Done) {
    logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))

    By("bootstrapping test environment")
    testEnv = &envtest.Environment{
        CRDDirectoryPaths: []string{filepath.Join("../..", "config", "crd", "bases")},
    }

    // Avoids the following error:
    // 2021-03-19T15:14:11.673+0900 ERROR controller-runtime.controller Reconciler error {"controller": "testns-tvjzjrunner", "request": "testns-gdnyx/example-runnerdeploy-zps4z-j5562", "error": "Pod \"example-runnerdeploy-zps4z-j5562\" is invalid: [spec.containers[1].image: Required value, spec.containers[1].securityContext.privileged: Forbidden: disallowed by cluster policy]"}
    testEnv.ControlPlane.GetAPIServer().Configure().
        Append("allow-privileged", "true")

    var err error
    cfg, err = testEnv.Start()
    Expect(err).ToNot(HaveOccurred())
    Expect(cfg).ToNot(BeNil())

    err = actionsv1alpha1.AddToScheme(scheme.Scheme)
    Expect(err).NotTo(HaveOccurred())

    // +kubebuilder:scaffold:scheme

    k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
    Expect(err).ToNot(HaveOccurred())
    Expect(k8sClient).ToNot(BeNil())

    close(done)
}, 60)

var _ = AfterSuite(func() {
    By("tearing down the test environment")
    err := testEnv.Stop()
    Expect(err).ToNot(HaveOccurred())
})

27
controllers/actions.github.com/utils.go
Normal file
@@ -0,0 +1,27 @@

package actionsgithubcom

import (
    "k8s.io/apimachinery/pkg/util/rand"
)

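// FilterLabels returns a copy of labels with the given key removed; it is
// used by newEphemeralRunnerPod to exclude the runner template hash when
// computing the pod template hash.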
func FilterLabels(labels map[string]string, filter string) map[string]string {
    filtered := map[string]string{}

    for k, v := range labels {
        if k != filter {
            filtered[k] = v
        }
    }

    return filtered
}

var letterRunes = []rune("abcdefghijklmnopqrstuvwxyz1234567890")

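// RandStringRunes returns a random lowercase alphanumeric string of length n.
// It uses k8s.io/apimachinery's math/rand-based helper, so it is suitable for
// test resource names but not for anything security-sensitive.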
func RandStringRunes(n int) string {
    b := make([]rune, n)
    for i := range b {
        b[i] = letterRunes[rand.Intn(len(letterRunes))]
    }
    return string(b)
}

34
controllers/actions.github.com/utils_test.go
Normal file
@@ -0,0 +1,34 @@

package actionsgithubcom

import (
    "reflect"
    "testing"
)

func Test_filterLabels(t *testing.T) {
    type args struct {
        labels map[string]string
        filter string
    }
    tests := []struct {
        name string
        args args
        want map[string]string
    }{
        {
            name: "ok",
            args: args{
                labels: map[string]string{LabelKeyRunnerTemplateHash: "abc", LabelKeyPodTemplateHash: "def"},
                filter: LabelKeyRunnerTemplateHash,
            },
            want: map[string]string{LabelKeyPodTemplateHash: "def"},
        },
    }
    for _, tt := range tests {
        t.Run(tt.name, func(t *testing.T) {
            if got := FilterLabels(tt.args.labels, tt.args.filter); !reflect.DeepEqual(got, tt.want) {
                t.Errorf("FilterLabels() = %v, want %v", got, tt.want)
            }
        })
    }
}