chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 (#2132)

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Yusuke Kuoka <ykuoka@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
commit 219ba5b477
parent b09e3a2dc9
Author: dependabot[bot]
Date:   2023-01-27 09:23:28 +09:00
Committed by: GitHub

32 changed files with 1683 additions and 249 deletions
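Taken together, the hunks below fall into four groups: the test files swap their Ginkgo dot-import from github.com/onsi/ginkgo to github.com/onsi/ginkgo/v2; the two envtest suites drop the removed envtest/printer reporter and Ginkgo v1's async BeforeSuite(func(done Done) {...}, 60) form; status patches move from the generic patch helper to a new patchSubResource helper that matches the subresource client signature introduced in controller-runtime 0.14; and the webhook autoscaler's field-index closure is extracted into a named indexer method so the tests' fake client can register it too.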

View File

@@ -11,7 +11,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -219,7 +219,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
     // Update the status of autoscaling runner set.
     if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
-        if err := patch(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
+        if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
             obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas
         }); err != nil {
             log.Error(err, "Failed to update autoscaling runner set status with current runner count")

View File

@@ -10,7 +10,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -20,3 +20,13 @@ func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(
     update(obj)
     return client.Patch(ctx, obj, kclient.MergeFrom(original))
 }
+
+type subResourcePatcher interface {
+    Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.SubResourcePatchOption) error
+}
+
+func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error {
+    original := obj.DeepCopy()
+    update(obj)
+    return client.Patch(ctx, obj, kclient.MergeFrom(original))
+}
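Why the new helper: the existing generic patch is typed against a patcher interface whose Patch takes ...kclient.PatchOption, but in controller-runtime 0.14 Client.Status() returns a SubResourceWriter whose Patch takes ...kclient.SubResourcePatchOption, so r.Status() no longer satisfies the old interface. subResourcePatcher mirrors the new signature. A minimal call-site sketch, with the reconciler receiver and v1alpha1/corev1 types assumed from the surrounding hunks:

// Hedged sketch: patch only the status subresource of an EphemeralRunner.
// r.Status() satisfies subResourcePatcher under controller-runtime 0.14;
// only the fields mutated inside the callback end up in the merge patch.
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
    obj.Status.Phase = corev1.PodSucceeded // status-only mutation
}); err != nil {
    return fmt.Errorf("failed to patch ephemeral runner status: %w", err)
}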

View File

@@ -369,7 +369,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
 func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
     log.Info("Updating ephemeral runner status to Failed")
-    if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+    if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
         obj.Status.Phase = corev1.PodFailed
         obj.Status.Reason = "TooManyPodFailures"
         obj.Status.Message = "Pod has failed to start more than 5 times"
@@ -388,7 +388,7 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
 func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
     log.Info("Updating ephemeral runner status to Finished")
-    if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+    if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
         obj.Status.Phase = corev1.PodSucceeded
     }); err != nil {
         return fmt.Errorf("failed to update ephemeral runner with status finished: %v", err)
@@ -409,7 +409,7 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
     }
     log.Info("Updating ephemeral runner status to track the failure count")
-    if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+    if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
         if obj.Status.Failures == nil {
             obj.Status.Failures = make(map[string]bool)
         }
@@ -487,7 +487,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
     log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
     log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
-    err = patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+    err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
         obj.Status.RunnerId = jitConfig.Runner.Id
         obj.Status.RunnerName = jitConfig.Runner.Name
         obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
@@ -556,7 +556,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
     }
     log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message)
-    err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+    err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
         obj.Status.Phase = pod.Status.Phase
         obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning)
         obj.Status.Reason = pod.Status.Reason

View File

@@ -10,7 +10,7 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/fake"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"

View File

@@ -182,7 +182,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
     // Update the status if needed.
     if ephemeralRunnerSet.Status.CurrentReplicas != total {
         log.Info("Updating status with current runners count", "count", total)
-        if err := patch(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
+        if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
             obj.Status.CurrentReplicas = total
         }); err != nil {
             log.Error(err, "Failed to update status with current runners count")

View File

@@ -11,7 +11,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

View File

@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -31,7 +31,6 @@ import (
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
@@ -49,12 +48,10 @@ func TestAPIs(t *testing.T) {
config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func(done Done) {
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
By("bootstrapping test environment")
@@ -80,9 +77,7 @@ var _ = BeforeSuite(func(done Done) {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
})
var _ = AfterSuite(func() {
By("tearing down the test environment")

View File

@@ -681,71 +681,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr
     autoscaler.Recorder = mgr.GetEventRecorderFor(name)
-    if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj client.Object) []string {
-        hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
-        if hra.Spec.ScaleTargetRef.Name == "" {
-            autoscaler.Log.V(1).Info(fmt.Sprintf("scale target ref name not set for hra %s", hra.Name))
-            return nil
-        }
-        switch hra.Spec.ScaleTargetRef.Kind {
-        case "", "RunnerDeployment":
-            var rd v1alpha1.RunnerDeployment
-            if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
-                autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
-                return nil
-            }
-            keys := []string{}
-            if rd.Spec.Template.Spec.Repository != "" {
-                keys = append(keys, rd.Spec.Template.Spec.Repository) // Repository runners
-            }
-            if rd.Spec.Template.Spec.Organization != "" {
-                if group := rd.Spec.Template.Spec.Group; group != "" {
-                    keys = append(keys, organizationalRunnerGroupKey(rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Group)) // Organization runner groups
-                } else {
-                    keys = append(keys, rd.Spec.Template.Spec.Organization) // Organization runners
-                }
-            }
-            if enterprise := rd.Spec.Template.Spec.Enterprise; enterprise != "" {
-                if group := rd.Spec.Template.Spec.Group; group != "" {
-                    keys = append(keys, enterpriseRunnerGroupKey(enterprise, rd.Spec.Template.Spec.Group)) // Enterprise runner groups
-                } else {
-                    keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners
-                }
-            }
-            autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys))
-            return keys
-        case "RunnerSet":
-            var rs v1alpha1.RunnerSet
-            if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
-                autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
-                return nil
-            }
-            keys := []string{}
-            if rs.Spec.Repository != "" {
-                keys = append(keys, rs.Spec.Repository) // Repository runners
-            }
-            if rs.Spec.Organization != "" {
-                keys = append(keys, rs.Spec.Organization) // Organization runners
-                if group := rs.Spec.Group; group != "" {
-                    keys = append(keys, organizationalRunnerGroupKey(rs.Spec.Organization, rs.Spec.Group)) // Organization runner groups
-                }
-            }
-            if enterprise := rs.Spec.Enterprise; enterprise != "" {
-                keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners
-                if group := rs.Spec.Group; group != "" {
-                    keys = append(keys, enterpriseRunnerGroupKey(enterprise, rs.Spec.Group)) // Enterprise runner groups
-                }
-            }
-            autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys))
-            return keys
-        }
-        return nil
-    }); err != nil {
+    if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, autoscaler.indexer); err != nil {
         return err
     }
@@ -755,6 +691,72 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr
         Complete(autoscaler)
 }
 
+func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) indexer(rawObj client.Object) []string {
+    hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
+    if hra.Spec.ScaleTargetRef.Name == "" {
+        autoscaler.Log.V(1).Info(fmt.Sprintf("scale target ref name not set for hra %s", hra.Name))
+        return nil
+    }
+    switch hra.Spec.ScaleTargetRef.Kind {
+    case "", "RunnerDeployment":
+        var rd v1alpha1.RunnerDeployment
+        if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
+            autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerDeployment not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
+            return nil
+        }
+        keys := []string{}
+        if rd.Spec.Template.Spec.Repository != "" {
+            keys = append(keys, rd.Spec.Template.Spec.Repository) // Repository runners
+        }
+        if rd.Spec.Template.Spec.Organization != "" {
+            if group := rd.Spec.Template.Spec.Group; group != "" {
+                keys = append(keys, organizationalRunnerGroupKey(rd.Spec.Template.Spec.Organization, rd.Spec.Template.Spec.Group)) // Organization runner groups
+            } else {
+                keys = append(keys, rd.Spec.Template.Spec.Organization) // Organization runners
+            }
+        }
+        if enterprise := rd.Spec.Template.Spec.Enterprise; enterprise != "" {
+            if group := rd.Spec.Template.Spec.Group; group != "" {
+                keys = append(keys, enterpriseRunnerGroupKey(enterprise, rd.Spec.Template.Spec.Group)) // Enterprise runner groups
+            } else {
+                keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners
+            }
+        }
+        autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys))
+        return keys
+    case "RunnerSet":
+        var rs v1alpha1.RunnerSet
+        if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rs); err != nil {
+            autoscaler.Log.V(1).Info(fmt.Sprintf("RunnerSet not found with scale target ref name %s for hra %s", hra.Spec.ScaleTargetRef.Name, hra.Name))
+            return nil
+        }
+        keys := []string{}
+        if rs.Spec.Repository != "" {
+            keys = append(keys, rs.Spec.Repository) // Repository runners
+        }
+        if rs.Spec.Organization != "" {
+            keys = append(keys, rs.Spec.Organization) // Organization runners
+            if group := rs.Spec.Group; group != "" {
+                keys = append(keys, organizationalRunnerGroupKey(rs.Spec.Organization, rs.Spec.Group)) // Organization runner groups
+            }
+        }
+        if enterprise := rs.Spec.Enterprise; enterprise != "" {
+            keys = append(keys, enterpriseKey(enterprise)) // Enterprise runners
+            if group := rs.Spec.Group; group != "" {
+                keys = append(keys, enterpriseRunnerGroupKey(enterprise, rs.Spec.Group)) // Enterprise runner groups
+            }
+        }
+        autoscaler.Log.V(2).Info(fmt.Sprintf("HRA keys indexed for HRA %s: %v", hra.Name, keys))
+        return keys
+    }
+    return nil
+}
 
 func enterpriseKey(name string) string {
     return keyPrefixEnterprise + name
 }
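The refactor in these two hunks is behavior-preserving: the closure body moves verbatim into the named indexer method. The point of naming it appears to be reuse from the test below, presumably because the controller-runtime 0.14 fake client rejects List calls using a field selector for which no index has been registered, whereas the old inline closure was only reachable through the manager.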

View File

@@ -431,7 +431,11 @@ func testServerWithInitObjs(t *testing.T, eventType string, event interface{}, w
hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{}
client := fake.NewClientBuilder().WithScheme(sc).WithRuntimeObjects(initObjs...).Build()
client := fake.NewClientBuilder().
WithScheme(sc).
WithRuntimeObjects(initObjs...).
WithIndex(&actionsv1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, hraWebhook.indexer).
Build()
logs := installTestLogger(hraWebhook)
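WithIndex is new to the fake client builder in controller-runtime 0.14; it registers the same extractor that the manager registers at runtime via GetFieldIndexer().IndexField, so field-selector Lists on scaleTargetKey behave alike in tests and against a real cache. A hedged usage sketch, where "myorg/myrepo" is a made-up key value and ctrlclient is a hypothetical alias for sigs.k8s.io/controller-runtime/pkg/client (needed here because the local variable shadows the usual client name):

// List HRAs whose indexed scale-target key matches a repository.
var hras actionsv1alpha1.HorizontalRunnerAutoscalerList
if err := client.List(context.Background(), &hras,
    ctrlclient.MatchingFields{scaleTargetKey: "myorg/myrepo"}); err != nil {
    t.Fatalf("listing HRAs by scale target key: %v", err)
}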

View File

@@ -18,7 +18,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

View File

@@ -15,7 +15,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

View File

@@ -11,7 +11,7 @@ import (
ctrl "sigs.k8s.io/controller-runtime"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"sigs.k8s.io/controller-runtime/pkg/client"

View File

@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.summerwind.net/v1alpha1"
@@ -31,7 +31,6 @@ import (
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
@@ -49,12 +48,10 @@ func TestAPIs(t *testing.T) {
config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{printer.NewlineReporter{}})
RunSpecs(t, "Controller Suite")
}
var _ = BeforeSuite(func(done Done) {
var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
By("bootstrapping test environment")
@@ -80,9 +77,7 @@ var _ = BeforeSuite(func(done Done) {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
close(done)
}, 60)
})
var _ = AfterSuite(func() {
By("tearing down the test environment")