chore(deps): bump sigs.k8s.io/controller-runtime from 0.13.1 to 0.14.1 (#2132)

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: Yusuke Kuoka <ykuoka@gmail.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Yusuke Kuoka <ykuoka@gmail.com>
Author: dependabot[bot]
Date: 2023-01-27 09:23:28 +09:00
Committed by: GitHub
Parent: b09e3a2dc9
Commit: 219ba5b477
32 changed files with 1683 additions and 249 deletions

@@ -11,7 +11,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -219,7 +219,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
// Update the status of autoscaling runner set.
if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
-if err := patch(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
+if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas
}); err != nil {
log.Error(err, "Failed to update autoscaling runner set status with current runner count")

@@ -10,7 +10,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -20,3 +20,13 @@ func patch[T object[T]](ctx context.Context, client patcher, obj T, update func(
update(obj)
return client.Patch(ctx, obj, kclient.MergeFrom(original))
}
+type subResourcePatcher interface {
+Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.SubResourcePatchOption) error
+}
+func patchSubResource[T object[T]](ctx context.Context, client subResourcePatcher, obj T, update func(obj T)) error {
+original := obj.DeepCopy()
+update(obj)
+return client.Patch(ctx, obj, kclient.MergeFrom(original))
+}
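Why a second generic helper is needed (a sketch, not part of the diff): controller-runtime v0.14 changed the writer returned by Client.Status() so that its Patch method takes client.SubResourcePatchOption values, while the plain client keeps client.PatchOption, so one helper can no longer serve both. The shape shown below for the pre-existing patcher interface is an assumption for illustration; only subResourcePatcher appears in the diff:

package controllers // sketch only

import (
	"context"

	kclient "sigs.k8s.io/controller-runtime/pkg/client"
)

// Assumed shape of the interface behind the existing patch helper: it matches the
// main-resource client, whose Patch takes variadic kclient.PatchOption.
type patcher interface {
	Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.PatchOption) error
}

// The interface added by this commit: it matches the status writer returned by
// Client.Status() in controller-runtime v0.14, whose Patch takes variadic
// kclient.SubResourcePatchOption instead.
type subResourcePatcher interface {
	Patch(ctx context.Context, obj kclient.Object, patch kclient.Patch, opts ...kclient.SubResourcePatchOption) error
}

// Compile-time illustration of which helper each writer can feed.
func whichHelper(c kclient.Client) {
	var _ patcher = c                     // main-resource writes still go through patch
	var _ subResourcePatcher = c.Status() // status writes now go through patchSubResource
}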

@@ -369,7 +369,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Failed")
-if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
obj.Status.Reason = "TooManyPodFailures"
obj.Status.Message = "Pod has failed to start more than 5 times"
@@ -388,7 +388,7 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
func (r *EphemeralRunnerReconciler) markAsFinished(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Finished")
-if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodSucceeded
}); err != nil {
return fmt.Errorf("failed to update ephemeral runner with status finished: %v", err)
@@ -409,7 +409,7 @@ func (r *EphemeralRunnerReconciler) deletePodAsFailed(ctx context.Context, ephem
}
log.Info("Updating ephemeral runner status to track the failure count")
-if err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
if obj.Status.Failures == nil {
obj.Status.Failures = make(map[string]bool)
}
@@ -487,7 +487,7 @@ func (r *EphemeralRunnerReconciler) updateStatusWithRunnerConfig(ctx context.Con
log.Info("Created ephemeral runner JIT config", "runnerId", jitConfig.Runner.Id)
log.Info("Updating ephemeral runner status with runnerId and runnerJITConfig")
-err = patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+err = patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.RunnerId = jitConfig.Runner.Id
obj.Status.RunnerName = jitConfig.Runner.Name
obj.Status.RunnerJITConfig = jitConfig.EncodedJITConfig
@@ -556,7 +556,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
}
log.Info("Updating ephemeral runner status with pod phase", "phase", pod.Status.Phase, "reason", pod.Status.Reason, "message", pod.Status.Message)
-err := patch(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
+err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = pod.Status.Phase
obj.Status.Ready = obj.Status.Ready || (pod.Status.Phase == corev1.PodRunning)
obj.Status.Reason = pod.Status.Reason

@@ -10,7 +10,7 @@ import (
"github.com/actions/actions-runner-controller/github/actions"
"github.com/actions/actions-runner-controller/github/actions/fake"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
corev1 "k8s.io/api/core/v1"
kerrors "k8s.io/apimachinery/pkg/api/errors"

@@ -182,7 +182,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
// Update the status if needed.
if ephemeralRunnerSet.Status.CurrentReplicas != total {
log.Info("Updating status with current runners count", "count", total)
-if err := patch(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
+if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
obj.Status.CurrentReplicas = total
}); err != nil {
log.Error(err, "Failed to update status with current runners count")

@@ -11,7 +11,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
logf "sigs.k8s.io/controller-runtime/pkg/log"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -23,7 +23,7 @@ import (
"github.com/onsi/ginkgo/config"
. "github.com/onsi/ginkgo"
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
actionsv1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
@@ -31,7 +31,6 @@ import (
"k8s.io/client-go/rest"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/envtest"
"sigs.k8s.io/controller-runtime/pkg/envtest/printer"
logf "sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
// +kubebuilder:scaffold:imports
@@ -49,12 +48,10 @@ func TestAPIs(t *testing.T) {
config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))
-RunSpecsWithDefaultAndCustomReporters(t,
-"Controller Suite",
-[]Reporter{printer.NewlineReporter{}})
+RunSpecs(t, "Controller Suite")
}
-var _ = BeforeSuite(func(done Done) {
+var _ = BeforeSuite(func() {
logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))
By("bootstrapping test environment")
@@ -80,9 +77,7 @@ var _ = BeforeSuite(func(done Done) {
k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
Expect(err).ToNot(HaveOccurred())
Expect(k8sClient).ToNot(BeNil())
-close(done)
-}, 60)
+})
var _ = AfterSuite(func() {
By("tearing down the test environment")