Compare commits

..

7 Commits

Author SHA1 Message Date
Moto Ishizawa
3818e584ec Merge pull request #76 from summerwind/fix-crash-on-startup
Fix crash on startup after the HRDA addition
2020-08-02 13:10:56 +09:00
Yusuke Kuoka
50487bbb54 Fix the HRA controller name 2020-08-02 10:38:15 +09:00
Yusuke Kuoka
e2164f9946 Fix integration test bugs and do verify scaling out 2020-08-02 10:34:58 +09:00
Moto Ishizawa
bdc1279e9e Merge pull request #80 from summerwind/runner-v2.272.0
Update runner to v2.272.0
2020-08-01 17:53:37 +09:00
Moto Ishizawa
3223480bc0 Update docker to v19.03.12 2020-08-01 17:28:40 +09:00
Moto Ishizawa
e642632a50 Update runner to v2.272.0 2020-08-01 17:28:16 +09:00
Yusuke Kuoka
3c3077a11c Fix crash on startup after the HRDA addition
This is a follow-up for #66.

The reconciler for the new HorizontalRunnerDeploymentAutoscaler had a terrible flaw that prevented the controller from launching, failing on startup with an error like:

```
indexer conflict: map[field:.metadata.controller:{}]
```

This fixes that, while adding `integration_test.go` to verify that it's actually fixed and to prevent regressions in the future.
2020-07-29 21:20:46 +09:00
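
A note on the `indexer conflict` error quoted above: controller-runtime allows a given field index key to be registered only once per manager for a given object type, and the HorizontalRunnerAutoscaler reconciler introduced in #66 repeated the `IndexField` registration that the RunnerDeployment reconciler had already made for `RunnerReplicaSet`. The sketch below is hypothetical and not part of this changeset (the `setupDuplicateIndexers` helper is invented for illustration; `runnerSetOwnerKey` and the `v1alpha1` types are the repository's own, and the `IndexField` signature follows the one visible in the first diff); it only illustrates the failure mode that the fix removes.

```go
package controllers

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	ctrl "sigs.k8s.io/controller-runtime"

	"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

// setupDuplicateIndexers is a hypothetical helper showing two reconcilers
// registering the same field index key for the same object type on one manager.
func setupDuplicateIndexers(mgr ctrl.Manager) error {
	ownerName := func(rawObj runtime.Object) []string {
		owner := metav1.GetControllerOf(rawObj.(*v1alpha1.RunnerReplicaSet))
		if owner == nil {
			return nil
		}
		return []string{owner.Name}
	}

	// First registration of the runnerSetOwnerKey index (the ".metadata.controller"
	// field named in the error), as the RunnerDeployment reconciler already does.
	if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, ownerName); err != nil {
		return err
	}

	// Repeating the registration for the same type and key, as the copied
	// HorizontalRunnerAutoscaler reconciler did before this fix, is what
	// surfaces at startup as:
	//   indexer conflict: map[field:.metadata.controller:{}]
	return mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, ownerName)
}
```

The fix in the first file below simply drops the duplicate registration and points the controller at `HorizontalRunnerAutoscaler` instead of `RunnerDeployment`.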
3 changed files with 258 additions and 21 deletions

View File

@@ -128,27 +128,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
 }
 
 func (r *HorizontalRunnerAutoscalerReconciler) SetupWithManager(mgr ctrl.Manager) error {
-	r.Recorder = mgr.GetEventRecorderFor("runnerdeployment-controller")
-
-	if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
-		runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
-		owner := metav1.GetControllerOf(runnerSet)
-
-		if owner == nil {
-			return nil
-		}
-
-		if owner.APIVersion != v1alpha1.GroupVersion.String() || owner.Kind != "RunnerDeployment" {
-			return nil
-		}
-
-		return []string{owner.Name}
-	}); err != nil {
-		return err
-	}
+	r.Recorder = mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller")
 
 	return ctrl.NewControllerManagedBy(mgr).
-		For(&v1alpha1.RunnerDeployment{}).
-		Owns(&v1alpha1.RunnerReplicaSet{}).
+		For(&v1alpha1.HorizontalRunnerAutoscaler{}).
 		Complete(r)
 }

View File

@@ -0,0 +1,254 @@
package controllers

import (
	"context"
	"github.com/summerwind/actions-runner-controller/github/fake"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/scheme"
	ctrl "sigs.k8s.io/controller-runtime"
	logf "sigs.k8s.io/controller-runtime/pkg/log"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

// SetupIntegrationTest will set up a testing environment.
// This includes:
// * creating a Namespace to be used during the test
// * starting all the reconcilers
// * stopping all the reconcilers after the test ends
// Call this function at the start of each of your tests.
func SetupIntegrationTest(ctx context.Context) *corev1.Namespace {
	var stopCh chan struct{}

	ns := &corev1.Namespace{}

	workflowRuns := `{"total_count": 5, "workflow_runs":[{"status":"queued"}, {"status":"queued"}, {"status":"in_progress"}, {"status":"in_progress"}, {"status":"completed"}]}"`

	server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, workflowRuns))

	BeforeEach(func() {
		stopCh = make(chan struct{})

		*ns = corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
		}

		err := k8sClient.Create(ctx, ns)
		Expect(err).NotTo(HaveOccurred(), "failed to create test namespace")

		mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
		Expect(err).NotTo(HaveOccurred(), "failed to create manager")

		replicasetController := &RunnerReplicaSetReconciler{
			Client:   mgr.GetClient(),
			Scheme:   scheme.Scheme,
			Log:      logf.Log,
			Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
		}
		err = replicasetController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		deploymentsController := &RunnerDeploymentReconciler{
			Client:   mgr.GetClient(),
			Scheme:   scheme.Scheme,
			Log:      logf.Log,
			Recorder: mgr.GetEventRecorderFor("runnerdeployment-controller"),
		}
		err = deploymentsController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		client := newGithubClient(server)

		autoscalerController := &HorizontalRunnerAutoscalerReconciler{
			Client:       mgr.GetClient(),
			Scheme:       scheme.Scheme,
			Log:          logf.Log,
			GitHubClient: client,
			Recorder:     mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
		}
		err = autoscalerController.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		go func() {
			defer GinkgoRecover()

			err := mgr.Start(stopCh)
			Expect(err).NotTo(HaveOccurred(), "failed to start manager")
		}()
	})

	AfterEach(func() {
		close(stopCh)

		server.Close()

		err := k8sClient.Delete(ctx, ns)
		Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
	})

	return ns
}

var _ = Context("Inside of a new namespace", func() {
	ctx := context.TODO()
	ns := SetupIntegrationTest(ctx)

	Describe("when no existing resources exist", func() {

		It("should create and scale runners", func() {
			name := "example-runnerdeploy"

			{
				rs := &actionsv1alpha1.RunnerDeployment{
					ObjectMeta: metav1.ObjectMeta{
						Name:      name,
						Namespace: ns.Name,
					},
					Spec: actionsv1alpha1.RunnerDeploymentSpec{
						Replicas: intPtr(1),
						Template: actionsv1alpha1.RunnerTemplate{
							Spec: actionsv1alpha1.RunnerSpec{
								Repository: "test/valid",
								Image:      "bar",
								Env: []corev1.EnvVar{
									{Name: "FOO", Value: "FOOVALUE"},
								},
							},
						},
					},
				}

				err := k8sClient.Create(ctx, rs)
				Expect(err).NotTo(HaveOccurred(), "failed to create test RunnerDeployment resource")

				runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

				Eventually(
					func() int {
						err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
						if err != nil {
							logf.Log.Error(err, "list runner sets")
						}

						return len(runnerSets.Items)
					},
					time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))

				Eventually(
					func() int {
						err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
						if err != nil {
							logf.Log.Error(err, "list runner sets")
						}

						if len(runnerSets.Items) == 0 {
							logf.Log.Info("No runnerreplicasets exist yet")
							return -1
						}

						return *runnerSets.Items[0].Spec.Replicas
					},
					time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
			}

			{
				// We wrap the update in the Eventually block to avoid the below error that occurs due to concurrent modification
				// made by the controller to update .Status.AvailableReplicas and .Status.ReadyReplicas
				//   Operation cannot be fulfilled on runnersets.actions.summerwind.dev "example-runnerset": the object has been modified; please apply your changes to the latest version and try again
				Eventually(func() error {
					var rd actionsv1alpha1.RunnerDeployment

					err := k8sClient.Get(ctx, types.NamespacedName{Namespace: ns.Name, Name: name}, &rd)
					Expect(err).NotTo(HaveOccurred(), "failed to get test RunnerDeployment resource")

					rd.Spec.Replicas = intPtr(2)

					return k8sClient.Update(ctx, &rd)
				},
					time.Second*1, time.Millisecond*500).Should(BeNil())

				runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

				Eventually(
					func() int {
						err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
						if err != nil {
							logf.Log.Error(err, "list runner sets")
						}

						return len(runnerSets.Items)
					},
					time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))

				Eventually(
					func() int {
						err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
						if err != nil {
							logf.Log.Error(err, "list runner sets")
						}

						return *runnerSets.Items[0].Spec.Replicas
					},
					time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2))
			}

			{
				rs := &actionsv1alpha1.HorizontalRunnerAutoscaler{
					ObjectMeta: metav1.ObjectMeta{
						Name:      name,
						Namespace: ns.Name,
					},
					Spec: actionsv1alpha1.HorizontalRunnerAutoscalerSpec{
						ScaleTargetRef: actionsv1alpha1.ScaleTargetRef{
							Name: name,
						},
						MinReplicas:                       intPtr(1),
						MaxReplicas:                       intPtr(3),
						ScaleDownDelaySecondsAfterScaleUp: nil,
						Metrics:                           nil,
					},
				}

				err := k8sClient.Create(ctx, rs)
				Expect(err).NotTo(HaveOccurred(), "failed to create test RunnerDeployment resource")

				runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}

				Eventually(
					func() int {
						err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
						if err != nil {
							logf.Log.Error(err, "list runner sets")
						}

						return len(runnerSets.Items)
					},
					time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))

				Eventually(
					func() int {
						err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
						if err != nil {
							logf.Log.Error(err, "list runner sets")
						}

						if len(runnerSets.Items) == 0 {
							logf.Log.Info("No runnerreplicasets exist yet")
							return -1
						}

						return *runnerSets.Items[0].Spec.Replicas
					},
					time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(3))
			}
		})
	})
})

View File

@@ -1,7 +1,7 @@
 NAME ?= summerwind/actions-runner
-RUNNER_VERSION ?= 2.267.1
-DOCKER_VERSION ?= 19.03.8
+RUNNER_VERSION ?= 2.272.0
+DOCKER_VERSION ?= 19.03.12
 
 docker-build:
 	docker build --build-arg RUNNER_VERSION=${RUNNER_VERSION} --build-arg DOCKER_VERSION=${DOCKER_VERSION} -t ${NAME}:latest -t ${NAME}:v${RUNNER_VERSION} .