feat: HorizontalRunnerAutoscaler Webhook server (#282)

* feat: HorizontalRunnerAutoscaler Webhook server

This introduces a webhook server that responds to GitHub `check_run`, `pull_request`, and `push` events by scaling up the matched HorizontalRunnerAutoscaler by 1 replica. This allows you to immediately add "resource slack" for future GitHub Actions job runs, without waiting for the next sync period to provision the missing runners.

This feature is highly inspired by https://github.com/philips-labs/terraform-aws-github-runner. terraform-aws-github-runner can manage one set of runners per deployment, whereas actions-runner-controller with this feature can manage as many sets of runners as you declare with HorizontalRunnerAutoscaler and RunnerDeployment pairs.

On each GitHub event received, the webhook server queries repository-wide and organizational runners from the cluster and searches for a single target to scale up. It matches the event against HorizontalRunnerAutoscaler.Spec.ScaleUpTriggers[].GitHubEvent.[CheckRun|Push|PullRequest] and, if exactly one HRA matches, uses it as the scale target. If zero or more than one repository-wide target matches, it repeats the search against organizational runners.
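For illustration, a HorizontalRunnerAutoscaler spec with a matching trigger looks like the following Go sketch. It mirrors the spec built in the integration test below; `intPtr` is the test helper returning `*int`, and the RunnerDeployment name and the 10-minute duration are placeholder values.

	spec := v1alpha1.HorizontalRunnerAutoscalerSpec{
		ScaleTargetRef: v1alpha1.ScaleTargetRef{Name: "example-runnerdeploy"},
		MinReplicas:    intPtr(1),
		MaxReplicas:    intPtr(3),
		ScaleUpTriggers: []v1alpha1.ScaleUpTrigger{
			{
				GitHubEvent: &v1alpha1.GitHubEventScaleUpTriggerSpec{
					PullRequest: &v1alpha1.PullRequestSpec{
						Types:    []string{"created"},
						Branches: []string{"main"},
					},
				},
				Amount:   1,                                           // scale up by one replica per matched event
				Duration: metav1.Duration{Duration: 10 * time.Minute}, // how long the extra capacity is reserved (example value)
			},
		},
	}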

Changes:

* Fix integration test
* Update manifests
* chart: Add support for github webhook server
* dockerfile: Include github-webhook-server binary
* Do not import unversioned go-github
* Update README
Author: Yusuke Kuoka
Date: 2021-02-07 17:37:27 +09:00
Committed by: GitHub
Parent: a4350d0fc2
Commit: ab1c39de57
31 changed files with 1993 additions and 45 deletions


@@ -7,6 +7,7 @@ import (
"math"
"strconv"
"strings"
"time"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -19,6 +20,47 @@ const (
defaultScaleDownFactor = 0.7
)
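// getValueAvailableAt returns reservedValue while it is still valid and nil
// otherwise: nil once `to` has passed, and nil before `from` has been reached.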
func getValueAvailableAt(now time.Time, from, to *time.Time, reservedValue int) *int {
if to != nil && now.After(*to) {
return nil
}
if from != nil && now.Before(*from) {
return nil
}
return &reservedValue
}
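// getDesiredReplicasFromCache returns the first cached desired-replicas entry
// that has not yet expired, or nil when no valid cache entry exists.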
func (r *HorizontalRunnerAutoscalerReconciler) getDesiredReplicasFromCache(hra v1alpha1.HorizontalRunnerAutoscaler) *int {
var entry *v1alpha1.CacheEntry
for i := range hra.Status.CacheEntries {
ent := hra.Status.CacheEntries[i]
if ent.Key != v1alpha1.CacheEntryKeyDesiredReplicas {
continue
}
if !time.Now().Before(ent.ExpirationTime.Time) {
continue
}
entry = &ent
break
}
if entry != nil {
v := getValueAvailableAt(time.Now(), nil, &entry.ExpirationTime.Time, entry.Value)
if v != nil {
return v
}
}
return nil
}
func (r *HorizontalRunnerAutoscalerReconciler) determineDesiredReplicas(rd v1alpha1.RunnerDeployment, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
if hra.Spec.MinReplicas == nil {
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)


@@ -157,7 +157,11 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
_ = v1alpha1.AddToScheme(scheme)
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
server := fake.NewServer(
fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns),
fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
fake.WithListRunnersResponse(200, fake.RunnersListBody),
)
defer server.Close()
client := newGithubClient(server)
@@ -368,7 +372,11 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
_ = v1alpha1.AddToScheme(scheme)
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
server := fake.NewServer(fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns), fake.WithListWorkflowJobsResponse(200, tc.workflowJobs))
server := fake.NewServer(
fake.WithListRepositoryWorkflowRunsResponse(200, tc.workflowRuns),
fake.WithListWorkflowJobsResponse(200, tc.workflowJobs),
fake.WithListRunnersResponse(200, fake.RunnersListBody),
)
defer server.Close()
client := newGithubClient(server)


@@ -0,0 +1,375 @@
/*
Copyright 2020 The actions-runner-controller authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controllers
import (
"context"
"fmt"
"io/ioutil"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"net/http"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"time"
"github.com/go-logr/logr"
gogithub "github.com/google/go-github/v33/github"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)
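// scaleTargetKey is the field-index key registered in SetupWithManager. It maps
// each HorizontalRunnerAutoscaler to the repository and the organization of its
// target RunnerDeployment, so findHRAsByKey can look up HRAs by either name.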
const (
scaleTargetKey = "scaleTarget"
)
// HorizontalRunnerAutoscalerGitHubWebhook autoscales a HorizontalRunnerAutoscaler and its RunnerDeployment on each
// GitHub webhook event received
type HorizontalRunnerAutoscalerGitHubWebhook struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
// SecretKeyBytes is the byte representation of the webhook secret token
// that the administrator generated and specified in the GitHub Web UI.
SecretKeyBytes []byte
// WatchNamespace is the namespace to watch for HorizontalRunnerAutoscalers to be
// scaled via webhook events.
// Leave this empty to watch all namespaces.
WatchNamespace string
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
return ctrl.Result{}, nil
}
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
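// Handle processes a single GitHub webhook delivery: it validates the payload
// when SecretKeyBytes is set, matches the event against the configured
// ScaleUpTriggers, and adds a capacity reservation to the single matching HRA.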
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Handle(w http.ResponseWriter, r *http.Request) {
var (
ok bool
err error
)
defer func() {
if !ok {
w.WriteHeader(http.StatusInternalServerError)
if err != nil {
msg := err.Error()
if written, err := w.Write([]byte(msg)); err != nil {
autoscaler.Log.Error(err, "failed writing http error response", "msg", msg, "written", written)
}
}
}
}()
defer func() {
if r.Body != nil {
r.Body.Close()
}
}()
var payload []byte
if len(autoscaler.SecretKeyBytes) > 0 {
payload, err = gogithub.ValidatePayload(r, autoscaler.SecretKeyBytes)
if err != nil {
autoscaler.Log.Error(err, "error validating request body")
return
}
} else {
payload, err = ioutil.ReadAll(r.Body)
if err != nil {
autoscaler.Log.Error(err, "error reading request body")
return
}
}
webhookType := gogithub.WebHookType(r)
event, err := gogithub.ParseWebHook(webhookType, payload)
if err != nil {
var s string
if payload != nil {
s = string(payload)
}
autoscaler.Log.Error(err, "could not parse webhook", "webhookType", webhookType, "payload", s)
return
}
var target *ScaleTarget
autoscaler.Log.Info("processing webhook event", "eventType", webhookType)
switch e := event.(type) {
case *gogithub.PushEvent:
target, err = autoscaler.getScaleUpTarget(
context.TODO(),
*e.Repo.Name,
*e.Repo.Organization,
autoscaler.MatchPushEvent(e),
)
case *gogithub.PullRequestEvent:
target, err = autoscaler.getScaleUpTarget(
context.TODO(),
*e.Repo.Name,
*e.Repo.Organization.Name,
autoscaler.MatchPullRequestEvent(e),
)
case *gogithub.CheckRunEvent:
target, err = autoscaler.getScaleUpTarget(
context.TODO(),
*e.Repo.Name,
*e.Org.Name,
autoscaler.MatchCheckRunEvent(e),
)
case *gogithub.PingEvent:
ok = true
w.WriteHeader(http.StatusOK)
msg := "pong"
if written, err := w.Write([]byte(msg)); err != nil {
autoscaler.Log.Error(err, "failed writing http response", "msg", msg, "written", written)
}
autoscaler.Log.Info("received ping event")
return
default:
autoscaler.Log.Info("unknown event type", "eventType", webhookType)
return
}
if err != nil {
autoscaler.Log.Error(err, "handling webhook event")
return
}
if target == nil {
msg := "no horizontalrunnerautoscaler to scale for this github event"
autoscaler.Log.Info(msg, "eventType", webhookType)
ok = true
w.WriteHeader(http.StatusOK)
if written, err := w.Write([]byte(msg)); err != nil {
autoscaler.Log.Error(err, "failed writing http response", "msg", msg, "written", written)
}
return
}
if err := autoscaler.tryScaleUp(context.TODO(), target); err != nil {
autoscaler.Log.Error(err, "could not scale up")
return
}
ok = true
w.WriteHeader(http.StatusOK)
msg := fmt.Sprintf("scaled %s by 1", target.Name)
autoscaler.Log.Info(msg)
if written, err := w.Write([]byte(msg)); err != nil {
autoscaler.Log.Error(err, "failed writing http response", "msg", msg, "written", written)
}
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) findHRAsByKey(ctx context.Context, value string) ([]v1alpha1.HorizontalRunnerAutoscaler, error) {
ns := autoscaler.WatchNamespace
var defaultListOpts []client.ListOption
if ns != "" {
defaultListOpts = append(defaultListOpts, client.InNamespace(ns))
}
var hras []v1alpha1.HorizontalRunnerAutoscaler
if value != "" {
opts := append([]client.ListOption{}, defaultListOpts...)
opts = append(opts, client.MatchingFields{scaleTargetKey: value})
var hraList v1alpha1.HorizontalRunnerAutoscalerList
if err := autoscaler.List(ctx, &hraList, opts...); err != nil {
return nil, err
}
for _, d := range hraList.Items {
hras = append(hras, d)
}
}
return hras, nil
}
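// matchTriggerConditionAgainstEvent reports whether eventAction satisfies the
// trigger's filter: an empty filter matches any action, while a nil action
// never matches a non-empty filter.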
func matchTriggerConditionAgainstEvent(types []string, eventAction *string) bool {
if len(types) == 0 {
return true
}
if eventAction == nil {
return false
}
for _, tpe := range types {
if tpe == *eventAction {
return true
}
}
return false
}
type ScaleTarget struct {
v1alpha1.HorizontalRunnerAutoscaler
v1alpha1.ScaleUpTrigger
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) searchScaleTargets(hras []v1alpha1.HorizontalRunnerAutoscaler, f func(v1alpha1.ScaleUpTrigger) bool) []ScaleTarget {
var matched []ScaleTarget
for _, hra := range hras {
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
continue
}
for _, scaleUpTrigger := range hra.Spec.ScaleUpTriggers {
if !f(scaleUpTrigger) {
continue
}
matched = append(matched, ScaleTarget{
HorizontalRunnerAutoscaler: hra,
ScaleUpTrigger: scaleUpTrigger,
})
}
}
return matched
}
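// getScaleTarget returns the unique ScaleTarget whose trigger matches f, or nil
// when zero or more than one HRA matches for the given index key.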
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleTarget(ctx context.Context, name string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
hras, err := autoscaler.findHRAsByKey(ctx, name)
if err != nil {
return nil, err
}
targets := autoscaler.searchScaleTargets(hras, f)
if len(targets) != 1 {
return nil, nil
}
return &targets[0], nil
}
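// getScaleUpTarget prefers a repository-wide target and falls back to
// organizational runners when no unique repository-level match is found.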
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) getScaleUpTarget(ctx context.Context, repoNameFromWebhook, orgNameFromWebhook string, f func(v1alpha1.ScaleUpTrigger) bool) (*ScaleTarget, error) {
if target, err := autoscaler.getScaleTarget(ctx, repoNameFromWebhook, f); err != nil {
return nil, err
} else if target != nil {
autoscaler.Log.Info("scale up target is repository-wide runners", "repository", repoNameFromWebhook)
return target, nil
}
if target, err := autoscaler.getScaleTarget(ctx, orgNameFromWebhook, f); err != nil {
return nil, err
} else if target != nil {
autoscaler.Log.Info("scale up target is organizational runners", "repository", orgNameFromWebhook)
return target, nil
}
return nil, nil
}
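// tryScaleUp appends a CapacityReservation to the matched HRA. The reservation
// expires after the trigger's Duration, so the regular reconciliation loop can
// scale back down without needing a separate scale-down webhook.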
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) tryScaleUp(ctx context.Context, target *ScaleTarget) error {
if target == nil {
return nil
}
log := autoscaler.Log.WithValues("horizontalrunnerautoscaler", target.HorizontalRunnerAutoscaler.Name)
copy := target.HorizontalRunnerAutoscaler.DeepCopy()
amount := 1
if target.ScaleUpTrigger.Amount > 0 {
amount = target.ScaleUpTrigger.Amount
}
copy.Spec.CapacityReservations = append(copy.Spec.CapacityReservations, v1alpha1.CapacityReservation{
ExpirationTime: metav1.Time{Time: time.Now().Add(target.ScaleUpTrigger.Duration.Duration)},
Replicas: amount,
})
if err := autoscaler.Client.Update(ctx, copy); err != nil {
log.Error(err, "Failed to update horizontalrunnerautoscaler resource")
return err
}
return nil
}
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr ctrl.Manager) error {
autoscaler.Recorder = mgr.GetEventRecorderFor("webhookbasedautoscaler")
if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)
if hra.Spec.ScaleTargetRef.Name == "" {
return nil
}
var rd v1alpha1.RunnerDeployment
if err := autoscaler.Client.Get(context.Background(), types.NamespacedName{Namespace: hra.Namespace, Name: hra.Spec.ScaleTargetRef.Name}, &rd); err != nil {
return nil
}
return []string{rd.Spec.Template.Spec.Repository, rd.Spec.Template.Spec.Organization}
}); err != nil {
return err
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.HorizontalRunnerAutoscaler{}).
Complete(autoscaler)
}
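// Minimal wiring sketch (mirrors the integration test below; the actual
// github-webhook-server entrypoint is not included in this excerpt, so the
// listen address and the secret env var name are assumptions).
func exampleSetupWebhookServer(mgr ctrl.Manager) error {
	hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{
		Client: mgr.GetClient(),
		Scheme: mgr.GetScheme(),
		Log:    ctrl.Log.WithName("webhookbasedautoscaler"),
		// SecretKeyBytes: []byte(os.Getenv("GITHUB_WEBHOOK_SECRET_TOKEN")), // assumption: secret sourced from env
	}
	if err := hraWebhook.SetupWithManager(mgr); err != nil {
		return err
	}
	mux := http.NewServeMux()
	mux.HandleFunc("/", hraWebhook.Handle)
	go http.ListenAndServe(":8000", mux) // assumption: listen address
	return nil
}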


@@ -0,0 +1,32 @@
package controllers
import (
"github.com/google/go-github/v33/github"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchCheckRunEvent(event *github.CheckRunEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
g := scaleUpTrigger.GitHubEvent
if g == nil {
return false
}
cr := g.CheckRun
if cr == nil {
return false
}
if !matchTriggerConditionAgainstEvent(cr.Types, event.Action) {
return false
}
if cr.Status != "" && (event.CheckRun == nil || event.CheckRun.Status == nil || *event.CheckRun.Status != cr.Status) {
return false
}
return true
}
}
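// Illustrative only (not added by this commit): a trigger that the matcher above
// accepts for check_run events with action "created" and status "queued".
// Assumes the concrete spec type is named CheckRunSpec, analogous to the
// PullRequestSpec used in the integration test.
var exampleCheckRunTrigger = v1alpha1.ScaleUpTrigger{
	GitHubEvent: &v1alpha1.GitHubEventScaleUpTriggerSpec{
		CheckRun: &v1alpha1.CheckRunSpec{
			Types:  []string{"created"},
			Status: "queued",
		},
	},
}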


@@ -0,0 +1,32 @@
package controllers
import (
"github.com/google/go-github/v33/github"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPullRequestEvent(event *github.PullRequestEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
g := scaleUpTrigger.GitHubEvent
if g == nil {
return false
}
pr := g.PullRequest
if pr == nil {
return false
}
if !matchTriggerConditionAgainstEvent(pr.Types, event.Action) {
return false
}
if !matchTriggerConditionAgainstEvent(pr.Branches, event.PullRequest.Base.Ref) {
return false
}
return true
}
}


@@ -0,0 +1,24 @@
package controllers
import (
"github.com/google/go-github/v33/github"
"github.com/summerwind/actions-runner-controller/api/v1alpha1"
)
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) MatchPushEvent(event *github.PushEvent) func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
return func(scaleUpTrigger v1alpha1.ScaleUpTrigger) bool {
g := scaleUpTrigger.GitHubEvent
if g == nil {
return false
}
push := g.Push
if push == nil {
return false
}
return true
}
}


@@ -0,0 +1,245 @@
package controllers
import (
"bytes"
"encoding/json"
"fmt"
"github.com/go-logr/logr"
"github.com/google/go-github/v33/github"
actionsv1alpha1 "github.com/summerwind/actions-runner-controller/api/v1alpha1"
"io"
"io/ioutil"
"k8s.io/apimachinery/pkg/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"net/http"
"net/http/httptest"
"net/url"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"testing"
)
var (
sc = runtime.NewScheme()
)
func init() {
_ = clientgoscheme.AddToScheme(sc)
_ = actionsv1alpha1.AddToScheme(sc)
}
func TestWebhookCheckRun(t *testing.T) {
testServer(t,
"check_run",
&github.CheckRunEvent{
CheckRun: &github.CheckRun{
Status: github.String("queued"),
},
Repo: &github.Repository{
Name: github.String("myorg/myrepo"),
},
Org: &github.Organization{
Name: github.String("myorg"),
},
Action: github.String("created"),
},
200,
"no horizontalrunnerautoscaler to scale for this github event",
)
}
func TestWebhookPullRequest(t *testing.T) {
testServer(t,
"pull_request",
&github.PullRequestEvent{
PullRequest: &github.PullRequest{
Base: &github.PullRequestBranch{
Ref: github.String("main"),
},
},
Repo: &github.Repository{
Name: github.String("myorg/myrepo"),
Organization: &github.Organization{
Name: github.String("myorg"),
},
},
Action: github.String("created"),
},
200,
"no horizontalrunnerautoscaler to scale for this github event",
)
}
func TestWebhookPush(t *testing.T) {
testServer(t,
"push",
&github.PushEvent{
Repo: &github.PushEventRepository{
Name: github.String("myrepo"),
Organization: github.String("myorg"),
},
},
200,
"no horizontalrunnerautoscaler to scale for this github event",
)
}
func TestWebhookPing(t *testing.T) {
testServer(t,
"ping",
&github.PingEvent{
Zen: github.String("zen"),
},
200,
"pong",
)
}
func installTestLogger(webhook *HorizontalRunnerAutoscalerGitHubWebhook) *bytes.Buffer {
logs := &bytes.Buffer{}
log := testLogger{
name: "testlog",
writer: logs,
}
webhook.Log = &log
return logs
}
func testServer(t *testing.T, eventType string, event interface{}, wantCode int, wantBody string) {
t.Helper()
hraWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{}
var initObjs []runtime.Object
client := fake.NewFakeClientWithScheme(sc, initObjs...)
logs := installTestLogger(hraWebhook)
defer func() {
if t.Failed() {
t.Logf("diagnostics: %s", logs.String())
}
}()
hraWebhook.Client = client
mux := http.NewServeMux()
mux.HandleFunc("/", hraWebhook.Handle)
server := httptest.NewServer(mux)
defer server.Close()
resp, err := sendWebhook(server, eventType, event)
if err != nil {
t.Fatal(err)
}
defer func() {
if resp != nil {
resp.Body.Close()
}
}()
if resp.StatusCode != wantCode {
t.Error("status:", resp.StatusCode)
}
respBody, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Fatal(err)
}
if string(respBody) != wantBody {
t.Fatal("body:", string(respBody))
}
}
func sendWebhook(server *httptest.Server, eventType string, event interface{}) (*http.Response, error) {
jsonBuf := &bytes.Buffer{}
enc := json.NewEncoder(jsonBuf)
enc.SetIndent(" ", "")
err := enc.Encode(event)
if err != nil {
return nil, fmt.Errorf("[bug in test] encoding event to json: %+v", err)
}
reqBody := jsonBuf.Bytes()
u, err := url.Parse(server.URL)
if err != nil {
return nil, fmt.Errorf("parsing server url: %v", err)
}
req := &http.Request{
Method: http.MethodPost,
URL: u,
Header: map[string][]string{
"X-GitHub-Event": {eventType},
"Content-Type": {"application/json"},
},
Body: ioutil.NopCloser(bytes.NewBuffer(reqBody)),
}
return http.DefaultClient.Do(req)
}
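// Sketch only (assumption, not part of this commit): the tests above run the
// webhook without SecretKeyBytes, so sendWebhook sends an unsigned payload. If
// the server were configured with a secret, the request would also need an
// X-Hub-Signature header that gogithub.ValidatePayload can verify, e.g. an
// HMAC-SHA1 hex digest of the body. Requires crypto/hmac, crypto/sha1 and
// encoding/hex.
func signPayload(secret, body []byte) string {
	mac := hmac.New(sha1.New, secret)
	mac.Write(body)
	// usage: req.Header.Set("X-Hub-Signature", signPayload(secret, reqBody))
	return "sha1=" + hex.EncodeToString(mac.Sum(nil))
}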
// testLogger is a sample logr.Logger that logs in-memory.
// It's only for testing log outputs.
type testLogger struct {
name string
keyValues map[string]interface{}
writer io.Writer
}
var _ logr.Logger = &testLogger{}
func (l *testLogger) Info(msg string, kvs ...interface{}) {
fmt.Fprintf(l.writer, "%s] %s\t", l.name, msg)
for k, v := range l.keyValues {
fmt.Fprintf(l.writer, "%s=%+v ", k, v)
}
for i := 0; i < len(kvs); i += 2 {
fmt.Fprintf(l.writer, "%s=%+v ", kvs[i], kvs[i+1])
}
fmt.Fprintf(l.writer, "\n")
}
func (_ *testLogger) Enabled() bool {
return true
}
func (l *testLogger) Error(err error, msg string, kvs ...interface{}) {
kvs = append(kvs, "error", err)
l.Info(msg, kvs...)
}
func (l *testLogger) V(_ int) logr.InfoLogger {
return l
}
func (l *testLogger) WithName(name string) logr.Logger {
return &testLogger{
name: l.name + "." + name,
keyValues: l.keyValues,
writer: l.writer,
}
}
func (l *testLogger) WithValues(kvs ...interface{}) logr.Logger {
newMap := make(map[string]interface{}, len(l.keyValues)+len(kvs)/2)
for k, v := range l.keyValues {
newMap[k] = v
}
for i := 0; i < len(kvs); i += 2 {
newMap[kvs[i].(string)] = kvs[i+1]
}
return &testLogger{
name: l.name,
keyValues: newMap,
writer: l.writer,
}
}


@@ -46,6 +46,8 @@ type HorizontalRunnerAutoscalerReconciler struct {
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
CacheDuration time.Duration
}
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerdeployments,verbs=get;list;watch;update;patch
@@ -79,13 +81,23 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
return ctrl.Result{}, nil
}
replicas, err := r.computeReplicas(rd, hra)
if err != nil {
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
var replicas *int
log.Error(err, "Could not compute replicas")
replicasFromCache := r.getDesiredReplicasFromCache(hra)
return ctrl.Result{}, err
if replicasFromCache != nil {
replicas = replicasFromCache
} else {
var err error
replicas, err = r.computeReplicas(rd, hra)
if err != nil {
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
log.Error(err, "Could not compute replicas")
return ctrl.Result{}, err
}
}
const defaultReplicas = 1
@@ -93,6 +105,18 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
currentDesiredReplicas := getIntOrDefault(rd.Spec.Replicas, defaultReplicas)
newDesiredReplicas := getIntOrDefault(replicas, defaultReplicas)
now := time.Now()
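// Add the replicas of every unexpired capacity reservation (created by the
// webhook server) on top of the computed desired count, capped by MaxReplicas below.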
for _, reservation := range hra.Spec.CapacityReservations {
if reservation.ExpirationTime.Time.After(now) {
newDesiredReplicas += reservation.Replicas
}
}
if hra.Spec.MaxReplicas != nil && *hra.Spec.MaxReplicas < newDesiredReplicas {
newDesiredReplicas = *hra.Spec.MaxReplicas
}
// Please add more conditions under which we can in-place update the newest runnerreplicaset without disruption
if currentDesiredReplicas != newDesiredReplicas {
copy := rd.DeepCopy()
@@ -103,12 +127,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
return ctrl.Result{}, err
}
return ctrl.Result{}, err
}
var updated *v1alpha1.HorizontalRunnerAutoscaler
if hra.Status.DesiredReplicas == nil || *hra.Status.DesiredReplicas != *replicas {
updated := hra.DeepCopy()
updated = hra.DeepCopy()
if (hra.Status.DesiredReplicas == nil && *replicas > 1) ||
(hra.Status.DesiredReplicas != nil && *replicas > *hra.Status.DesiredReplicas) {
@@ -117,7 +141,37 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl
}
updated.Status.DesiredReplicas = replicas
}
if replicasFromCache == nil {
if updated == nil {
updated = hra.DeepCopy()
}
var cacheEntries []v1alpha1.CacheEntry
for _, ent := range updated.Status.CacheEntries {
if ent.ExpirationTime.Before(&metav1.Time{Time: now}) {
cacheEntries = append(cacheEntries, ent)
}
}
var cacheDuration time.Duration
if r.CacheDuration > 0 {
cacheDuration = r.CacheDuration
} else {
cacheDuration = 10 * time.Minute
}
updated.Status.CacheEntries = append(updated.Status.CacheEntries, v1alpha1.CacheEntry{
Key: v1alpha1.CacheEntryKeyDesiredReplicas,
Value: *replicas,
ExpirationTime: metav1.Time{Time: time.Now().Add(cacheDuration)},
})
}
if updated != nil {
if err := r.Status().Update(ctx, updated); err != nil {
log.Error(err, "Failed to update horizontalrunnerautoscaler status")


@@ -2,6 +2,11 @@ package controllers
import (
"context"
"github.com/google/go-github/v33/github"
github3 "github.com/google/go-github/v33/github"
github2 "github.com/summerwind/actions-runner-controller/github"
"net/http"
"net/http/httptest"
"time"
"github.com/summerwind/actions-runner-controller/github/fake"
@@ -30,6 +35,12 @@ var (
workflowRunsFor1Replicas = `{"total_count": 6, "workflow_runs":[{"status":"queued"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}, {"status":"completed"}]}"`
)
var webhookServer *httptest.Server
var ghClient *github2.Client
var fakeRunnerList *fake.RunnersList
// SetupIntegrationTest will set up a testing environment.
// This includes:
// * creating a Namespace to be used during the test
@@ -41,10 +52,13 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
ns := &corev1.Namespace{}
responses := &fake.FixedResponses{}
responses.ListRunners = fake.DefaultListRunnersHandler()
responses.ListRepositoryWorkflowRuns = &fake.Handler{
Status: 200,
Body: workflowRunsFor3Replicas,
}
fakeRunnerList = fake.NewRunnersList()
responses.ListRunners = fakeRunnerList.HandleList()
fakeGithubServer := fake.NewServer(fake.WithFixedResponses(responses))
BeforeEach(func() {
@@ -59,9 +73,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
mgr, err := ctrl.NewManager(cfg, ctrl.Options{})
Expect(err).NotTo(HaveOccurred(), "failed to create manager")
runnersList = fake.NewRunnersList()
server = runnersList.GetServer()
ghClient := newGithubClient(server)
ghClient = newGithubClient(fakeGithubServer)
replicasetController := &RunnerReplicaSetReconciler{
Client: mgr.GetClient(),
@@ -85,15 +97,30 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
client := newGithubClient(fakeGithubServer)
autoscalerController := &HorizontalRunnerAutoscalerReconciler{
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
GitHubClient: client,
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
GitHubClient: client,
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
CacheDuration: 1 * time.Second,
}
err = autoscalerController.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")
autoscalerWebhook := &HorizontalRunnerAutoscalerGitHubWebhook{
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
}
err = autoscalerWebhook.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup autoscaler webhook")
mux := http.NewServeMux()
mux.HandleFunc("/", autoscalerWebhook.Handle)
webhookServer = httptest.NewServer(mux)
go func() {
defer GinkgoRecover()
@@ -106,6 +133,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
close(stopCh)
fakeGithubServer.Close()
webhookServer.Close()
err := k8sClient.Delete(ctx, ns)
Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
@@ -114,7 +142,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
return &testEnvironment{Namespace: ns, Responses: responses}
}
var _ = Context("Inside of a new namespace", func() {
var _ = Context("INTEGRATION: Inside of a new namespace", func() {
ctx := context.TODO()
env := SetupIntegrationTest(ctx)
ns := env.Namespace
@@ -235,8 +263,20 @@ var _ = Context("Inside of a new namespace", func() {
},
MinReplicas: intPtr(1),
MaxReplicas: intPtr(3),
ScaleDownDelaySecondsAfterScaleUp: nil,
ScaleDownDelaySecondsAfterScaleUp: intPtr(1),
Metrics: nil,
ScaleUpTriggers: []actionsv1alpha1.ScaleUpTrigger{
{
GitHubEvent: &actionsv1alpha1.GitHubEventScaleUpTriggerSpec{
PullRequest: &actionsv1alpha1.PullRequestSpec{
Types: []string{"created"},
Branches: []string{"main"},
},
},
Amount: 1,
Duration: metav1.Duration{Duration: time.Minute},
},
},
},
}
@@ -274,8 +314,33 @@ var _ = Context("Inside of a new namespace", func() {
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(3))
}
{
var runnerList actionsv1alpha1.RunnerList
err := k8sClient.List(ctx, &runnerList, client.InNamespace(ns.Name))
if err != nil {
logf.Log.Error(err, "list runners")
}
for i, r := range runnerList.Items {
fakeRunnerList.Add(&github3.Runner{
ID: github.Int64(int64(i)),
Name: github.String(r.Name),
OS: github.String("linux"),
Status: github.String("online"),
Busy: github.Bool(false),
})
}
rs, err := ghClient.ListRunners(context.Background(), "", "", "test/valid")
Expect(err).NotTo(HaveOccurred(), "verifying list fake runners response")
Expect(len(rs)).To(Equal(3), "count of fake list runners")
}
// Scale-down to 1 replica
{
time.Sleep(time.Second)
responses.ListRepositoryWorkflowRuns.Body = workflowRunsFor1Replicas
var hra actionsv1alpha1.HorizontalRunnerAutoscaler
@@ -308,7 +373,60 @@ var _ = Context("Inside of a new namespace", func() {
return *runnerSets.Items[0].Spec.Replicas
},
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1))
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1), "runners after HRA force update for scale-down")
}
{
resp, err := sendWebhook(webhookServer, "pull_request", &github.PullRequestEvent{
PullRequest: &github.PullRequest{
Base: &github.PullRequestBranch{
Ref: github.String("main"),
},
},
Repo: &github.Repository{
Name: github.String("test/valid"),
Organization: &github.Organization{
Name: github.String("test"),
},
},
Action: github.String("created"),
})
Expect(err).NotTo(HaveOccurred(), "failed to send pull_request event")
Expect(resp.StatusCode).To(Equal(200))
}
// Scale-up to 2 replicas
{
runnerSets := actionsv1alpha1.RunnerReplicaSetList{Items: []actionsv1alpha1.RunnerReplicaSet{}}
Eventually(
func() int {
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
if err != nil {
logf.Log.Error(err, "list runner sets")
}
return len(runnerSets.Items)
},
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(1), "runner sets after webhook")
Eventually(
func() int {
err := k8sClient.List(ctx, &runnerSets, client.InNamespace(ns.Name))
if err != nil {
logf.Log.Error(err, "list runner sets")
}
if len(runnerSets.Items) == 0 {
logf.Log.Info("No runnerreplicasets exist yet")
return -1
}
return *runnerSets.Items[0].Spec.Replicas
},
time.Second*5, time.Millisecond*500).Should(BeEquivalentTo(2), "runners after webhook")
}
})
})


@@ -177,7 +177,7 @@ func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
rs := oldSets[i]
if err := r.Client.Delete(ctx, &rs); err != nil {
log.Error(err, "Failed to delete runner resource")
log.Error(err, "Failed to delete runnerreplicaset resource")
return ctrl.Result{}, err
}


@@ -117,7 +117,7 @@ func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, e
}
for i := 0; i < n; i++ {
if err := r.Client.Delete(ctx, &notBusy[i]); err != nil {
if err := r.Client.Delete(ctx, &notBusy[i]); client.IgnoreNotFound(err) != nil {
log.Error(err, "Failed to delete runner resource")
return ctrl.Result{}, err


@@ -17,6 +17,8 @@ limitations under the License.
package controllers
import (
"github.com/onsi/ginkgo/config"
"os"
"path/filepath"
"testing"
@@ -43,6 +45,8 @@ var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
RegisterFailHandler(Fail)
config.GinkgoConfig.FocusString = os.Getenv("GINKGO_FOCUS")
RunSpecsWithDefaultAndCustomReporters(t,
"Controller Suite",
[]Reporter{envtest.NewlineReporter{}})