Enhance support for multi-tenancy (#1371)

This enhances every ARC controller and the various K8s custom resources so that users can now configure custom GitHub API credentials per resource, distinct from the default credentials configured for the ARC instance.

Ref https://github.com/actions-runner-controller/actions-runner-controller/issues/1067#issuecomment-1043716646
Author: Yusuke Kuoka
Date: 2022-07-12 09:45:00 +09:00
Committed by: GitHub
Parent: 18dd89c884
Commit: 618276e3d3
29 changed files with 783 additions and 68 deletions
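
As a reading aid, here is a condensed sketch of the pattern the diff applies to each controller; the identifiers are taken from the hunks that follow, while the wrapper function name is illustrative. Every reconciler now holds a shared *MultiGitHubClient, resolves a per-resource *github.Client from it before talking to the GitHub API, and drops its reference when the resource is being deleted.

func (r *HorizontalRunnerAutoscalerReconciler) reconcileSketch(ctx context.Context, hra v1alpha1.HorizontalRunnerAutoscaler) error {
	if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
		// Release this HRA's reference so the cached per-secret client can be freed.
		r.GitHubClient.DeinitForHRA(&hra)
		return nil
	}
	// Resolve the client from hra.Spec.GitHubAPICredentialsFrom.SecretRef if set,
	// falling back to the ARC-wide default client otherwise.
	ghc, err := r.GitHubClient.InitForHRA(ctx, &hra)
	if err != nil {
		return err
	}
	// The resolved client is then threaded through the autoscaling calculations,
	// e.g. computeReplicasWithCache(ghc, ...), instead of a controller-wide client.
	_ = ghc
	return nil
}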


@@ -9,6 +9,7 @@ import (
"strings"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
"github.com/google/go-github/v45/github"
corev1 "k8s.io/api/core/v1"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -21,7 +22,7 @@ const (
defaultScaleDownFactor = 0.7
)
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler) (*int, error) {
if hra.Spec.MinReplicas == nil {
return nil, fmt.Errorf("horizontalrunnerautoscaler %s/%s is missing minReplicas", hra.Namespace, hra.Name)
} else if hra.Spec.MaxReplicas == nil {
@@ -48,9 +49,9 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
switch primaryMetricType {
case v1alpha1.AutoscalingMetricTypeTotalNumberOfQueuedAndInProgressWorkflowRuns:
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &primaryMetric)
suggested, err = r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &primaryMetric)
case v1alpha1.AutoscalingMetricTypePercentageRunnersBusy:
suggested, err = r.suggestReplicasByPercentageRunnersBusy(st, hra, primaryMetric)
suggested, err = r.suggestReplicasByPercentageRunnersBusy(ghc, st, hra, primaryMetric)
default:
return nil, fmt.Errorf("validating autoscaling metrics: unsupported metric type %q", primaryMetric)
}
@@ -83,11 +84,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestDesiredReplicas(st scaleTa
)
}
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(st, hra, &fallbackMetric)
return r.suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc, st, hra, &fallbackMetric)
}
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgressWorkflowRuns(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics *v1alpha1.MetricSpec) (*int, error) {
var repos [][]string
repoID := st.repo
if repoID == "" {
@@ -126,7 +126,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
opt := github.ListWorkflowJobsOptions{ListOptions: github.ListOptions{PerPage: 50}}
var allJobs []*github.WorkflowJob
for {
jobs, resp, err := r.GitHubClient.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
jobs, resp, err := ghc.Actions.ListWorkflowJobs(context.TODO(), user, repoName, runID, &opt)
if err != nil {
r.Log.Error(err, "Error listing workflow jobs")
return //err
@@ -184,7 +184,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
for _, repo := range repos {
user, repoName := repo[0], repo[1]
workflowRuns, err := r.GitHubClient.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
workflowRuns, err := ghc.ListRepositoryWorkflowRuns(context.TODO(), user, repoName)
if err != nil {
return nil, err
}
@@ -226,7 +226,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByQueuedAndInProgr
return &necessaryReplicas, nil
}
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunnersBusy(ghc *arcgithub.Client, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, metrics v1alpha1.MetricSpec) (*int, error) {
ctx := context.Background()
scaleUpThreshold := defaultScaleUpThreshold
scaleDownThreshold := defaultScaleDownThreshold
@@ -295,7 +295,7 @@ func (r *HorizontalRunnerAutoscalerReconciler) suggestReplicasByPercentageRunner
)
// ListRunners will return all runners managed by GitHub - not restricted to ns
runners, err := r.GitHubClient.ListRunners(
runners, err := ghc.ListRunners(
ctx,
enterprise,
organization,


@@ -330,7 +330,6 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
h := &HorizontalRunnerAutoscalerReconciler{
Log: log,
GitHubClient: client,
Scheme: scheme,
DefaultScaleDownDelay: DefaultScaleDownDelay,
}
@@ -379,7 +378,7 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
st := h.scaleTargetFromRD(context.Background(), rd)
got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
if err != nil {
if tc.err == "" {
t.Fatalf("unexpected error: expected none, got %v", err)
@@ -720,7 +719,6 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
h := &HorizontalRunnerAutoscalerReconciler{
Log: log,
Scheme: scheme,
GitHubClient: client,
DefaultScaleDownDelay: DefaultScaleDownDelay,
}
@@ -781,7 +779,7 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
st := h.scaleTargetFromRD(context.Background(), rd)
got, err := h.computeReplicasWithCache(log, metav1Now.Time, st, hra, minReplicas)
got, err := h.computeReplicasWithCache(client, log, metav1Now.Time, st, hra, minReplicas)
if err != nil {
if tc.err == "" {
t.Fatalf("unexpected error: expected none, got %v", err)


@@ -24,7 +24,6 @@ import (
corev1 "k8s.io/api/core/v1"
"github.com/actions-runner-controller/actions-runner-controller/github"
"github.com/go-logr/logr"
kerrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
@@ -38,6 +37,7 @@ import (
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/controllers/metrics"
arcgithub "github.com/actions-runner-controller/actions-runner-controller/github"
)
const (
@@ -47,7 +47,7 @@ const (
// HorizontalRunnerAutoscalerReconciler reconciles a HorizontalRunnerAutoscaler object
type HorizontalRunnerAutoscalerReconciler struct {
client.Client
GitHubClient *github.Client
GitHubClient *MultiGitHubClient
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
@@ -73,6 +73,8 @@ func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, re
}
if !hra.ObjectMeta.DeletionTimestamp.IsZero() {
r.GitHubClient.DeinitForHRA(&hra)
return ctrl.Result{}, nil
}
@@ -310,7 +312,12 @@ func (r *HorizontalRunnerAutoscalerReconciler) reconcile(ctx context.Context, re
return ctrl.Result{}, err
}
newDesiredReplicas, err := r.computeReplicasWithCache(log, now, st, hra, minReplicas)
ghc, err := r.GitHubClient.InitForHRA(context.Background(), &hra)
if err != nil {
return ctrl.Result{}, err
}
newDesiredReplicas, err := r.computeReplicasWithCache(ghc, log, now, st, hra, minReplicas)
if err != nil {
r.Recorder.Event(&hra, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
@@ -461,10 +468,10 @@ func (r *HorizontalRunnerAutoscalerReconciler) getMinReplicas(log logr.Logger, n
return minReplicas, active, upcoming, nil
}
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
func (r *HorizontalRunnerAutoscalerReconciler) computeReplicasWithCache(ghc *arcgithub.Client, log logr.Logger, now time.Time, st scaleTarget, hra v1alpha1.HorizontalRunnerAutoscaler, minReplicas int) (int, error) {
var suggestedReplicas int
v, err := r.suggestDesiredReplicas(st, hra)
v, err := r.suggestDesiredReplicas(ghc, st, hra)
if err != nil {
return 0, err
}


@@ -99,12 +99,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
return fmt.Sprintf("%s%s", ns.Name, name)
}
multiClient := NewMultiGitHubClient(mgr.GetClient(), env.ghClient)
runnerController := &RunnerReconciler{
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
GitHubClient: env.ghClient,
GitHubClient: multiClient,
RunnerImage: "example/runner:test",
DockerImage: "example/docker:test",
Name: controllerName("runner"),
@@ -116,12 +118,11 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")
replicasetController := &RunnerReplicaSetReconciler{
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
GitHubClient: env.ghClient,
Name: controllerName("runnerreplicaset"),
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
Name: controllerName("runnerreplicaset"),
}
err = replicasetController.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup runnerreplicaset controller")
@@ -140,7 +141,7 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
GitHubClient: env.ghClient,
GitHubClient: multiClient,
Recorder: mgr.GetEventRecorderFor("horizontalrunnerautoscaler-controller"),
CacheDuration: 1 * time.Second,
Name: controllerName("horizontalrunnerautoscaler"),


@@ -0,0 +1,389 @@
package controllers
import (
"context"
"crypto/sha1"
"encoding/base64"
"encoding/hex"
"fmt"
"sort"
"strconv"
"sync"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/github"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
const (
// The API creds secret annotation is added by the runner controller or the runnerset controller according to runner.spec.githubAPICredentialsFrom.secretRef.name,
// so that the runner pod controller can share the same GitHub API credentials and GitHub API client instance with the upstream controllers.
annotationKeyGitHubAPICredsSecret = annotationKeyPrefix + "github-api-creds-secret"
)
type runnerOwnerRef struct {
// kind is either StatefulSet or Runner, and populated via the owner reference in the runner pod controller or via the reconciliation target's kind in
// runnerset and runner controllers.
kind string
ns, name string
}
type secretRef struct {
ns, name string
}
// savedClient is a cache entry that holds the client for a specific set of credentials,
// like a PAT or a pair of key and cert.
// The `hash` is part of the savedClient, not the key, because we keep only the client for the latest creds
// in case the operator updated the k8s secret containing the credentials.
type savedClient struct {
hash string
// refs is the map of all the objects that reference this client, used for reference counting to GC
// the client once it is no longer needed.
refs map[runnerOwnerRef]struct{}
*github.Client
}
type resourceReader interface {
Get(context.Context, types.NamespacedName, client.Object) error
}
type MultiGitHubClient struct {
mu sync.Mutex
client resourceReader
githubClient *github.Client
// The saved client is freed once all its dependents disappear, or the contents of the secret change.
// We track dependents via a Go map embedded within the savedClient struct. Each dependent deregisters itself in its respective Kubernetes finalizer,
// so that we won't miss any dependent's termination.
// A change in the secret is detected using the hash of its contents.
clients map[secretRef]savedClient
}
func NewMultiGitHubClient(client resourceReader, githubClient *github.Client) *MultiGitHubClient {
return &MultiGitHubClient{
client: client,
githubClient: githubClient,
clients: map[secretRef]savedClient{},
}
}
// InitForRunnerPod sets up and returns the *github.Client for the runner pod.
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForRunnerPod(ctx context.Context, pod *corev1.Pod) (*github.Client, error) {
// These 3 default values are used only when the user created the pod directly, not via Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet resources.
ref := refFromRunnerPod(pod)
secretName := pod.Annotations[annotationKeyGitHubAPICredsSecret]
// kind can be any of Pod, Runner, RunnerReplicaSet, RunnerDeployment, or RunnerSet depending on which custom resource the user directly created.
return c.initClientWithSecretName(ctx, pod.Namespace, secretName, ref)
}
// InitForRunner sets up and returns the *github.Client for the runner.
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForRunner(ctx context.Context, r *v1alpha1.Runner) (*github.Client, error) {
var secretName string
if r.Spec.GitHubAPICredentialsFrom != nil {
secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
}
// These 3 default values are used only when the user created the runner resource directly, not via RunnerReplicaSet, RunnerDeployment, or RunnerSet resources.
ref := refFromRunner(r)
if ref.ns != r.Namespace {
return nil, fmt.Errorf("referencing github api creds secret from owner in another namespace is not supported yet")
}
// kind can be any of Runner, RunnerReplicaSet, or RunnerDeployment depending on which custom resource the user directly created.
return c.initClientWithSecretName(ctx, r.Namespace, secretName, ref)
}
// InitForRunnerSet sets up and returns the *github.Client for the runner set.
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForRunnerSet(ctx context.Context, rs *v1alpha1.RunnerSet) (*github.Client, error) {
ref := refFromRunnerSet(rs)
var secretName string
if rs.Spec.GitHubAPICredentialsFrom != nil {
secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
}
return c.initClientWithSecretName(ctx, rs.Namespace, secretName, ref)
}
// InitForHRA sets up and returns the *github.Client for the HorizontalRunnerAutoscaler.
// In case the object (like RunnerDeployment) does not request a custom client, it returns the default client.
func (c *MultiGitHubClient) InitForHRA(ctx context.Context, hra *v1alpha1.HorizontalRunnerAutoscaler) (*github.Client, error) {
ref := refFromHorizontalRunnerAutoscaler(hra)
var secretName string
if hra.Spec.GitHubAPICredentialsFrom != nil {
secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
}
return c.initClientWithSecretName(ctx, hra.Namespace, secretName, ref)
}
func (c *MultiGitHubClient) DeinitForRunnerPod(p *corev1.Pod) {
secretName := p.Annotations[annotationKeyGitHubAPICredsSecret]
c.derefClient(p.Namespace, secretName, refFromRunnerPod(p))
}
func (c *MultiGitHubClient) DeinitForRunner(r *v1alpha1.Runner) {
var secretName string
if r.Spec.GitHubAPICredentialsFrom != nil {
secretName = r.Spec.GitHubAPICredentialsFrom.SecretRef.Name
}
c.derefClient(r.Namespace, secretName, refFromRunner(r))
}
func (c *MultiGitHubClient) DeinitForRunnerSet(rs *v1alpha1.RunnerSet) {
var secretName string
if rs.Spec.GitHubAPICredentialsFrom != nil {
secretName = rs.Spec.GitHubAPICredentialsFrom.SecretRef.Name
}
c.derefClient(rs.Namespace, secretName, refFromRunnerSet(rs))
}
func (c *MultiGitHubClient) deinitClientForRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) {
c.derefClient(rs.Namespace, rs.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerReplicaSet(rs))
}
func (c *MultiGitHubClient) deinitClientForRunnerDeployment(rd *v1alpha1.RunnerDeployment) {
c.derefClient(rd.Namespace, rd.Spec.Template.Spec.GitHubAPICredentialsFrom.SecretRef.Name, refFromRunnerDeployment(rd))
}
func (c *MultiGitHubClient) DeinitForHRA(hra *v1alpha1.HorizontalRunnerAutoscaler) {
var secretName string
if hra.Spec.GitHubAPICredentialsFrom != nil {
secretName = hra.Spec.GitHubAPICredentialsFrom.SecretRef.Name
}
c.derefClient(hra.Namespace, secretName, refFromHorizontalRunnerAutoscaler(hra))
}
func (c *MultiGitHubClient) initClientForSecret(secret *corev1.Secret, dependent *runnerOwnerRef) (*savedClient, error) {
secRef := secretRef{
ns: secret.Namespace,
name: secret.Name,
}
cliRef := c.clients[secRef]
var ks []string
for k := range secret.Data {
ks = append(ks, k)
}
sort.SliceStable(ks, func(i, j int) bool { return ks[i] < ks[j] })
hash := sha1.New()
for _, k := range ks {
hash.Write(secret.Data[k])
}
hashStr := hex.EncodeToString(hash.Sum(nil))
if cliRef.hash != hashStr {
delete(c.clients, secRef)
conf, err := secretDataToGitHubClientConfig(secret.Data)
if err != nil {
return nil, err
}
cli, err := conf.NewClient()
if err != nil {
return nil, err
}
cliRef = savedClient{
hash: hashStr,
refs: map[runnerOwnerRef]struct{}{},
Client: cli,
}
c.clients[secRef] = cliRef
}
if dependent != nil {
c.clients[secRef].refs[*dependent] = struct{}{}
}
return &cliRef, nil
}
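// Minimal sketch (illustrative helper, not part of this change) of the content hashing
// used by initClientForSecret above: keys are sorted first, so the hash does not depend
// on map iteration order, and any change to any value yields a new hash, which causes
// the previously cached client for that secret to be dropped and rebuilt.
func secretContentHashSketch(data map[string][]byte) string {
	ks := make([]string, 0, len(data))
	for k := range data {
		ks = append(ks, k)
	}
	sort.Strings(ks)
	h := sha1.New()
	for _, k := range ks {
		h.Write(data[k])
	}
	return hex.EncodeToString(h.Sum(nil))
}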
func (c *MultiGitHubClient) initClientWithSecretName(ctx context.Context, ns, secretName string, runRef *runnerOwnerRef) (*github.Client, error) {
c.mu.Lock()
defer c.mu.Unlock()
if secretName == "" {
return c.githubClient, nil
}
secRef := secretRef{
ns: ns,
name: secretName,
}
if _, ok := c.clients[secRef]; !ok {
c.clients[secRef] = savedClient{}
}
var sec corev1.Secret
if err := c.client.Get(ctx, types.NamespacedName{Namespace: ns, Name: secretName}, &sec); err != nil {
return nil, err
}
savedClient, err := c.initClientForSecret(&sec, runRef)
if err != nil {
return nil, err
}
return savedClient.Client, nil
}
func (c *MultiGitHubClient) derefClient(ns, secretName string, dependent *runnerOwnerRef) {
c.mu.Lock()
defer c.mu.Unlock()
secRef := secretRef{
ns: ns,
name: secretName,
}
if dependent != nil {
delete(c.clients[secRef].refs, *dependent)
}
cliRef := c.clients[secRef]
if dependent == nil || len(cliRef.refs) == 0 {
delete(c.clients, secRef)
}
}
func decodeBase64(s []byte) (string, error) {
enc := base64.RawStdEncoding
dbuf := make([]byte, enc.DecodedLen(len(s)))
n, err := enc.Decode(dbuf, []byte(s))
if err != nil {
return "", err
}
return string(dbuf[:n]), nil
}
func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, error) {
var (
conf github.Config
err error
)
conf.URL, err = decodeBase64(data["github_url"])
if err != nil {
return nil, err
}
conf.UploadURL, err = decodeBase64(data["github_upload_url"])
if err != nil {
return nil, err
}
conf.EnterpriseURL, err = decodeBase64(data["github_enterprise_url"])
if err != nil {
return nil, err
}
conf.RunnerGitHubURL, err = decodeBase64(data["github_runner_url"])
if err != nil {
return nil, err
}
conf.Token, err = decodeBase64(data["github_token"])
if err != nil {
return nil, err
}
appID, err := decodeBase64(data["github_app_id"])
if err != nil {
return nil, err
}
conf.AppID, err = strconv.ParseInt(appID, 10, 64)
if err != nil {
return nil, err
}
instID, err := decodeBase64(data["github_app_installation_id"])
if err != nil {
return nil, err
}
conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
if err != nil {
return nil, err
}
conf.AppPrivateKey, err = decodeBase64(data["github_app_private_key"])
if err != nil {
return nil, err
}
return &conf, nil
}
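// Minimal sketch (illustrative helper and values, not part of this change) of the secret
// Data layout that secretDataToGitHubClientConfig above expects: each value is itself a
// base64 (RawStdEncoding, i.e. unpadded) string, since decodeBase64 is applied to the raw
// bytes, and the app ID fields are included here because they are parsed unconditionally.
func exampleGitHubCredsSecretData() map[string][]byte {
	enc := func(s string) []byte {
		return []byte(base64.RawStdEncoding.EncodeToString([]byte(s)))
	}
	return map[string][]byte{
		"github_app_id":              enc("12345"),
		"github_app_installation_id": enc("678910"),
		"github_app_private_key":     enc("placeholder; a real PEM-encoded key in practice"),
		// Optional: "github_token", "github_url", "github_upload_url",
		// "github_enterprise_url", "github_runner_url"; missing keys decode to "".
	}
}
// secretDataToGitHubClientConfig(exampleGitHubCredsSecretData()) yields a Config with
// AppID == 12345 and AppInstallationID == 678910; conf.NewClient(), as called by
// initClientForSecret, would still require the private key to be a real credential.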
func refFromRunnerDeployment(rd *v1alpha1.RunnerDeployment) *runnerOwnerRef {
return &runnerOwnerRef{
kind: rd.Kind,
ns: rd.Namespace,
name: rd.Name,
}
}
func refFromRunnerReplicaSet(rs *v1alpha1.RunnerReplicaSet) *runnerOwnerRef {
return &runnerOwnerRef{
kind: rs.Kind,
ns: rs.Namespace,
name: rs.Name,
}
}
func refFromRunner(r *v1alpha1.Runner) *runnerOwnerRef {
return &runnerOwnerRef{
kind: r.Kind,
ns: r.Namespace,
name: r.Name,
}
}
func refFromRunnerPod(po *corev1.Pod) *runnerOwnerRef {
return &runnerOwnerRef{
kind: po.Kind,
ns: po.Namespace,
name: po.Name,
}
}
func refFromRunnerSet(rs *v1alpha1.RunnerSet) *runnerOwnerRef {
return &runnerOwnerRef{
kind: rs.Kind,
ns: rs.Namespace,
name: rs.Name,
}
}
func refFromHorizontalRunnerAutoscaler(hra *v1alpha1.HorizontalRunnerAutoscaler) *runnerOwnerRef {
return &runnerOwnerRef{
kind: hra.Kind,
ns: hra.Namespace,
name: hra.Name,
}
}
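
A minimal usage sketch of the lifecycle this file implements, assuming the package's usual imports (context, corev1, metav1, types, client, and the github package) and the testResourceReader helper added later in this commit; the function name and values are illustrative. A pod without the creds annotation gets the ARC-wide default client back unchanged, while an annotated pod would be resolved against the named Secret in its namespace, cached per secret, and tracked as a dependent until it is deinitialized.

func multiClientLifecycleSketch() {
	rr := &testResourceReader{objects: map[types.NamespacedName]client.Object{}}
	defaultClient := &github.Client{GithubBaseURL: "https://github.com/"}
	multi := NewMultiGitHubClient(rr, defaultClient)

	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "runner-abc"}}

	// No github-api-creds-secret annotation: the default client is returned as-is.
	ghc, err := multi.InitForRunnerPod(context.Background(), pod)
	_, _ = ghc, err // ghc == defaultClient here

	// Annotating the pod with annotationKeyGitHubAPICredsSecret would instead resolve the
	// client from that Secret and record this pod as a dependent of the cached entry.

	// On deletion the reference is dropped; the cached client is freed once the last
	// dependent is gone or the secret's content hash changes.
	multi.DeinitForRunnerPod(pod)
}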


@@ -10,7 +10,9 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
@@ -1129,13 +1131,20 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
for i := range testcases {
tc := testcases[i]
rr := &testResourceReader{
objects: map[types.NamespacedName]client.Object{},
}
multiClient := NewMultiGitHubClient(rr, &github.Client{GithubBaseURL: githubBaseURL})
t.Run(tc.description, func(t *testing.T) {
r := &RunnerReconciler{
RunnerImage: defaultRunnerImage,
RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
DockerImage: defaultDockerImage,
DockerRegistryMirror: defaultDockerRegistryMirror,
GitHubClient: &github.Client{GithubBaseURL: githubBaseURL},
GitHubClient: multiClient,
Scheme: scheme,
}
got, err := r.newPod(tc.runner)


@@ -6,7 +6,6 @@ import (
"net/http"
"time"
"github.com/actions-runner-controller/actions-runner-controller/github"
"github.com/go-logr/logr"
"gomodules.xyz/jsonpatch/v2"
admissionv1 "k8s.io/api/admission/v1"
@@ -29,7 +28,7 @@ type PodRunnerTokenInjector struct {
Name string
Log logr.Logger
Recorder record.EventRecorder
GitHubClient *github.Client
GitHubClient *MultiGitHubClient
decoder *admission.Decoder
}
@@ -66,7 +65,12 @@ func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Reque
return newEmptyResponse()
}
rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
ghc, err := t.GitHubClient.InitForRunnerPod(ctx, &pod)
if err != nil {
return admission.Errored(http.StatusInternalServerError, err)
}
rt, err := ghc.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
if err != nil {
t.Log.Error(err, "Failed to get new registration token")
return admission.Errored(http.StatusInternalServerError, err)


@@ -40,7 +40,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/github"
)
const (
@@ -64,7 +63,7 @@ type RunnerReconciler struct {
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
GitHubClient *github.Client
GitHubClient *MultiGitHubClient
RunnerImage string
RunnerImagePullSecrets []string
DockerImage string
@@ -121,6 +120,8 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
return r.processRunnerDeletion(runner, ctx, log, nil)
}
r.GitHubClient.DeinitForRunner(&runner)
return r.processRunnerDeletion(runner, ctx, log, &pod)
}
@@ -401,7 +402,12 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
log := r.Log.WithValues("runner", runner.Name)
rt, err := r.GitHubClient.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
ghc, err := r.GitHubClient.InitForRunner(ctx, &runner)
if err != nil {
return false, err
}
rt, err := ghc.GetRegistrationToken(ctx, runner.Spec.Enterprise, runner.Spec.Organization, runner.Spec.Repository, runner.Name)
if err != nil {
// An error can be a permanent permission issue like the one below:
// POST https://api.github.com/enterprises/YOUR_ENTERPRISE/actions/runners/registration-token: 403 Resource not accessible by integration []
@@ -441,6 +447,11 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
labels[k] = v
}
ghc, err := r.GitHubClient.InitForRunner(context.Background(), &runner)
if err != nil {
return corev1.Pod{}, err
}
// This implies that...
//
// (1) We recreate the runner pod whenever the runner has changes in:
@@ -464,7 +475,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
runner.ObjectMeta.Annotations,
runner.Spec,
r.GitHubClient.GithubBaseURL,
ghc.GithubBaseURL,
// Token change should trigger replacement.
// We need to include this explicitly here because
// runner.Spec does not contain the possibly updated token stored in the
@@ -542,7 +553,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
}
}
pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubClient.GithubBaseURL, r.UseRunnerStatusUpdateHook)
pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook)
if err != nil {
return pod, err
}
@@ -729,6 +740,9 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
// This label selector is used by default when rd.Spec.Selector is empty.
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunner, "")
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)
if runnerSpec.GitHubAPICredentialsFrom != nil {
template.ObjectMeta.Annotations = CloneAndAddLabel(template.ObjectMeta.Annotations, annotationKeyGitHubAPICredsSecret, runnerSpec.GitHubAPICredentialsFrom.SecretRef.Name)
}
workDir := runnerSpec.WorkDir
if workDir == "" {


@@ -32,8 +32,6 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
corev1 "k8s.io/api/core/v1"
"github.com/actions-runner-controller/actions-runner-controller/github"
)
// RunnerPodReconciler reconciles a Runner object
@@ -42,7 +40,7 @@ type RunnerPodReconciler struct {
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
GitHubClient *github.Client
GitHubClient *MultiGitHubClient
Name string
RegistrationRecheckInterval time.Duration
RegistrationRecheckJitter time.Duration
@@ -97,6 +95,11 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
}
ghc, err := r.GitHubClient.InitForRunnerPod(ctx, &runnerPod)
if err != nil {
return ctrl.Result{}, err
}
if runnerPod.ObjectMeta.DeletionTimestamp.IsZero() {
finalizers, added := addFinalizer(runnerPod.ObjectMeta.Finalizers, runnerPodFinalizerName)
@@ -148,7 +151,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
// In a standard scenario, the upstream controller, like runnerset-controller, ensures this runner is gracefully stopped before the deletion timestamp is set.
// But in case the user manually deleted it for whatever reason,
// we have to ensure it gracefully stops now.
updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
updatedPod, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
if res != nil {
return *res, err
}
@@ -164,6 +167,8 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
log.V(2).Info("Removed finalizer")
r.GitHubClient.DeinitForRunnerPod(updatedPod)
return ctrl.Result{}, nil
}
@@ -202,7 +207,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}
po, res, err := ensureRunnerPodRegistered(ctx, log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
po, res, err := ensureRunnerPodRegistered(ctx, log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
if res != nil {
return *res, err
}
@@ -216,7 +221,7 @@ func (r *RunnerPodReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
//
// In a standard scenario, ARC starts the unregistration process before marking the pod for deletion at all,
// so that it isn't subject to terminationGracePeriod and can safely take hours to finish its work.
_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, r.GitHubClient, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
_, res, err := tickRunnerGracefulStop(ctx, r.unregistrationRetryDelay(), log, ghc, r.Client, enterprise, org, repo, runnerPod.Name, &runnerPod)
if res != nil {
return *res, err
}


@@ -32,17 +32,15 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/actions-runner-controller/actions-runner-controller/api/v1alpha1"
"github.com/actions-runner-controller/actions-runner-controller/github"
)
// RunnerReplicaSetReconciler reconciles a Runner object
type RunnerReplicaSetReconciler struct {
client.Client
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
GitHubClient *github.Client
Name string
Log logr.Logger
Recorder record.EventRecorder
Scheme *runtime.Scheme
Name string
}
const (


@@ -52,15 +52,13 @@ func SetupTest(ctx2 context.Context) *corev1.Namespace {
runnersList = fake.NewRunnersList()
server = runnersList.GetServer()
ghClient := newGithubClient(server)
controller := &RunnerReplicaSetReconciler{
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
GitHubClient: ghClient,
Name: "runnerreplicaset-" + ns.Name,
Client: mgr.GetClient(),
Scheme: scheme.Scheme,
Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
Name: "runnerreplicaset-" + ns.Name,
}
err = controller.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup controller")


@@ -46,7 +46,7 @@ type RunnerSetReconciler struct {
Scheme *runtime.Scheme
CommonRunnerLabels []string
GitHubBaseURL string
GitHubClient *MultiGitHubClient
RunnerImage string
RunnerImagePullSecrets []string
DockerImage string
@@ -81,6 +81,8 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
}
if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
r.GitHubClient.DeinitForRunnerSet(runnerSet)
return ctrl.Result{}, nil
}
@@ -98,7 +100,7 @@ func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}
desiredStatefulSet, err := r.newStatefulSet(runnerSet)
desiredStatefulSet, err := r.newStatefulSet(ctx, runnerSet)
if err != nil {
r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())
@@ -186,7 +188,7 @@ func getRunnerSetSelector(runnerSet *v1alpha1.RunnerSet) *metav1.LabelSelector {
var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
var LabelValuePodMutation = "true"
func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()
runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, r.CommonRunnerLabels...)
@@ -222,7 +224,14 @@ func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*ap
template.ObjectMeta.Labels = CloneAndAddLabel(template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, r.GitHubBaseURL, r.UseRunnerStatusUpdateHook)
ghc, err := r.GitHubClient.InitForRunnerSet(ctx, runnerSet)
if err != nil {
return nil, err
}
githubBaseURL := ghc.GithubBaseURL
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook)
if err != nil {
return nil, err
}


@@ -0,0 +1,31 @@
package controllers
import (
"context"
"errors"
"reflect"
kerrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
type testResourceReader struct {
objects map[types.NamespacedName]client.Object
}
func (r *testResourceReader) Get(_ context.Context, nsName types.NamespacedName, obj client.Object) error {
ret, ok := r.objects[nsName]
if !ok {
return &kerrors.StatusError{ErrStatus: metav1.Status{Reason: metav1.StatusReasonNotFound}}
}
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
return errors.New("obj must be a pointer")
}
v.Elem().Set(reflect.ValueOf(ret).Elem())
return nil
}


@@ -0,0 +1,35 @@
package controllers
import (
"context"
"testing"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
)
func TestResourceReader(t *testing.T) {
rr := &testResourceReader{
objects: map[types.NamespacedName]client.Object{
{Namespace: "default", Name: "sec1"}: &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
Namespace: "default",
Name: "sec1",
},
Data: map[string][]byte{
"foo": []byte("bar"),
},
},
},
}
var sec corev1.Secret
err := rr.Get(context.Background(), types.NamespacedName{Namespace: "default", Name: "sec1"}, &sec)
require.NoError(t, err)
require.Equal(t, []byte("bar"), sec.Data["foo"])
}