mirror of https://github.com/actions/actions-runner-controller.git
feat: RunnerSet backed by StatefulSet (#629)
* feat: RunnerSet backed by StatefulSet

  Unlike a runner deployment, a runner set can manage a set of stateful runners by combining a StatefulSet and an admission webhook that mutates StatefulSet-managed pods with the required envvars and registration tokens.

  Resolves #613
  Ref #612

* Upgrade controller-runtime to 0.9.0
* Bump Go to 1.16.x following controller-runtime 0.9.0
* Upgrade kubebuilder to 2.3.2 for the updated etcd and apiserver, following local setup
* Fix startup failure due to missing LeaderElectionID
* Fix the issue where pods became unable to start once actions-runner-controller failed after the mutating webhook had been registered
* Allow force-updating the StatefulSet
* Fix the runner container missing the work and certs-client volume mounts and the DOCKER_HOST and DOCKER_TLS_VERIFY envvars when dockerdWithinRunner=false
* Fix runnerset-controller not applying statefulset.spec.template.spec changes when there were no changes in the runnerset spec
* Enable running acceptance tests against an arbitrary kind cluster
* RunnerSet supports non-ephemeral runners only today
* fix: docker-build from root Makefile on Intel Mac
* fix: arch check fixes for Mac and ARM
* ci: aligning test data format and patching checks
* fix: removing namespace in test data
* chore: adding more ignores
* chore: removing leading space in shebang
* Re-add metrics to org hra testdata
* Bump cert-manager to v1.1.1 and fix deploy.sh

Co-authored-by: toast-gear <15716903+toast-gear@users.noreply.github.com>
Co-authored-by: Callum James Tait <callum.tait@photobox.com>
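The commit does not include a standalone usage snippet, so here is a minimal sketch of the object shape the new controller consumes. The field layout (RunnerConfig and StatefulSetSpec embedded in RunnerSetSpec) is inferred from the newStatefulSet code in the diff below; the names and replica count are hypothetical placeholders.

```go
package example

import (
    appsv1 "k8s.io/api/apps/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

func exampleRunnerSet() *v1alpha1.RunnerSet {
    replicas := int32(2) // hypothetical; the controller defaults to 1 when nil

    return &v1alpha1.RunnerSet{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "example-runnerset",
            Namespace: "default",
        },
        Spec: v1alpha1.RunnerSetSpec{
            // Runner-level settings; the webhook later injects the matching
            // RUNNER_* envvars and a registration token into each pod.
            RunnerConfig: v1alpha1.RunnerConfig{
                Organization: "example-org", // hypothetical organization
            },
            // The rest is a plain StatefulSetSpec, which is what gives the
            // runners stable pod names and optional persistent volumes.
            StatefulSetSpec: appsv1.StatefulSetSpec{
                Replicas: &replicas,
            },
        },
    }
}
```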
@@ -203,7 +203,9 @@ func TestDetermineDesiredReplicas_RepositoryRunner(t *testing.T) {
    Spec: v1alpha1.RunnerDeploymentSpec{
        Template: v1alpha1.RunnerTemplate{
            Spec: v1alpha1.RunnerSpec{
                Repository: tc.repo,
                RunnerConfig: v1alpha1.RunnerConfig{
                    Repository: tc.repo,
                },
            },
        },
        Replicas: tc.fixed,
@@ -458,7 +460,9 @@ func TestDetermineDesiredReplicas_OrganizationalRunner(t *testing.T) {
            },
        },
        Spec: v1alpha1.RunnerSpec{
            Organization: tc.org,
            RunnerConfig: v1alpha1.RunnerConfig{
                Organization: tc.org,
            },
        },
    },
    Replicas: tc.fixed,

@@ -61,7 +61,7 @@ type HorizontalRunnerAutoscalerGitHubWebhook struct {
    Name string
}

func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(request reconcile.Request) (reconcile.Result, error) {
func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) Reconcile(_ context.Context, request reconcile.Request) (reconcile.Result, error) {
    return ctrl.Result{}, nil
}

@@ -443,7 +443,7 @@ func (autoscaler *HorizontalRunnerAutoscalerGitHubWebhook) SetupWithManager(mgr

    autoscaler.Recorder = mgr.GetEventRecorderFor(name)

    if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj runtime.Object) []string {
    if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.HorizontalRunnerAutoscaler{}, scaleTargetKey, func(rawObj client.Object) []string {
        hra := rawObj.(*v1alpha1.HorizontalRunnerAutoscaler)

        if hra.Spec.ScaleTargetRef.Name == "" {

@@ -63,8 +63,7 @@ const defaultReplicas = 1
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=horizontalrunnerautoscalers/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    ctx := context.Background()
func (r *HorizontalRunnerAutoscalerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("horizontalrunnerautoscaler", req.NamespacedName)

    var hra v1alpha1.HorizontalRunnerAutoscaler

@@ -9,7 +9,6 @@ import (

    "github.com/google/go-github/v33/github"
    github2 "github.com/summerwind/actions-runner-controller/github"
    "k8s.io/apimachinery/pkg/runtime"

    "github.com/summerwind/actions-runner-controller/github/fake"

@@ -52,8 +51,9 @@ var (
// * starting all the reconcilers
// * stopping all the reconcilers after the test ends
// Call this function at the start of each of your tests.
func SetupIntegrationTest(ctx context.Context) *testEnvironment {
    var stopCh chan struct{}
func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
    var ctx context.Context
    var cancel func()
    ns := &corev1.Namespace{}

    env := &testEnvironment{
@@ -63,7 +63,7 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
    }

    BeforeEach(func() {
        stopCh = make(chan struct{})
        ctx, cancel = context.WithCancel(ctx2)
        *ns = corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
        }
@@ -166,13 +166,13 @@ func SetupIntegrationTest(ctx context.Context) *testEnvironment {
        go func() {
            defer GinkgoRecover()

            err := mgr.Start(stopCh)
            err := mgr.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to start manager")
        }()
    })

    AfterEach(func() {
        close(stopCh)
        defer cancel()

        env.fakeGithubServer.Close()
        env.webhookServer.Close()
@@ -214,11 +215,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Organization: "test",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Organization: "test",
                    Image:        "bar",
                    Group:        "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -301,11 +305,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -432,11 +440,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -537,11 +549,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -631,11 +647,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -764,11 +784,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -857,11 +881,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -956,11 +984,15 @@ var _ = Context("INTEGRATION: Inside of a new namespace", func() {
                },
            },
            Spec: actionsv1alpha1.RunnerSpec{
                Repository: "test/valid",
                Image: "bar",
                Group: "baz",
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                RunnerConfig: actionsv1alpha1.RunnerConfig{
                    Repository: "test/valid",
                    Image:      "bar",
                    Group:      "baz",
                },
                RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                    Env: []corev1.EnvVar{
                        {Name: "FOO", Value: "FOOVALUE"},
                    },
                },
            },
        },
@@ -1175,7 +1207,7 @@ func (env *testEnvironment) SyncRunnerRegistrations() {
    env.fakeRunnerList.Sync(runnerList.Items)
}

func ExpectCreate(ctx context.Context, rd runtime.Object, s string) {
func ExpectCreate(ctx context.Context, rd client.Object, s string) {
    err := k8sClient.Create(ctx, rd)

    ExpectWithOffset(1, err).NotTo(HaveOccurred(), fmt.Sprintf("failed to create %s resource", s))

controllers/metrics/runnerset.go (new file, 37 lines)
@@ -0,0 +1,37 @@
package metrics

import (
    "github.com/prometheus/client_golang/prometheus"
    "github.com/summerwind/actions-runner-controller/api/v1alpha1"
)

const (
    rsName      = "runnerset"
    rsNamespace = "namespace"
)

var (
    runnerSetMetrics = []prometheus.Collector{
        runnerSetReplicas,
    }
)

var (
    runnerSetReplicas = prometheus.NewGaugeVec(
        prometheus.GaugeOpts{
            Name: "runnerset_spec_replicas",
            Help: "replicas of RunnerSet",
        },
        []string{rsName, rsNamespace},
    )
)

func SetRunnerSet(rd v1alpha1.RunnerSet) {
    labels := prometheus.Labels{
        rsName:      rd.Name,
        rsNamespace: rd.Namespace,
    }
    if rd.Spec.Replicas != nil {
        runnerSetReplicas.With(labels).Set(float64(*rd.Spec.Replicas))
    }
}
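The diff does not show how these collectors reach a scrape endpoint. A sketch, assuming the usual controller-runtime pattern of registering with the global metrics registry in an init function (the registration details here are an assumption, not part of this commit):

```go
package metrics

import (
    // aliased to avoid clashing with this package's own name
    ctrlmetrics "sigs.k8s.io/controller-runtime/pkg/metrics"
)

func init() {
    // controller-runtime serves everything registered in its global Registry
    // on the manager's /metrics endpoint, so runnerset_spec_replicas becomes
    // scrapeable alongside the existing controller metrics.
    ctrlmetrics.Registry.MustRegister(runnerSetMetrics...)
}
```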
controllers/pod_runner_token_injector.go (new file, 128 lines)
@@ -0,0 +1,128 @@
package controllers

import (
    "context"
    "encoding/json"
    "net/http"
    "time"

    "github.com/go-logr/logr"
    "github.com/summerwind/actions-runner-controller/github"
    "gomodules.xyz/jsonpatch/v2"
    admissionv1 "k8s.io/api/admission/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/record"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/webhook/admission"
)

// +kubebuilder:webhook:path=/mutate-runner-set-pod,mutating=true,failurePolicy=ignore,groups="",resources=pods,verbs=create,versions=v1,name=mutate-runner-pod.webhook.actions.summerwind.dev,sideEffects=None,admissionReviewVersions=v1beta1

type PodRunnerTokenInjector struct {
    client.Client

    Name         string
    Log          logr.Logger
    Recorder     record.EventRecorder
    GitHubClient *github.Client
    decoder      *admission.Decoder
}

func (t *PodRunnerTokenInjector) Handle(ctx context.Context, req admission.Request) admission.Response {
    var pod corev1.Pod
    err := t.decoder.Decode(req, &pod)
    if err != nil {
        t.Log.Error(err, "Failed to decode request object")
        return admission.Errored(http.StatusBadRequest, err)
    }

    if pod.Annotations == nil {
        pod.Annotations = map[string]string{}
    }

    var runnerContainer *corev1.Container

    for i := range pod.Spec.Containers {
        c := pod.Spec.Containers[i]

        if c.Name == "runner" {
            runnerContainer = &c
        }
    }

    if runnerContainer == nil {
        return newEmptyResponse()
    }

    enterprise, okEnterprise := getEnv(runnerContainer, "RUNNER_ENTERPRISE")
    repo, okRepo := getEnv(runnerContainer, "RUNNER_REPO")
    org, okOrg := getEnv(runnerContainer, "RUNNER_ORG")
    if !okRepo || !okOrg || !okEnterprise {
        return newEmptyResponse()
    }

    rt, err := t.GitHubClient.GetRegistrationToken(context.Background(), enterprise, org, repo, pod.Name)
    if err != nil {
        t.Log.Error(err, "Failed to get new registration token")
        return admission.Errored(http.StatusInternalServerError, err)
    }

    ts := rt.GetExpiresAt().Format(time.RFC3339)

    updated := mutatePod(&pod, *rt.Token)

    updated.Annotations["actions-runner-controller/token-expires-at"] = ts

    if pod.Spec.RestartPolicy != corev1.RestartPolicyOnFailure {
        updated.Spec.RestartPolicy = corev1.RestartPolicyOnFailure
    }

    buf, err := json.Marshal(updated)
    if err != nil {
        t.Log.Error(err, "Failed to encode new object")
        return admission.Errored(http.StatusInternalServerError, err)
    }

    res := admission.PatchResponseFromRaw(req.Object.Raw, buf)
    return res
}

func getEnv(container *corev1.Container, key string) (string, bool) {
    for _, env := range container.Env {
        if env.Name == key {
            return env.Value, true
        }
    }

    return "", false
}

func (t *PodRunnerTokenInjector) InjectDecoder(d *admission.Decoder) error {
    t.decoder = d
    return nil
}

func newEmptyResponse() admission.Response {
    pt := admissionv1.PatchTypeJSONPatch
    return admission.Response{
        Patches: []jsonpatch.Operation{},
        AdmissionResponse: admissionv1.AdmissionResponse{
            Allowed:   true,
            PatchType: &pt,
        },
    }
}

func (r *PodRunnerTokenInjector) SetupWithManager(mgr ctrl.Manager) error {
    name := "pod-runner-token-injector"
    if r.Name != "" {
        name = r.Name
    }

    r.Recorder = mgr.GetEventRecorderFor(name)

    mgr.GetWebhookServer().Register("/mutate-runner-set-pod", &admission.Webhook{Handler: r})

    return nil
}
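The main.go wiring for this webhook is not part of the diff. A sketch, under the assumption that the injector is attached to the shared controller manager the same way the other controllers are (the variable and helper names below are hypothetical):

```go
package main

import (
    "os"

    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/summerwind/actions-runner-controller/controllers"
    "github.com/summerwind/actions-runner-controller/github"
)

// setupTokenInjector is a hypothetical helper called from main() after the
// manager and the GitHub client have been constructed.
func setupTokenInjector(mgr ctrl.Manager, ghClient *github.Client) {
    injector := &controllers.PodRunnerTokenInjector{
        Client:       mgr.GetClient(),
        Log:          ctrl.Log.WithName("webhooks").WithName("podrunnertokeninjector"),
        GitHubClient: ghClient,
    }
    // Registers the handler on /mutate-runner-set-pod of the manager's
    // webhook server, matching the kubebuilder marker above.
    if err := injector.SetupWithManager(mgr); err != nil {
        ctrl.Log.Error(err, "unable to set up pod-runner-token-injector")
        os.Exit(1)
    }
}
```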
@@ -74,8 +74,7 @@ type RunnerReconciler struct {
// +kubebuilder:rbac:groups=core,resources=pods/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *RunnerReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    ctx := context.Background()
func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("runner", req.NamespacedName)

    var runner v1alpha1.Runner
@@ -563,89 +562,11 @@ func (r *RunnerReconciler) updateRegistrationToken(ctx context.Context, runner v
}

func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
    var (
        privileged                bool = true
        dockerdInRunner           bool = runner.Spec.DockerdWithinRunnerContainer != nil && *runner.Spec.DockerdWithinRunnerContainer
        dockerEnabled             bool = runner.Spec.DockerEnabled == nil || *runner.Spec.DockerEnabled
        ephemeral                 bool = runner.Spec.Ephemeral == nil || *runner.Spec.Ephemeral
        dockerdInRunnerPrivileged bool = dockerdInRunner
    )

    runnerImage := runner.Spec.Image
    if runnerImage == "" {
        runnerImage = r.RunnerImage
    }

    workDir := runner.Spec.WorkDir
    if workDir == "" {
        workDir = "/runner/_work"
    }

    runnerImagePullPolicy := runner.Spec.ImagePullPolicy
    if runnerImagePullPolicy == "" {
        runnerImagePullPolicy = corev1.PullAlways
    }

    env := []corev1.EnvVar{
        {
            Name:  "RUNNER_NAME",
            Value: runner.Name,
        },
        {
            Name:  "RUNNER_ORG",
            Value: runner.Spec.Organization,
        },
        {
            Name:  "RUNNER_REPO",
            Value: runner.Spec.Repository,
        },
        {
            Name:  "RUNNER_ENTERPRISE",
            Value: runner.Spec.Enterprise,
        },
        {
            Name:  "RUNNER_LABELS",
            Value: strings.Join(runner.Spec.Labels, ","),
        },
        {
            Name:  "RUNNER_GROUP",
            Value: runner.Spec.Group,
        },
        {
            Name:  "RUNNER_TOKEN",
            Value: runner.Status.Registration.Token,
        },
        {
            Name:  "DOCKERD_IN_RUNNER",
            Value: fmt.Sprintf("%v", dockerdInRunner),
        },
        {
            Name:  "GITHUB_URL",
            Value: r.GitHubClient.GithubBaseURL,
        },
        {
            Name:  "RUNNER_WORKDIR",
            Value: workDir,
        },
        {
            Name:  "RUNNER_EPHEMERAL",
            Value: fmt.Sprintf("%v", ephemeral),
        },
    }

    if metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly) {
        env = append(env, corev1.EnvVar{
            Name:  "RUNNER_REGISTRATION_ONLY",
            Value: "true",
        },
        )
    }

    env = append(env, runner.Spec.Env...)
    var template corev1.Pod

    labels := map[string]string{}

    for k, v := range runner.Labels {
    for k, v := range runner.ObjectMeta.Labels {
        labels[k] = v
    }

@@ -669,61 +590,275 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
    //
    // See https://github.com/summerwind/actions-runner-controller/issues/143 for more context.
    labels[LabelKeyPodTemplateHash] = hash.FNVHashStringObjects(
        filterLabels(runner.Labels, LabelKeyRunnerTemplateHash),
        runner.Annotations,
        filterLabels(runner.ObjectMeta.Labels, LabelKeyRunnerTemplateHash),
        runner.ObjectMeta.Annotations,
        runner.Spec,
        r.GitHubClient.GithubBaseURL,
    )

    objectMeta := metav1.ObjectMeta{
        Name:        runner.ObjectMeta.Name,
        Namespace:   runner.ObjectMeta.Namespace,
        Labels:      labels,
        Annotations: runner.ObjectMeta.Annotations,
    }

    template.ObjectMeta = objectMeta

    if len(runner.Spec.Containers) == 0 {
        template.Spec.Containers = append(template.Spec.Containers, corev1.Container{
            Name:            "runner",
            ImagePullPolicy: runner.Spec.ImagePullPolicy,
            EnvFrom:         runner.Spec.EnvFrom,
            Env:             runner.Spec.Env,
            Resources:       runner.Spec.Resources,
        }, corev1.Container{
            Name:         "docker",
            VolumeMounts: runner.Spec.DockerVolumeMounts,
            Resources:    runner.Spec.DockerdContainerResources,
        })
    } else {
        template.Spec.Containers = runner.Spec.Containers
    }

    template.Spec.SecurityContext = runner.Spec.SecurityContext

    registrationOnly := metav1.HasAnnotation(runner.ObjectMeta, annotationKeyRegistrationOnly)

    pod, err := newRunnerPod(template, runner.Spec.RunnerConfig, r.RunnerImage, r.DockerImage, r.GitHubClient.GithubBaseURL, registrationOnly)
    if err != nil {
        return pod, err
    }

    // Customize the pod spec according to the runner spec
    runnerSpec := runner.Spec

    if len(runnerSpec.VolumeMounts) != 0 {
        pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, runnerSpec.VolumeMounts...)
    }

    if len(runnerSpec.Volumes) != 0 {
        pod.Spec.Volumes = append(pod.Spec.Volumes, runnerSpec.Volumes...)
    }
    if len(runnerSpec.InitContainers) != 0 {
        pod.Spec.InitContainers = append(pod.Spec.InitContainers, runnerSpec.InitContainers...)
    }

    if runnerSpec.NodeSelector != nil {
        pod.Spec.NodeSelector = runnerSpec.NodeSelector
    }
    if runnerSpec.ServiceAccountName != "" {
        pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
    }
    if runnerSpec.AutomountServiceAccountToken != nil {
        pod.Spec.AutomountServiceAccountToken = runnerSpec.AutomountServiceAccountToken
    }

    if len(runnerSpec.SidecarContainers) != 0 {
        pod.Spec.Containers = append(pod.Spec.Containers, runnerSpec.SidecarContainers...)
    }

    if len(runnerSpec.ImagePullSecrets) != 0 {
        pod.Spec.ImagePullSecrets = runnerSpec.ImagePullSecrets
    }

    if runnerSpec.Affinity != nil {
        pod.Spec.Affinity = runnerSpec.Affinity
    }

    if len(runnerSpec.Tolerations) != 0 {
        pod.Spec.Tolerations = runnerSpec.Tolerations
    }

    if len(runnerSpec.EphemeralContainers) != 0 {
        pod.Spec.EphemeralContainers = runnerSpec.EphemeralContainers
    }

    if runnerSpec.TerminationGracePeriodSeconds != nil {
        pod.Spec.TerminationGracePeriodSeconds = runnerSpec.TerminationGracePeriodSeconds
    }

    if len(runnerSpec.HostAliases) != 0 {
        pod.Spec.HostAliases = runnerSpec.HostAliases
    }

    if runnerSpec.RuntimeClassName != nil {
        pod.Spec.RuntimeClassName = runnerSpec.RuntimeClassName
    }

    pod.ObjectMeta.Name = runner.ObjectMeta.Name

    // Inject the registration token and the runner name
    updated := mutatePod(&pod, runner.Status.Registration.Token)

    if err := ctrl.SetControllerReference(&runner, updated, r.Scheme); err != nil {
        return pod, err
    }

    return *updated, nil
}

func mutatePod(pod *corev1.Pod, token string) *corev1.Pod {
    updated := pod.DeepCopy()

    for i := range pod.Spec.Containers {
        if pod.Spec.Containers[i].Name == "runner" {
            updated.Spec.Containers[i].Env = append(updated.Spec.Containers[i].Env,
                corev1.EnvVar{
                    Name:  "RUNNER_NAME",
                    Value: pod.ObjectMeta.Name,
                },
                corev1.EnvVar{
                    Name:  "RUNNER_TOKEN",
                    Value: token,
                },
            )
        }
    }

    return updated
}

func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage, defaultDockerImage, githubBaseURL string, registrationOnly bool) (corev1.Pod, error) {
    var (
        privileged                bool = true
        dockerdInRunner           bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
        dockerEnabled             bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
        ephemeral                 bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
        dockerdInRunnerPrivileged bool = dockerdInRunner
    )

    runnerImage := runnerSpec.Image
    if runnerImage == "" {
        runnerImage = defaultRunnerImage
    }

    workDir := runnerSpec.WorkDir
    if workDir == "" {
        workDir = "/runner/_work"
    }

    env := []corev1.EnvVar{
        {
            Name:  "RUNNER_ORG",
            Value: runnerSpec.Organization,
        },
        {
            Name:  "RUNNER_REPO",
            Value: runnerSpec.Repository,
        },
        {
            Name:  "RUNNER_ENTERPRISE",
            Value: runnerSpec.Enterprise,
        },
        {
            Name:  "RUNNER_LABELS",
            Value: strings.Join(runnerSpec.Labels, ","),
        },
        {
            Name:  "RUNNER_GROUP",
            Value: runnerSpec.Group,
        },
        {
            Name:  "DOCKERD_IN_RUNNER",
            Value: fmt.Sprintf("%v", dockerdInRunner),
        },
        {
            Name:  "GITHUB_URL",
            Value: githubBaseURL,
        },
        {
            Name:  "RUNNER_WORKDIR",
            Value: workDir,
        },
        {
            Name:  "RUNNER_EPHEMERAL",
            Value: fmt.Sprintf("%v", ephemeral),
        },
    }

    if registrationOnly {
        env = append(env, corev1.EnvVar{
            Name:  "RUNNER_REGISTRATION_ONLY",
            Value: "true",
        },
        )
    }

    var seLinuxOptions *corev1.SELinuxOptions
    if runner.Spec.SecurityContext != nil {
        seLinuxOptions = runner.Spec.SecurityContext.SELinuxOptions
    if template.Spec.SecurityContext != nil {
        seLinuxOptions = template.Spec.SecurityContext.SELinuxOptions
        if seLinuxOptions != nil {
            privileged = false
            dockerdInRunnerPrivileged = false
        }
    }

    pod := corev1.Pod{
        ObjectMeta: metav1.ObjectMeta{
            Name:        runner.Name,
            Namespace:   runner.Namespace,
            Labels:      labels,
            Annotations: runner.Annotations,
        },
        Spec: corev1.PodSpec{
            RestartPolicy: "OnFailure",
            Containers: []corev1.Container{
                {
                    Name:            containerName,
                    Image:           runnerImage,
                    ImagePullPolicy: runnerImagePullPolicy,
                    Env:             env,
                    EnvFrom:         runner.Spec.EnvFrom,
                    SecurityContext: &corev1.SecurityContext{
                        // Runner need to run privileged if it contains DinD
                        Privileged: &dockerdInRunnerPrivileged,
                    },
                    Resources: runner.Spec.Resources,
                },
            },
        },
    var runnerContainerIndex, dockerdContainerIndex int
    var runnerContainer, dockerdContainer *corev1.Container

    for i := range template.Spec.Containers {
        c := template.Spec.Containers[i]
        if c.Name == containerName {
            runnerContainerIndex = i
            runnerContainer = &c
        } else if c.Name == "docker" {
            dockerdContainerIndex = i
            dockerdContainer = &c
        }
    }

    if mtu := runner.Spec.DockerMTU; mtu != nil && dockerdInRunner {
        pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
    if runnerContainer == nil {
        runnerContainerIndex = -1
        runnerContainer = &corev1.Container{
            Name: containerName,
            SecurityContext: &corev1.SecurityContext{
                // Runner need to run privileged if it contains DinD
                Privileged: &dockerdInRunnerPrivileged,
            },
        }
    }

    if dockerdContainer == nil {
        dockerdContainerIndex = -1
        dockerdContainer = &corev1.Container{
            Name: "docker",
        }
    }

    runnerContainer.Image = runnerImage
    if runnerContainer.ImagePullPolicy == "" {
        runnerContainer.ImagePullPolicy = corev1.PullAlways
    }

    runnerContainer.Env = append(runnerContainer.Env, env...)

    if runnerContainer.SecurityContext == nil {
        runnerContainer.SecurityContext = &corev1.SecurityContext{}
    }
    // Runner need to run privileged if it contains DinD
    runnerContainer.SecurityContext.Privileged = &dockerdInRunnerPrivileged

    pod := template.DeepCopy()

    if pod.Spec.RestartPolicy == "" {
        pod.Spec.RestartPolicy = "OnFailure"
    }

    if mtu := runnerSpec.DockerMTU; mtu != nil && dockerdInRunner {
        runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
            {
                Name:  "MTU",
                Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
                Value: fmt.Sprintf("%d", *runnerSpec.DockerMTU),
            },
        }...)
    }

    if mirror := runner.Spec.DockerRegistryMirror; mirror != nil && dockerdInRunner {
        pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
    if mirror := runnerSpec.DockerRegistryMirror; mirror != nil && dockerdInRunner {
        runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
            {
                Name:  "DOCKER_REGISTRY_MIRROR",
                Value: *runner.Spec.DockerRegistryMirror,
                Value: *runnerSpec.DockerRegistryMirror,
            },
        }...)
    }
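The extracted mutatePod helper above is the one piece shared between the Runner controller and the new admission webhook: it deep-copies the pod and appends RUNNER_NAME and RUNNER_TOKEN to any container named runner. A hypothetical test (not part of this commit) that pins down that behavior:

```go
package controllers

import (
    "testing"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestMutatePodInjectsNameAndToken(t *testing.T) {
    pod := &corev1.Pod{
        // Hypothetical statefulset-style pod name.
        ObjectMeta: metav1.ObjectMeta{Name: "example-runnerset-0"},
        Spec: corev1.PodSpec{
            Containers: []corev1.Container{{Name: "runner"}},
        },
    }

    updated := mutatePod(pod, "dummy-registration-token")

    env := updated.Spec.Containers[0].Env
    if len(env) != 2 || env[0].Name != "RUNNER_NAME" || env[1].Name != "RUNNER_TOKEN" {
        t.Fatalf("unexpected env: %+v", env)
    }
    if env[0].Value != "example-runnerset-0" || env[1].Value != "dummy-registration-token" {
        t.Fatalf("unexpected env values: %+v", env)
    }
    // The input pod must stay untouched; mutatePod works on a deep copy.
    if len(pod.Spec.Containers[0].Env) != 0 {
        t.Fatalf("input pod was mutated")
    }
}
```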
@@ -739,8 +874,8 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
    runnerVolumeMountPath := "/runner"
    runnerVolumeEmptyDir := &corev1.EmptyDirVolumeSource{}

    if runner.Spec.VolumeSizeLimit != nil {
        runnerVolumeEmptyDir.SizeLimit = runner.Spec.VolumeSizeLimit
    if runnerSpec.VolumeSizeLimit != nil {
        runnerVolumeEmptyDir.SizeLimit = runnerSpec.VolumeSizeLimit
    }

    pod.Spec.Volumes = append(pod.Spec.Volumes,
@@ -752,7 +887,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
        },
    )

    pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
    runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
        corev1.VolumeMount{
            Name:      runnerVolumeName,
            MountPath: runnerVolumeMountPath,
@@ -774,7 +909,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
            },
        },
    )
    pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts,
    runnerContainer.VolumeMounts = append(runnerContainer.VolumeMounts,
        corev1.VolumeMount{
            Name:      "work",
            MountPath: workDir,
@@ -785,7 +920,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
            ReadOnly: true,
        },
    )
    pod.Spec.Containers[0].Env = append(pod.Spec.Containers[0].Env, []corev1.EnvVar{
    runnerContainer.Env = append(runnerContainer.Env, []corev1.EnvVar{
        {
            Name:  "DOCKER_HOST",
            Value: "tcp://localhost:2376",
@@ -816,120 +951,66 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
            MountPath: "/certs/client",
        },
    }
    if extraDockerVolumeMounts := runner.Spec.DockerVolumeMounts; extraDockerVolumeMounts != nil {
        dockerVolumeMounts = append(dockerVolumeMounts, extraDockerVolumeMounts...)

    if dockerdContainer.Image == "" {
        dockerdContainer.Image = defaultDockerImage
    }

    pod.Spec.Containers = append(pod.Spec.Containers, corev1.Container{
        Name:         "docker",
        Image:        r.DockerImage,
        VolumeMounts: dockerVolumeMounts,
        Env: []corev1.EnvVar{
            {
                Name:  "DOCKER_TLS_CERTDIR",
                Value: "/certs",
            },
        },
        SecurityContext: &corev1.SecurityContext{
            Privileged:     &privileged,
            SELinuxOptions: seLinuxOptions,
        },
        Resources: runner.Spec.DockerdContainerResources,
    dockerdContainer.Env = append(dockerdContainer.Env, corev1.EnvVar{
        Name:  "DOCKER_TLS_CERTDIR",
        Value: "/certs",
    })

    if mtu := runner.Spec.DockerMTU; mtu != nil {
        pod.Spec.Containers[1].Env = append(pod.Spec.Containers[1].Env, []corev1.EnvVar{
    if dockerdContainer.SecurityContext == nil {
        dockerdContainer.SecurityContext = &corev1.SecurityContext{
            Privileged:     &privileged,
            SELinuxOptions: seLinuxOptions,
        }
    }

    dockerdContainer.VolumeMounts = append(dockerdContainer.VolumeMounts, dockerVolumeMounts...)

    if mtu := runnerSpec.DockerMTU; mtu != nil {
        dockerdContainer.Env = append(dockerdContainer.Env, []corev1.EnvVar{
            // See https://docs.docker.com/engine/security/rootless/
            {
                Name:  "DOCKERD_ROOTLESS_ROOTLESSKIT_MTU",
                Value: fmt.Sprintf("%d", *runner.Spec.DockerMTU),
                Value: fmt.Sprintf("%d", *runnerSpec.DockerMTU),
            },
        }...)

        pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
        dockerdContainer.Args = append(dockerdContainer.Args,
            "--mtu",
            fmt.Sprintf("%d", *runner.Spec.DockerMTU),
            fmt.Sprintf("%d", *runnerSpec.DockerMTU),
        )
    }

    if mirror := runner.Spec.DockerRegistryMirror; mirror != nil {
        pod.Spec.Containers[1].Args = append(pod.Spec.Containers[1].Args,
            fmt.Sprintf("--registry-mirror=%s", *runner.Spec.DockerRegistryMirror),
    if mirror := runnerSpec.DockerRegistryMirror; mirror != nil {
        dockerdContainer.Args = append(dockerdContainer.Args,
            fmt.Sprintf("--registry-mirror=%s", *runnerSpec.DockerRegistryMirror),
        )
    }
}

if len(runner.Spec.Containers) != 0 {
    pod.Spec.Containers = runner.Spec.Containers
    for i := 0; i < len(pod.Spec.Containers); i++ {
        if pod.Spec.Containers[i].Name == containerName {
            pod.Spec.Containers[i].Env = append(pod.Spec.Containers[i].Env, env...)
        }
if runnerContainerIndex == -1 {
    pod.Spec.Containers = append([]corev1.Container{*runnerContainer}, pod.Spec.Containers...)

    if dockerdContainerIndex != -1 {
        dockerdContainerIndex++
    }
} else {
    pod.Spec.Containers[runnerContainerIndex] = *runnerContainer
}

if !dockerdInRunner && dockerEnabled {
    if dockerdContainerIndex == -1 {
        pod.Spec.Containers = append(pod.Spec.Containers, *dockerdContainer)
    } else {
        pod.Spec.Containers[dockerdContainerIndex] = *dockerdContainer
    }
}

if len(runner.Spec.VolumeMounts) != 0 {
    pod.Spec.Containers[0].VolumeMounts = append(pod.Spec.Containers[0].VolumeMounts, runner.Spec.VolumeMounts...)
}

if len(runner.Spec.Volumes) != 0 {
    pod.Spec.Volumes = append(pod.Spec.Volumes, runner.Spec.Volumes...)
}
if len(runner.Spec.InitContainers) != 0 {
    pod.Spec.InitContainers = append(pod.Spec.InitContainers, runner.Spec.InitContainers...)
}

if runner.Spec.NodeSelector != nil {
    pod.Spec.NodeSelector = runner.Spec.NodeSelector
}
if runner.Spec.ServiceAccountName != "" {
    pod.Spec.ServiceAccountName = runner.Spec.ServiceAccountName
}
if runner.Spec.AutomountServiceAccountToken != nil {
    pod.Spec.AutomountServiceAccountToken = runner.Spec.AutomountServiceAccountToken
}

if len(runner.Spec.SidecarContainers) != 0 {
    pod.Spec.Containers = append(pod.Spec.Containers, runner.Spec.SidecarContainers...)
}

if runner.Spec.SecurityContext != nil {
    pod.Spec.SecurityContext = runner.Spec.SecurityContext
}

if len(runner.Spec.ImagePullSecrets) != 0 {
    pod.Spec.ImagePullSecrets = runner.Spec.ImagePullSecrets
}

if runner.Spec.Affinity != nil {
    pod.Spec.Affinity = runner.Spec.Affinity
}

if len(runner.Spec.Tolerations) != 0 {
    pod.Spec.Tolerations = runner.Spec.Tolerations
}

if len(runner.Spec.EphemeralContainers) != 0 {
    pod.Spec.EphemeralContainers = runner.Spec.EphemeralContainers
}

if runner.Spec.TerminationGracePeriodSeconds != nil {
    pod.Spec.TerminationGracePeriodSeconds = runner.Spec.TerminationGracePeriodSeconds
}

if len(runner.Spec.HostAliases) != 0 {
    pod.Spec.HostAliases = runner.Spec.HostAliases
}

if runner.Spec.RuntimeClassName != nil {
    pod.Spec.RuntimeClassName = runner.Spec.RuntimeClassName
}

if err := ctrl.SetControllerReference(&runner, &pod, r.Scheme); err != nil {
    return pod, err
}

return pod, nil
return *pod, nil
}

func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {

@@ -65,8 +65,7 @@ type RunnerDeploymentReconciler struct {
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnerreplicasets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *RunnerDeploymentReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    ctx := context.Background()
func (r *RunnerDeploymentReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("runnerdeployment", req.NamespacedName)

    var rd v1alpha1.RunnerDeployment
@@ -439,7 +438,7 @@ func (r *RunnerDeploymentReconciler) SetupWithManager(mgr ctrl.Manager) error {

    r.Recorder = mgr.GetEventRecorderFor(name)

    if err := mgr.GetFieldIndexer().IndexField(&v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj runtime.Object) []string {
    if err := mgr.GetFieldIndexer().IndexField(context.TODO(), &v1alpha1.RunnerReplicaSet{}, runnerSetOwnerKey, func(rawObj client.Object) []string {
        runnerSet := rawObj.(*v1alpha1.RunnerReplicaSet)
        owner := metav1.GetControllerOf(runnerSet)
        if owner == nil {

@@ -50,7 +50,9 @@ func TestNewRunnerReplicaSet(t *testing.T) {
            },
        },
        Spec: actionsv1alpha1.RunnerSpec{
            Labels: []string{"project1"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Labels: []string{"project1"},
            },
        },
    },
},
@@ -126,12 +128,13 @@ func TestNewRunnerReplicaSet(t *testing.T) {
// * starting the 'RunnerDeploymentReconciler'
// * stopping the 'RunnerDeploymentReconciler" after the test ends
// Call this function at the start of each of your tests.
func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
    var stopCh chan struct{}
func SetupDeploymentTest(ctx2 context.Context) *corev1.Namespace {
    var ctx context.Context
    var cancel func()
    ns := &corev1.Namespace{}

    BeforeEach(func() {
        stopCh = make(chan struct{})
        ctx, cancel = context.WithCancel(ctx2)
        *ns = corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
        }
@@ -157,13 +160,13 @@ func SetupDeploymentTest(ctx context.Context) *corev1.Namespace {
        go func() {
            defer GinkgoRecover()

            err := mgr.Start(stopCh)
            err := mgr.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to start manager")
        }()
    })

    AfterEach(func() {
        close(stopCh)
        defer cancel()

        err := k8sClient.Delete(ctx, ns)
        Expect(err).NotTo(HaveOccurred(), "failed to delete test namespace")
@@ -201,10 +204,14 @@ var _ = Context("Inside of a new namespace", func() {
            },
        },
        Spec: actionsv1alpha1.RunnerSpec{
            Repository: "test/valid",
            Image: "bar",
            Env: []corev1.EnvVar{
                {Name: "FOO", Value: "FOOVALUE"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Repository: "test/valid",
                Image:      "bar",
            },
            RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                },
            },
        },
    },
@@ -297,10 +304,14 @@ var _ = Context("Inside of a new namespace", func() {
    Replicas: intPtr(1),
    Template: actionsv1alpha1.RunnerTemplate{
        Spec: actionsv1alpha1.RunnerSpec{
            Repository: "test/valid",
            Image: "bar",
            Env: []corev1.EnvVar{
                {Name: "FOO", Value: "FOOVALUE"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Repository: "test/valid",
                Image:      "bar",
            },
            RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                },
            },
        },
    },
@@ -393,10 +404,14 @@ var _ = Context("Inside of a new namespace", func() {
    Replicas: intPtr(1),
    Template: actionsv1alpha1.RunnerTemplate{
        Spec: actionsv1alpha1.RunnerSpec{
            Repository: "test/valid",
            Image: "bar",
            Env: []corev1.EnvVar{
                {Name: "FOO", Value: "FOOVALUE"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Repository: "test/valid",
                Image:      "bar",
            },
            RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                },
            },
        },
    },

@@ -56,8 +56,7 @@ type RunnerReplicaSetReconciler struct {
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch

func (r *RunnerReplicaSetReconciler) Reconcile(req ctrl.Request) (ctrl.Result, error) {
    ctx := context.Background()
func (r *RunnerReplicaSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("runnerreplicaset", req.NamespacedName)

    var rs v1alpha1.RunnerReplicaSet

@@ -33,12 +33,13 @@ var (
// * starting the 'RunnerReconciler'
// * stopping the 'RunnerReplicaSetReconciler" after the test ends
// Call this function at the start of each of your tests.
func SetupTest(ctx context.Context) *corev1.Namespace {
    var stopCh chan struct{}
func SetupTest(ctx2 context.Context) *corev1.Namespace {
    var ctx context.Context
    var cancel func()
    ns := &corev1.Namespace{}

    BeforeEach(func() {
        stopCh = make(chan struct{})
        ctx, cancel = context.WithCancel(ctx2)
        *ns = corev1.Namespace{
            ObjectMeta: metav1.ObjectMeta{Name: "testns-" + randStringRunes(5)},
        }
@@ -69,13 +70,13 @@ func SetupTest(ctx context.Context) *corev1.Namespace {
        go func() {
            defer GinkgoRecover()

            err := mgr.Start(stopCh)
            err := mgr.Start(ctx)
            Expect(err).NotTo(HaveOccurred(), "failed to start manager")
        }()
    })

    AfterEach(func() {
        close(stopCh)
        defer cancel()

        server.Close()
        err := k8sClient.Delete(ctx, ns)
@@ -128,10 +129,14 @@ var _ = Context("Inside of a new namespace", func() {
            },
        },
        Spec: actionsv1alpha1.RunnerSpec{
            Repository: "test/valid",
            Image: "bar",
            Env: []corev1.EnvVar{
                {Name: "FOO", Value: "FOOVALUE"},
            RunnerConfig: actionsv1alpha1.RunnerConfig{
                Repository: "test/valid",
                Image:      "bar",
            },
            RunnerPodSpec: actionsv1alpha1.RunnerPodSpec{
                Env: []corev1.EnvVar{
                    {Name: "FOO", Value: "FOOVALUE"},
                },
            },
        },
    },

controllers/runnerset_controller.go (new file, 316 lines)
@@ -0,0 +1,316 @@
/*
Copyright 2021 The actions-runner-controller authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package controllers

import (
    "context"
    "reflect"
    "time"

    appsv1 "k8s.io/api/apps/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/types"

    "github.com/go-logr/logr"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/client-go/tools/record"
    ctrl "sigs.k8s.io/controller-runtime"
    "sigs.k8s.io/controller-runtime/pkg/client"

    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/summerwind/actions-runner-controller/api/v1alpha1"
    "github.com/summerwind/actions-runner-controller/controllers/metrics"
)

const (
    LabelKeyRunnerSetName = "runnerset-name"
)

// RunnerSetReconciler reconciles a Runner object
type RunnerSetReconciler struct {
    Name string

    client.Client
    Log      logr.Logger
    Recorder record.EventRecorder
    Scheme   *runtime.Scheme

    CommonRunnerLabels       []string
    GitHubBaseURL            string
    RunnerImage, DockerImage string
}

// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets/finalizers,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=apps,resources=statefulsets,verbs=get;list;watch;create;update;patch;delete
// +kubebuilder:rbac:groups=apps,resources=statefulsets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=core,resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=coordination.k8s.io,resources=leases,verbs=get;list;create;update

// Note that coordination.k8s.io/leases permission must be added to any of the controllers to avoid the following error:
// E0613 07:02:08.004278 1 leaderelection.go:325] error retrieving resource lock actions-runner-system/actions-runner-controller: leases.coordination.k8s.io "actions-runner-controller" is forbidden: User "system:serviceaccount:actions-runner-system:actions-runner-controller" cannot get resource "leases" in API group "coordination.k8s.io" in the namespace "actions-runner-system"

func (r *RunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
    log := r.Log.WithValues("runnerset", req.NamespacedName)

    runnerSet := &v1alpha1.RunnerSet{}
    if err := r.Get(ctx, req.NamespacedName, runnerSet); err != nil {
        err = client.IgnoreNotFound(err)

        if err != nil {
            log.Error(err, "Could not get RunnerSet")
        }

        return ctrl.Result{}, err
    }

    if !runnerSet.ObjectMeta.DeletionTimestamp.IsZero() {
        return ctrl.Result{}, nil
    }

    metrics.SetRunnerSet(*runnerSet)

    desiredStatefulSet, err := r.newStatefulSet(runnerSet)
    if err != nil {
        r.Recorder.Event(runnerSet, corev1.EventTypeNormal, "RunnerAutoscalingFailure", err.Error())

        log.Error(err, "Could not create statefulset")

        return ctrl.Result{}, err
    }

    liveStatefulSet := &appsv1.StatefulSet{}
    if err := r.Get(ctx, types.NamespacedName{Namespace: runnerSet.Namespace, Name: runnerSet.Name}, liveStatefulSet); err != nil {
        if !errors.IsNotFound(err) {
            log.Error(err, "Failed to get live statefulset")

            return ctrl.Result{}, err
        }

        if err := r.Client.Create(ctx, desiredStatefulSet); err != nil {
            log.Error(err, "Failed to create statefulset resource")

            return ctrl.Result{}, err
        }

        return ctrl.Result{}, nil
    }

    liveTemplateHash, ok := getStatefulSetTemplateHash(liveStatefulSet)
    if !ok {
        log.Info("Failed to get template hash of newest statefulset resource. It must be in an invalid state. Please manually delete the statefulset so that it is recreated")

        return ctrl.Result{}, nil
    }

    desiredTemplateHash, ok := getStatefulSetTemplateHash(desiredStatefulSet)
    if !ok {
        log.Info("Failed to get template hash of desired statefulset. It must be in an invalid state. Please manually delete the statefulset so that it is recreated")

        return ctrl.Result{}, nil
    }

    if liveTemplateHash != desiredTemplateHash {
        copy := liveStatefulSet.DeepCopy()
        copy.Spec = desiredStatefulSet.Spec

        if err := r.Client.Patch(ctx, copy, client.MergeFrom(liveStatefulSet)); err != nil {
            log.Error(err, "Failed to patch statefulset", "reason", errors.ReasonForError(err))

            if errors.IsInvalid(err) {
                // NOTE: This might not be ideal, but deal with the forbidden error by recreating the statefulset.
                // Probably we'd better create a registration-only runner to prevent queued jobs from immediately failing.
                //
                // 2021-06-13T07:19:52.760Z ERROR actions-runner-controller.runnerset Failed to patch statefulset
                // {"runnerset": "default/example-runnerset", "error": "StatefulSet.apps \"example-runnerset\" is invalid: s
                // pec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy'
                // are forbidden"}
                //
                // Even though the error message includes "Forbidden", this error's reason is "Invalid".
                // That's why we're using errors.IsInvalid above.

                if err := r.Client.Delete(ctx, liveStatefulSet); err != nil {
                    log.Error(err, "Failed to delete statefulset for force-update")
                    return ctrl.Result{}, err
                }
                log.Info("Deleted statefulset for force-update")
            }

            return ctrl.Result{}, err
        }

        // We requeue in order to clean up old runner replica sets later.
        // Otherwise, they aren't cleaned up until the next re-sync interval.
        return ctrl.Result{RequeueAfter: 5 * time.Second}, nil
    }

    const defaultReplicas = 1

    var replicasOfLiveStatefulSet *int
    if liveStatefulSet.Spec.Replicas != nil {
        v := int(*liveStatefulSet.Spec.Replicas)
        replicasOfLiveStatefulSet = &v
    }

    var replicasOfDesiredStatefulSet *int
    if desiredStatefulSet.Spec.Replicas != nil {
        v := int(*desiredStatefulSet.Spec.Replicas)
        replicasOfDesiredStatefulSet = &v
    }

    currentDesiredReplicas := getIntOrDefault(replicasOfLiveStatefulSet, defaultReplicas)
    newDesiredReplicas := getIntOrDefault(replicasOfDesiredStatefulSet, defaultReplicas)

    // Please add more conditions that we can in-place update the newest runnerreplicaset without disruption
    if currentDesiredReplicas != newDesiredReplicas {
        v := int32(newDesiredReplicas)

        updated := liveStatefulSet.DeepCopy()
        updated.Spec.Replicas = &v

        if err := r.Client.Patch(ctx, updated, client.MergeFrom(liveStatefulSet)); err != nil {
            log.Error(err, "Failed to update statefulset")

            return ctrl.Result{}, err
        }

        return ctrl.Result{}, nil
    }

    statusReplicas := int(liveStatefulSet.Status.Replicas)
    statusReadyReplicas := int(liveStatefulSet.Status.ReadyReplicas)
    totalCurrentReplicas := int(liveStatefulSet.Status.CurrentReplicas)
    updatedReplicas := int(liveStatefulSet.Status.UpdatedReplicas)

    status := runnerSet.Status.DeepCopy()

    status.CurrentReplicas = &totalCurrentReplicas
    status.ReadyReplicas = &statusReadyReplicas
    status.DesiredReplicas = &newDesiredReplicas
    status.Replicas = &statusReplicas
    status.UpdatedReplicas = &updatedReplicas

    if !reflect.DeepEqual(runnerSet.Status, status) {
        updated := runnerSet.DeepCopy()
        updated.Status = *status

        if err := r.Status().Patch(ctx, updated, client.MergeFrom(runnerSet)); err != nil {
            log.Info("Failed to patch runnerset status. Retrying immediately", "error", err.Error())
            return ctrl.Result{
                Requeue: true,
            }, nil
        }
    }

    return ctrl.Result{}, nil
}

func getStatefulSetTemplateHash(rs *appsv1.StatefulSet) (string, bool) {
    hash, ok := rs.Labels[LabelKeyRunnerTemplateHash]

    return hash, ok
}

func getRunnerSetSelector(runnerSet *v1alpha1.RunnerSet) *metav1.LabelSelector {
    selector := runnerSet.Spec.Selector
    if selector == nil {
        selector = &metav1.LabelSelector{MatchLabels: map[string]string{LabelKeyRunnerSetName: runnerSet.Name}}
    }

    return selector
}

var LabelKeyPodMutation = "actions-runner-controller/inject-registration-token"
var LabelValuePodMutation = "true"

func (r *RunnerSetReconciler) newStatefulSet(runnerSet *v1alpha1.RunnerSet) (*appsv1.StatefulSet, error) {
    runnerSetWithOverrides := *runnerSet.Spec.DeepCopy()

    for _, l := range r.CommonRunnerLabels {
        runnerSetWithOverrides.Labels = append(runnerSetWithOverrides.Labels, l)
    }

    // This label selector is used by default when rd.Spec.Selector is empty.
    runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerSetName, runnerSet.Name)

    runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyPodMutation, LabelValuePodMutation)

    template := corev1.Pod{
        ObjectMeta: runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta,
        Spec:       runnerSetWithOverrides.StatefulSetSpec.Template.Spec,
    }

    pod, err := newRunnerPod(template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.DockerImage, r.GitHubBaseURL, false)
    if err != nil {
        return nil, err
    }

    runnerSetWithOverrides.StatefulSetSpec.Template.ObjectMeta = pod.ObjectMeta
    runnerSetWithOverrides.StatefulSetSpec.Template.Spec = pod.Spec
    // NOTE: Seems like the only supported restart policy for statefulset is "Always"?
    // I got errors like the below when I tried to use "OnFailure":
    // StatefulSet.apps \"example-runnersetpg9rx\" is invalid: [spec.template.metadata.labels: Invalid value: map[string]string{\"runner-template-hash\"
    // :\"85d7578bd6\", \"runnerset-name\":\"example-runnerset\"}: `selector` does not match template `labels`, spec.
    // template.spec.restartPolicy: Unsupported value: \"OnFailure\": supported values: \"Always\"]
    runnerSetWithOverrides.StatefulSetSpec.Template.Spec.RestartPolicy = corev1.RestartPolicyAlways

    templateHash := ComputeHash(pod.Spec)

    // Add template hash label to selector.
    runnerSetWithOverrides.Template.ObjectMeta.Labels = CloneAndAddLabel(runnerSetWithOverrides.Template.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash)

    selector := getRunnerSetSelector(runnerSet)
    selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerTemplateHash, templateHash)
    selector = CloneSelectorAndAddLabel(selector, LabelKeyRunnerSetName, runnerSet.Name)
    selector = CloneSelectorAndAddLabel(selector, LabelKeyPodMutation, LabelValuePodMutation)

    runnerSetWithOverrides.StatefulSetSpec.Selector = selector

    rs := appsv1.StatefulSet{
        TypeMeta: metav1.TypeMeta{},
        ObjectMeta: metav1.ObjectMeta{
            Name:      runnerSet.ObjectMeta.Name,
            Namespace: runnerSet.ObjectMeta.Namespace,
            Labels:    CloneAndAddLabel(runnerSet.ObjectMeta.Labels, LabelKeyRunnerTemplateHash, templateHash),
        },
        Spec: runnerSetWithOverrides.StatefulSetSpec,
    }

    if err := ctrl.SetControllerReference(runnerSet, &rs, r.Scheme); err != nil {
        return &rs, err
    }

    return &rs, nil
}

func (r *RunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
    name := "runnerset-controller"
    if r.Name != "" {
        name = r.Name
    }

    r.Recorder = mgr.GetEventRecorderFor(name)

    return ctrl.NewControllerManagedBy(mgr).
        For(&v1alpha1.RunnerSet{}).
        Owns(&appsv1.StatefulSet{}).
        Named(name).
        Complete(r)
}
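As with the token injector, the manager wiring for this reconciler lives outside the diff. A sketch under the same assumptions (hypothetical variable names, with the images and base URL presumably coming from flags in main.go):

```go
package main

import (
    "os"

    ctrl "sigs.k8s.io/controller-runtime"

    "github.com/summerwind/actions-runner-controller/controllers"
    "github.com/summerwind/actions-runner-controller/github"
)

// setupRunnerSetController is a hypothetical helper called from main() once
// the manager, GitHub client, and image flags are available.
func setupRunnerSetController(mgr ctrl.Manager, ghClient *github.Client, runnerImage, dockerImage string) {
    r := &controllers.RunnerSetReconciler{
        Client:        mgr.GetClient(),
        Log:           ctrl.Log.WithName("controllers").WithName("RunnerSet"),
        Scheme:        mgr.GetScheme(),
        GitHubBaseURL: ghClient.GithubBaseURL, // assumed to match the runner controller's base URL
        RunnerImage:   runnerImage,
        DockerImage:   dockerImage,
    }
    if err := r.SetupWithManager(mgr); err != nil {
        ctrl.Log.Error(err, "unable to set up runnerset-controller")
        os.Exit(1)
    }
}
```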
@@ -17,11 +17,12 @@ limitations under the License.
package controllers

import (
    "github.com/onsi/ginkgo/config"
    "os"
    "path/filepath"
    "testing"

    "github.com/onsi/ginkgo/config"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

@@ -30,6 +31,7 @@ import (
    "k8s.io/client-go/rest"
    "sigs.k8s.io/controller-runtime/pkg/client"
    "sigs.k8s.io/controller-runtime/pkg/envtest"
    "sigs.k8s.io/controller-runtime/pkg/envtest/printer"
    logf "sigs.k8s.io/controller-runtime/pkg/log"
    "sigs.k8s.io/controller-runtime/pkg/log/zap"
    // +kubebuilder:scaffold:imports
@@ -45,15 +47,15 @@ var testEnv *envtest.Environment
func TestAPIs(t *testing.T) {
    RegisterFailHandler(Fail)

    config.GinkgoConfig.FocusString = os.Getenv("GINKGO_FOCUS")
    config.GinkgoConfig.FocusStrings = append(config.GinkgoConfig.FocusStrings, os.Getenv("GINKGO_FOCUS"))

    RunSpecsWithDefaultAndCustomReporters(t,
        "Controller Suite",
        []Reporter{envtest.NewlineReporter{}})
        []Reporter{printer.NewlineReporter{}})
}

var _ = BeforeSuite(func(done Done) {
    logf.SetLogger(zap.LoggerTo(GinkgoWriter, true))
    logf.SetLogger(zap.New(zap.UseDevMode(true), zap.WriteTo(GinkgoWriter)))

    var apiServerFlags []string
