Compare commits

...

5 Commits

Author SHA1 Message Date
Nikola Jokic
104bc6b0b0 Fix chart version for publishing (#4415) 2026-03-19 18:13:17 +00:00
Nikola Jokic
8b7f232dc4 Prepare 0.14.0 release (#4413) 2026-03-19 18:53:37 +01:00
Nikola Jokic
19f22b85e7 Add @steve-glass to CODEOWNERS (#4414) 2026-03-19 18:24:00 +01:00
Nikola Jokic
802dc28d38 Add multi-label support to scalesets (#4408) 2026-03-19 15:29:40 +01:00
Nikola Jokic
9bc1c9e53e Shutdown the scaleset when runner is deprecated (#4404)
Co-authored-by: Copilot Autofix powered by AI <175728472+Copilot@users.noreply.github.com>
2026-03-19 13:30:20 +01:00
59 changed files with 1071 additions and 296 deletions

View File

@@ -23,11 +23,21 @@ on:
required: true
type: boolean
default: false
publish_gha_runner_scale_set_controller_experimental_chart:
description: "Publish new helm chart for gha-runner-scale-set-controller-experimental"
required: true
type: boolean
default: false
publish_gha_runner_scale_set_chart:
description: "Publish new helm chart for gha-runner-scale-set"
required: true
type: boolean
default: false
publish_gha_runner_scale_set_experimental_chart:
description: "Publish new helm chart for gha-runner-scale-set-experimental"
required: true
type: boolean
default: false
env:
HELM_VERSION: v3.8.0
@@ -159,6 +169,54 @@ jobs:
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- gha-runner-scale-set-controller Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-gha-runner-scale-set-controller-experimental:
if: ${{ inputs.publish_gha_runner_scale_set_controller_experimental_chart == true }}
needs: build-push-image
name: Publish Helm chart for gha-runner-scale-set-controller-experimental
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}
- name: Publish new helm chart for gha-runner-scale-set-controller-experimental
run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-controller-experimental/Chart.yaml | grep version: | cut -d '"' -f 2)
echo "GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}" >> $GITHUB_ENV
helm package charts/gha-runner-scale-set-controller-experimental/ --version="${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}"
helm push gha-runner-scale-set-controller-experimental-"${GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
- name: Job summary
run: |
echo "New helm chart for gha-runner-scale-set-controller-experimental published successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolved_ref }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- gha-runner-scale-set-controller-experimental Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CONTROLLER_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-gha-runner-scale-set:
if: ${{ inputs.publish_gha_runner_scale_set_chart == true }}
needs: build-push-image
@@ -206,3 +264,52 @@ jobs:
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolvedRef }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- gha-runner-scale-set Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY
publish-helm-chart-gha-runner-scale-set-experimental:
if: ${{ inputs.publish_gha_runner_scale_set_experimental_chart == true }}
needs: build-push-image
name: Publish Helm chart for gha-runner-scale-set-experimental
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v6
with:
# If inputs.ref is empty, it'll resolve to the default branch
ref: ${{ inputs.ref }}
- name: Resolve parameters
id: resolve_parameters
run: |
resolvedRef="${{ inputs.ref }}"
if [ -z "$resolvedRef" ]
then
resolvedRef="${{ github.ref }}"
fi
echo "resolved_ref=$resolvedRef" >> $GITHUB_OUTPUT
echo "INFO: Resolving short SHA for $resolvedRef"
echo "short_sha=$(git rev-parse --short $resolvedRef)" >> $GITHUB_OUTPUT
echo "INFO: Normalizing repository name (lowercase)"
echo "repository_owner=$(echo ${{ github.repository_owner }} | tr '[:upper:]' '[:lower:]')" >> $GITHUB_OUTPUT
- name: Set up Helm
uses: azure/setup-helm@1a275c3b69536ee54be43f2070a358922e12c8d4
with:
version: ${{ env.HELM_VERSION }}
- name: Publish new helm chart for gha-runner-scale-set-experimental
run: |
echo ${{ secrets.GITHUB_TOKEN }} | helm registry login ghcr.io --username ${{ github.actor }} --password-stdin
GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=$(cat charts/gha-runner-scale-set-experimental/Chart.yaml | grep version: | cut -d '"' -f 2)
echo "GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG=${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}" >> $GITHUB_ENV
helm package charts/gha-runner-scale-set-experimental/ --version="${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}"
helm push gha-runner-scale-set-experimental-"${GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG}".tgz oci://ghcr.io/${{ steps.resolve_parameters.outputs.repository_owner }}/actions-runner-controller-charts
- name: Job summary
run: |
echo "New helm chart for gha-runner-scale-set-experimental published successfully!" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Parameters:**" >> $GITHUB_STEP_SUMMARY
echo "- Ref: ${{ steps.resolve_parameters.outputs.resolved_ref }}" >> $GITHUB_STEP_SUMMARY
echo "- Short SHA: ${{ steps.resolve_parameters.outputs.short_sha }}" >> $GITHUB_STEP_SUMMARY
echo "- gha-runner-scale-set-experimental Chart version: ${{ env.GHA_RUNNER_SCALE_SET_CHART_VERSION_TAG }}" >> $GITHUB_STEP_SUMMARY

View File

@@ -140,3 +140,7 @@ jobs:
run: go test ./charts/gha-runner-scale-set/...
- name: Test gha-runner-scale-set-controller
run: go test ./charts/gha-runner-scale-set-controller/...
- name: Test gha-runner-scale-set-experimental
run: go test ./charts/gha-runner-scale-set-experimental/...
- name: Test gha-runner-scale-set-controller-experimental
run: go test ./charts/gha-runner-scale-set-controller-experimental/...

View File

@@ -1,2 +1,2 @@
# actions-runner-controller maintainers
* @mumoshu @toast-gear @actions/actions-launch @actions/actions-compute @nikola-jokic @rentziass
* @mumoshu @toast-gear @actions/actions-launch @actions/actions-compute @nikola-jokic @rentziass @steve-glass

View File

@@ -66,6 +66,9 @@ type AutoscalingRunnerSetSpec struct {
// +optional
RunnerScaleSetName string `json:"runnerScaleSetName,omitempty"`
// +optional
RunnerScaleSetLabels []string `json:"runnerScaleSetLabels,omitempty"`
// +optional
Proxy *ProxyConfig `json:"proxy,omitempty"`
@@ -315,7 +318,7 @@ type AutoscalingRunnerSetStatus struct {
CurrentRunners int `json:"currentRunners"`
// +optional
State string `json:"state"`
Phase AutoscalingRunnerSetPhase `json:"phase"`
// EphemeralRunner counts separated by the stage ephemeral runners are in, taken from the EphemeralRunnerSet
@@ -327,6 +330,30 @@ type AutoscalingRunnerSetStatus struct {
FailedEphemeralRunners int `json:"failedEphemeralRunners"`
}
type AutoscalingRunnerSetPhase string
const (
// AutoscalingRunnerSetPhasePending phase means that the listener is not
// yet started
AutoscalingRunnerSetPhasePending AutoscalingRunnerSetPhase = "Pending"
AutoscalingRunnerSetPhaseRunning AutoscalingRunnerSetPhase = "Running"
AutoscalingRunnerSetPhaseOutdated AutoscalingRunnerSetPhase = "Outdated"
)
func (ars *AutoscalingRunnerSet) Hash() string {
type data struct {
Spec *AutoscalingRunnerSetSpec
Labels map[string]string
}
d := &data{
Spec: ars.Spec.DeepCopy(),
Labels: ars.Labels,
}
return hash.ComputeTemplateHash(d)
}
func (ars *AutoscalingRunnerSet) ListenerSpecHash() string {
arsSpec := ars.Spec.DeepCopy()
spec := arsSpec

View File

@@ -48,7 +48,7 @@ type EphemeralRunner struct {
}
func (er *EphemeralRunner) IsDone() bool {
return er.Status.Phase == corev1.PodSucceeded || er.Status.Phase == corev1.PodFailed
return er.Status.Phase == EphemeralRunnerPhaseSucceeded || er.Status.Phase == EphemeralRunnerPhaseFailed || er.Status.Phase == EphemeralRunnerPhaseOutdated
}
func (er *EphemeralRunner) HasJob() bool {
@@ -143,14 +143,14 @@ type EphemeralRunnerStatus struct {
// The PodSucceeded phase should be set only when confirmed that EphemeralRunner
// actually executed the job and has been removed from the service.
// +optional
Phase corev1.PodPhase `json:"phase,omitempty"`
Phase EphemeralRunnerPhase `json:"phase,omitempty"`
// +optional
Reason string `json:"reason,omitempty"`
// +optional
Message string `json:"message,omitempty"`
// +optional
RunnerId int `json:"runnerId,omitempty"`
RunnerID int `json:"runnerId,omitempty"`
// +optional
RunnerName string `json:"runnerName,omitempty"`
@@ -158,7 +158,7 @@ type EphemeralRunnerStatus struct {
Failures map[string]metav1.Time `json:"failures,omitempty"`
// +optional
JobRequestId int64 `json:"jobRequestId,omitempty"`
JobRequestID int64 `json:"jobRequestId,omitempty"`
// +optional
JobID string `json:"jobId,omitempty"`
@@ -170,12 +170,33 @@ type EphemeralRunnerStatus struct {
JobWorkflowRef string `json:"jobWorkflowRef,omitempty"`
// +optional
WorkflowRunId int64 `json:"workflowRunId,omitempty"`
WorkflowRunID int64 `json:"workflowRunId,omitempty"`
// +optional
JobDisplayName string `json:"jobDisplayName,omitempty"`
}
// EphemeralRunnerPhase is the phase of the ephemeral runner.
// It must be a superset of the pod phase.
type EphemeralRunnerPhase string
const (
// EphemeralRunnerPhasePending is a phase set when the ephemeral runner is
// being provisioned and is not yet online.
EphemeralRunnerPhasePending EphemeralRunnerPhase = "Pending"
// EphemeralRunnerPhaseRunning is a phase set when the ephemeral runner is online and
// waiting for a job to execute.
EphemeralRunnerPhaseRunning EphemeralRunnerPhase = "Running"
// EphemeralRunnerPhaseSucceeded is a phase set when the ephemeral runner
// successfully executed the job and has been removed from the service.
EphemeralRunnerPhaseSucceeded EphemeralRunnerPhase = "Succeeded"
// EphemeralRunnerPhaseFailed is a phase set when the ephemeral runner
// fails with unrecoverable failure.
EphemeralRunnerPhaseFailed EphemeralRunnerPhase = "Failed"
// EphemeralRunnerPhaseOutdated is a special phase that indicates the runner is outdated and should be upgraded.
EphemeralRunnerPhaseOutdated EphemeralRunnerPhase = "Outdated"
)
func (s *EphemeralRunnerStatus) LastFailure() metav1.Time {
var maxTime metav1.Time
if len(s.Failures) == 0 {

View File

@@ -42,8 +42,20 @@ type EphemeralRunnerSetStatus struct {
RunningEphemeralRunners int `json:"runningEphemeralRunners"`
// +optional
FailedEphemeralRunners int `json:"failedEphemeralRunners"`
// +optional
Phase EphemeralRunnerSetPhase `json:"phase"`
}
// EphemeralRunnerSetPhase is the phase of the ephemeral runner set resource
type EphemeralRunnerSetPhase string
const (
EphemeralRunnerSetPhaseRunning EphemeralRunnerSetPhase = "Running"
// EphemeralRunnerSetPhaseOutdated is set when at least one ephemeral runner
// is in the Outdated phase
EphemeralRunnerSetPhaseOutdated EphemeralRunnerSetPhase = "Outdated"
)
// +kubebuilder:object:root=true
// +kubebuilder:subresource:status
// +kubebuilder:printcolumn:JSONPath=".spec.replicas",name="DesiredReplicas",type="integer"

View File

@@ -227,6 +227,11 @@ func (in *AutoscalingRunnerSetList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AutoscalingRunnerSetSpec) DeepCopyInto(out *AutoscalingRunnerSetSpec) {
*out = *in
if in.RunnerScaleSetLabels != nil {
in, out := &in.RunnerScaleSetLabels, &out.RunnerScaleSetLabels
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Proxy != nil {
in, out := &in.Proxy, &out.Proxy
*out = new(ProxyConfig)

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.13.1
version: "0.14.0"
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.13.1"
appVersion: "0.14.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -8385,6 +8385,10 @@ spec:
type: object
runnerGroup:
type: string
runnerScaleSetLabels:
items:
type: string
type: array
runnerScaleSetName:
type: string
template:
@@ -16547,10 +16551,10 @@ spec:
type: integer
pendingEphemeralRunners:
type: integer
phase:
type: string
runningEphemeralRunners:
type: integer
state:
type: string
type: object
type: object
served: true

View File

@@ -8318,6 +8318,9 @@ spec:
type: integer
pendingEphemeralRunners:
type: integer
phase:
description: EphemeralRunnerSetPhase is the phase of the ephemeral runner set resource
type: string
runningEphemeralRunners:
type: integer
required:

View File

@@ -0,0 +1,52 @@
package tests
import (
"os"
"path/filepath"
"strings"
"testing"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
appsv1 "k8s.io/api/apps/v1"
)
type Chart struct {
Version string `yaml:"version"`
AppVersion string `yaml:"appVersion"`
}
func TestTemplate_RenderedDeployment_UsesChartMetadataLabels(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller-experimental")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-arc"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
var deployment appsv1.Deployment
helm.UnmarshalK8SYaml(t, output, &deployment)
assert.Equal(t, "gha-rs-controller-"+chart.Version, deployment.Labels["helm.sh/chart"])
assert.Equal(t, chart.AppVersion, deployment.Labels["app.kubernetes.io/version"])
}

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.13.1
version: 0.14.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.13.1"
appVersion: "0.14.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -8385,6 +8385,10 @@ spec:
type: object
runnerGroup:
type: string
runnerScaleSetLabels:
items:
type: string
type: array
runnerScaleSetName:
type: string
template:
@@ -16547,10 +16551,10 @@ spec:
type: integer
pendingEphemeralRunners:
type: integer
phase:
type: string
runningEphemeralRunners:
type: integer
state:
type: string
type: object
type: object
served: true

View File

@@ -8318,6 +8318,9 @@ spec:
type: integer
pendingEphemeralRunners:
type: integer
phase:
description: EphemeralRunnerSetPhase is the phase of the ephemeral runner set resource
type: string
runningEphemeralRunners:
type: integer
required:

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: "0.13.1"
version: "0.14.0"
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.13.1"
appVersion: "0.14.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -83,6 +83,18 @@ spec:
githubConfigSecret: {{ include "github-secret.name" . | quote }}
runnerGroup: {{ .Values.scaleset.runnerGroup | quote }}
runnerScaleSetName: {{ .Values.scaleset.name | quote }}
{{- if and .Values.scaleset.labels (kindIs "slice" .Values.scaleset.labels) }}
{{- range .Values.scaleset.labels }}
{{- if empty . }}
{{- fail "scaleset.labels contains an empty string, each label must be a non-empty string of less than 256 characters" }}
{{- end }}
{{- if ge (len .) 256 }}
{{- fail "scaleset.labels contains a label that is 256 characters or more, each label must be a non-empty string of less than 256 characters" }}
{{- end }}
{{- end }}
runnerScaleSetLabels:
{{- toYaml .Values.scaleset.labels | nindent 4 }}
{{- end }}
{{- if .Values.githubServerTLS }}
githubServerTLS:

View File

@@ -12,12 +12,7 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"
@@ -33,9 +28,9 @@ tests:
- equal:
path: metadata.labels["app.kubernetes.io/part-of"]
value: "gha-rs"
- equal:
- notEqual:
path: metadata.labels["app.kubernetes.io/version"]
value: "0.13.1"
value: ""
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]
value: "test-name"
@@ -66,9 +61,6 @@ tests:
- equal:
path: metadata.labels["environment"]
value: "production"
- equal:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"
@@ -84,9 +76,9 @@ tests:
- equal:
path: metadata.labels["app.kubernetes.io/part-of"]
value: "gha-rs"
- equal:
- notEqual:
path: metadata.labels["app.kubernetes.io/version"]
value: "0.13.1"
value: ""
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]
value: "test-name"
@@ -117,9 +109,6 @@ tests:
- equal:
path: metadata.labels["owner"]
value: "devops"
- equal:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"
@@ -135,9 +124,9 @@ tests:
- equal:
path: metadata.labels["app.kubernetes.io/part-of"]
value: "gha-rs"
- equal:
- notEqual:
path: metadata.labels["app.kubernetes.io/version"]
value: "0.13.1"
value: ""
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]
value: "test-name"
@@ -176,9 +165,6 @@ tests:
- equal:
path: metadata.labels["environment"]
value: "staging"
- equal:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"
@@ -194,9 +180,9 @@ tests:
- equal:
path: metadata.labels["app.kubernetes.io/part-of"]
value: "gha-rs"
- equal:
- notEqual:
path: metadata.labels["app.kubernetes.io/version"]
value: "0.13.1"
value: ""
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]
value: "test-name"

View File

@@ -26,8 +26,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: spec.template.metadata.labels["purpose"]
@@ -35,12 +33,12 @@ tests:
- equal:
path: spec.template.metadata.labels["team"]
value: "platform"
- equal:
path: spec.template.metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: spec.template.metadata.labels["app.kubernetes.io/name"]
value: "test-name"
- notEqual:
path: spec.template.metadata.labels["app.kubernetes.io/version"]
value: ""
- equal:
path: spec.template.metadata.labels["app.kubernetes.io/managed-by"]
value: "Helm"

View File

@@ -0,0 +1,58 @@
suite: "AutoscalingRunnerSet scale set labels"
templates:
- autoscalingrunnserset.yaml
tests:
- it: should apply scaleset labels slice as runnerScaleSetLabels
set:
scaleset.name: "test"
scaleset.labels:
- "linux"
- "x64"
auth.url: "https://github.com/org"
auth.githubToken: "gh_token12345"
controllerServiceAccount.name: "arc"
controllerServiceAccount.namespace: "arc-system"
release:
name: "test-name"
namespace: "test-namespace"
asserts:
- equal:
path: spec.runnerScaleSetLabels[0]
value: "linux"
- equal:
path: spec.runnerScaleSetLabels[1]
value: "x64"
- it: should fail when a scaleset label is empty
set:
scaleset.name: "test"
scaleset.labels:
- "linux"
- ""
auth.url: "https://github.com/org"
auth.githubToken: "gh_token12345"
controllerServiceAccount.name: "arc"
controllerServiceAccount.namespace: "arc-system"
release:
name: "test-name"
namespace: "test-namespace"
asserts:
- failedTemplate:
errorMessage: "scaleset.labels contains an empty string, each label must be a non-empty string of less than 256 characters"
- it: should fail when a scaleset label is 256 characters or more
set:
scaleset.name: "test"
scaleset.labels:
- "linux"
- "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
auth.url: "https://github.com/org"
auth.githubToken: "gh_token12345"
controllerServiceAccount.name: "arc"
controllerServiceAccount.namespace: "arc-system"
release:
name: "test-name"
namespace: "test-namespace"
asserts:
- failedTemplate:
errorMessage: "scaleset.labels contains a label that is 256 characters or more, each label must be a non-empty string of less than 256 characters"

View File

@@ -10,12 +10,7 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"
@@ -31,9 +26,9 @@ tests:
- equal:
path: metadata.labels["app.kubernetes.io/part-of"]
value: "gha-rs"
- equal:
- notEqual:
path: metadata.labels["app.kubernetes.io/version"]
value: "0.13.1"
value: ""
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]
value: "test-name"
@@ -109,9 +104,9 @@ tests:
name: "test-name"
namespace: "test-namespace"
asserts:
- equal:
- notEqual:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
value: "bad"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"

View File

@@ -12,8 +12,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: apiVersion
@@ -159,12 +157,10 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
- notEqual:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
value: "bad"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"

View File

@@ -12,8 +12,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: apiVersion

View File

@@ -12,8 +12,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: apiVersion
@@ -144,12 +142,10 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
- notEqual:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
value: "bad"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"

View File

@@ -9,12 +9,7 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: metadata.labels["helm.sh/chart"]
value: "gha-rs-0.13.1"
- equal:
path: metadata.labels["app.kubernetes.io/name"]
value: "test-name"
@@ -30,9 +25,9 @@ tests:
- equal:
path: metadata.labels["app.kubernetes.io/part-of"]
value: "gha-rs"
- equal:
- notEqual:
path: metadata.labels["app.kubernetes.io/version"]
value: "0.13.1"
value: ""
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]
value: "test-name"
@@ -58,8 +53,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: metadata.labels["owner"]
@@ -83,8 +76,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: metadata.labels["actions.github.com/scale-set-name"]

View File

@@ -6,8 +6,6 @@ tests:
release:
name: "test-name"
namespace: "test-namespace"
chart:
appVersion: "0.13.1"
asserts:
- equal:
path: apiVersion

View File

@@ -0,0 +1,61 @@
package tests
import (
"os"
"path/filepath"
"strings"
"testing"
v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/gruntwork-io/terratest/modules/helm"
"github.com/gruntwork-io/terratest/modules/k8s"
"github.com/gruntwork-io/terratest/modules/logger"
"github.com/gruntwork-io/terratest/modules/random"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"gopkg.in/yaml.v2"
)
type Chart struct {
Version string `yaml:"version"`
AppVersion string `yaml:"appVersion"`
}
func TestTemplate_RenderedAutoscalingRunnerSet_UsesChartMetadataLabels(t *testing.T) {
t.Parallel()
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-experimental")
require.NoError(t, err)
chartContent, err := os.ReadFile(filepath.Join(helmChartPath, "Chart.yaml"))
require.NoError(t, err)
chart := new(Chart)
err = yaml.Unmarshal(chartContent, chart)
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"scaleset.name": "test",
"auth.url": "https://github.com/actions",
"auth.githubToken": "gh_token12345",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnserset.yaml"})
var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
assert.Equal(t, "gha-rs-"+chart.Version, autoscalingRunnerSet.Labels["helm.sh/chart"])
assert.Equal(t, chart.AppVersion, autoscalingRunnerSet.Labels["app.kubernetes.io/version"])
assert.Equal(t, "gha-rs-"+chart.Version, autoscalingRunnerSet.Spec.Template.Labels["helm.sh/chart"])
assert.Equal(t, chart.AppVersion, autoscalingRunnerSet.Spec.Template.Labels["app.kubernetes.io/version"])
}

View File

@@ -5,6 +5,10 @@ scaleset:
# Name of the scaleset
name: ""
runnerGroup: "default"
# labels is an optional list of strings that will be applied to the scaleset
# allowing scaleset to be selected by the listener based on the labels specified in the workflow.
# https://docs.github.com/en/actions/how-tos/manage-runners/self-hosted-runners/apply-labels
labels: []
## minRunners is the min number of idle runners. The target number of runners created will be
## calculated as a sum of minRunners and the number of jobs assigned to the scale set.
# minRunners: 0

View File

@@ -15,13 +15,13 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.13.1
version: 0.14.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "0.13.1"
appVersion: "0.14.0"
home: https://github.com/actions/actions-runner-controller

View File

@@ -75,6 +75,18 @@ spec:
{{- with .Values.runnerScaleSetName }}
runnerScaleSetName: {{ . }}
{{- end }}
{{- if and .Values.scaleSetLabels (kindIs "slice" .Values.scaleSetLabels) }}
{{- range .Values.scaleSetLabels }}
{{- if empty . }}
{{- fail "scaleSetLabels contains an empty string, each label must be a non-empty string of less than 256 characters" }}
{{- end }}
{{- if ge (len .) 256 }}
{{- fail "scaleSetLabels contains a label that is 256 characters or more, each label must be a non-empty string of less than 256 characters" }}
{{- end }}
{{- end }}
runnerScaleSetLabels:
{{- toYaml .Values.scaleSetLabels | nindent 4 }}
{{- end }}
{{- if .Values.githubServerTLS }}
githubServerTLS:

View File

@@ -474,6 +474,37 @@ func TestTemplateRenderedAutoScalingRunnerSet_RunnerScaleSetName(t *testing.T) {
assert.Equal(t, "ghcr.io/actions/actions-runner:latest", ars.Spec.Template.Spec.Containers[0].Image)
}
func TestTemplateRenderedAutoScalingRunnerSet_ScaleSetLabels(t *testing.T) {
t.Parallel()
// Path to the helm chart we will test
helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
require.NoError(t, err)
releaseName := "test-runners"
namespaceName := "test-" + strings.ToLower(random.UniqueId())
options := &helm.Options{
Logger: logger.Discard,
SetValues: map[string]string{
"githubConfigUrl": "https://github.com/actions",
"githubConfigSecret.github_token": "gh_token12345",
"scaleSetLabels[0]": "linux",
"scaleSetLabels[1]": "x64",
"controllerServiceAccount.name": "arc",
"controllerServiceAccount.namespace": "arc-system",
},
KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
}
output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
var ars v1alpha1.AutoscalingRunnerSet
helm.UnmarshalK8SYaml(t, output, &ars)
assert.Equal(t, []string{"linux", "x64"}, ars.Spec.RunnerScaleSetLabels)
}
func TestTemplateRenderedAutoScalingRunnerSet_ProvideMetadata(t *testing.T) {
t.Parallel()

View File

@@ -2,6 +2,8 @@
## ex: https://github.com/myorg/myrepo or https://github.com/myorg or https://github.com/enterprises/myenterprise
githubConfigUrl: ""
scaleSetLabels: []
## githubConfigSecret is the k8s secret information to use when authenticating via the GitHub API.
## You can choose to supply:
## A) a PAT token,

View File

@@ -110,10 +110,10 @@ func (w *Scaler) HandleJobStarted(ctx context.Context, jobInfo *scaleset.JobStar
patch, err := json.Marshal(
&v1alpha1.EphemeralRunner{
Status: v1alpha1.EphemeralRunnerStatus{
JobRequestId: jobInfo.RunnerRequestID,
JobRequestID: jobInfo.RunnerRequestID,
JobRepositoryName: fmt.Sprintf("%s/%s", jobInfo.OwnerName, jobInfo.RepositoryName),
JobID: jobInfo.JobID,
WorkflowRunId: jobInfo.WorkflowRunID,
WorkflowRunID: jobInfo.WorkflowRunID,
JobWorkflowRef: jobInfo.JobWorkflowRef,
JobDisplayName: jobInfo.JobDisplayName,
},

View File

@@ -8385,6 +8385,10 @@ spec:
type: object
runnerGroup:
type: string
runnerScaleSetLabels:
items:
type: string
type: array
runnerScaleSetName:
type: string
template:
@@ -16547,10 +16551,10 @@ spec:
type: integer
pendingEphemeralRunners:
type: integer
phase:
type: string
runningEphemeralRunners:
type: integer
state:
type: string
type: object
type: object
served: true

View File

@@ -8318,6 +8318,9 @@ spec:
type: integer
pendingEphemeralRunners:
type: integer
phase:
description: EphemeralRunnerSetPhase is the phase of the ephemeral runner set resource
type: string
runningEphemeralRunners:
type: integer
required:

View File

@@ -22,6 +22,7 @@ import (
"sort"
"strconv"
"strings"
"time"
"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
"github.com/actions/actions-runner-controller/build"
@@ -46,6 +47,7 @@ const (
// This is used to determine if the values have changed, so we can
// re-create listener.
annotationKeyValuesHash = "actions.github.com/values-hash"
annotationKeyChangeHash = "actions.github.com/change-hash"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
runnerScaleSetIDAnnotationKey = "runner-scale-set-id"
@@ -104,32 +106,16 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}
log.Info("Deleting resources")
done, err := r.cleanupListener(ctx, autoscalingRunnerSet, log)
done, err := r.cleanUpResources(ctx, autoscalingRunnerSet, nil, log)
if err != nil {
log.Error(err, "Failed to clean up listener")
log.Error(err, "Failed to clean up resources during deletion")
return ctrl.Result{}, err
}
if !done {
// we are going to get notified anyway to proceed with rest of the
// cleanup. No need to re-queue
log.Info("Waiting for listener to be deleted")
return ctrl.Result{}, nil
}
done, err = r.cleanupEphemeralRunnerSets(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to clean up ephemeral runner sets")
return ctrl.Result{}, err
}
if !done {
log.Info("Waiting for ephemeral runner sets to be deleted")
return ctrl.Result{}, nil
}
err = r.deleteRunnerScaleSet(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to delete runner scale set")
return ctrl.Result{}, err
log.Info("Waiting for resources to be cleaned up before removing finalizer")
return ctrl.Result{
RequeueAfter: 5 * time.Second,
}, nil
}
if err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log); err != nil {
@@ -176,34 +162,54 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}
log.Info("Successfully added finalizer")
return ctrl.Result{}, nil
}
scaleSetIDRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey]
if !ok {
// Need to create a new runner scale set on Actions service
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
if targetHash := autoscalingRunnerSet.Hash(); autoscalingRunnerSet.Annotations[annotationKeyChangeHash] != targetHash {
if err := patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
if obj.Annotations == nil {
obj.Annotations = map[string]string{}
}
obj.Annotations[annotationKeyChangeHash] = targetHash
}); err != nil {
log.Error(err, "Failed to update autoscaling runner set with change hash annotation")
return ctrl.Result{}, err
}
if err := r.updateStatus(ctx, autoscalingRunnerSet, nil, v1alpha1.AutoscalingRunnerSetPhasePending, log); err != nil {
log.Error(err, "Failed to update autoscaling runner set status to pending")
return ctrl.Result{}, err
}
}
if id, err := strconv.Atoi(scaleSetIDRaw); err != nil || id <= 0 {
log.Info("Runner scale set id annotation is not an id, or is <= 0. Creating a new runner scale set.")
// something modified the scaleSetId. Try to create one
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
}
outdated := autoscalingRunnerSet.Status.Phase == v1alpha1.AutoscalingRunnerSetPhaseOutdated
// Make sure the runner group of the scale set is up to date
currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) {
log.Info("AutoScalingRunnerSet runner group changed. Updating the runner scale set.")
return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log)
}
if !outdated {
scaleSetIDRaw, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIDAnnotationKey]
if !ok {
// Need to create a new runner scale set on Actions service
log.Info("Runner scale set id annotation does not exist. Creating a new runner scale set.")
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
}
// Make sure the runner scale set name is up to date
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
if id, err := strconv.Atoi(scaleSetIDRaw); err != nil || id <= 0 {
log.Info("Runner scale set id annotation is not an id, or is <= 0. Creating a new runner scale set.")
// something modified the scaleSetId. Try to create one
return r.createRunnerScaleSet(ctx, autoscalingRunnerSet, log)
}
// Make sure the runner group of the scale set is up to date
currentRunnerGroupName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerGroup) > 0 && !strings.EqualFold(currentRunnerGroupName, autoscalingRunnerSet.Spec.RunnerGroup)) {
log.Info("AutoScalingRunnerSet runner group changed. Updating the runner scale set.")
return r.updateRunnerScaleSetRunnerGroup(ctx, autoscalingRunnerSet, log)
}
// Make sure the runner scale set name is up to date
currentRunnerScaleSetName, ok := autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerScaleSetName]
if !ok || (len(autoscalingRunnerSet.Spec.RunnerScaleSetName) > 0 && !strings.EqualFold(currentRunnerScaleSetName, autoscalingRunnerSet.Spec.RunnerScaleSetName)) {
log.Info("AutoScalingRunnerSet runner scale set name changed. Updating the runner scale set.")
return r.updateRunnerScaleSetName(ctx, autoscalingRunnerSet, log)
}
}
existingRunnerSets, err := r.listEphemeralRunnerSets(ctx, autoscalingRunnerSet)
@@ -213,7 +219,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
}
latestRunnerSet := existingRunnerSets.latest()
if latestRunnerSet == nil {
if latestRunnerSet == nil && !outdated {
log.Info("Latest runner set does not exist. Creating a new runner set.")
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
}
@@ -222,6 +228,8 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Annotations[annotationKeyRunnerSpecHash])
}
outdated = outdated || (latestRunnerSet != nil && latestRunnerSet.Status.Phase == v1alpha1.EphemeralRunnerSetPhaseOutdated)
// Make sure the AutoscalingListener is up and running in the controller namespace
listener := new(v1alpha1.AutoscalingListener)
listenerFound := true
@@ -235,6 +243,30 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
log.Info("AutoscalingListener does not exist.")
}
if outdated {
log.Info("Ephemeral runner set is outdated")
if autoscalingRunnerSet.Status.Phase != v1alpha1.AutoscalingRunnerSetPhaseOutdated {
if err := r.updateStatus(ctx, autoscalingRunnerSet, latestRunnerSet, v1alpha1.AutoscalingRunnerSetPhaseOutdated, log); err != nil {
log.Error(err, "Failed to update autoscaling runner set status to outdated")
return ctrl.Result{}, err
}
}
done, err := r.cleanUpResources(ctx, autoscalingRunnerSet, latestRunnerSet, log)
if err != nil {
log.Error(err, "Failed to clean up resources for outdated runner set")
return ctrl.Result{}, err
}
if done {
return ctrl.Result{}, nil
}
log.Info("Waiting for resources to be cleaned up for outdated runner set")
return ctrl.Result{
RequeueAfter: 5 * time.Second,
}, nil
}
// Our listener pod is out of date, so we need to delete it to get a new recreate.
listenerValuesHashChanged := listener.Annotations[annotationKeyValuesHash] != autoscalingRunnerSet.Annotations[annotationKeyValuesHash]
listenerSpecHashChanged := listener.Annotations[annotationKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash()
@@ -291,22 +323,71 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
return r.createAutoScalingListenerForRunnerSet(ctx, autoscalingRunnerSet, latestRunnerSet, log)
}
// Update the status of autoscaling runner set.
if latestRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners {
if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Status.CurrentRunners = latestRunnerSet.Status.CurrentReplicas
obj.Status.PendingEphemeralRunners = latestRunnerSet.Status.PendingEphemeralRunners
obj.Status.RunningEphemeralRunners = latestRunnerSet.Status.RunningEphemeralRunners
obj.Status.FailedEphemeralRunners = latestRunnerSet.Status.FailedEphemeralRunners
}); err != nil {
log.Error(err, "Failed to update autoscaling runner set status with current runner count")
return ctrl.Result{}, err
}
if err := r.updateStatus(ctx, autoscalingRunnerSet, latestRunnerSet, v1alpha1.AutoscalingRunnerSetPhaseRunning, log); err != nil {
log.Error(err, "Failed to update autoscaling runner set status to running")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
func (r *AutoscalingRunnerSetReconciler) cleanUpResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, latestRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
log.Info("Deleting the listener")
done, err := r.cleanupListener(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to clean up listener")
return false, err
}
if !done {
log.Info("Waiting for listener to be deleted")
return false, nil
}
log.Info("deleting ephemeral runner sets")
done, err = r.cleanupEphemeralRunnerSets(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to clean up ephemeral runner sets")
return false, err
}
if !done {
log.Info("Waiting for ephemeral runner sets to be deleted")
return false, nil
}
log.Info("deleting runner scale set")
err = r.deleteRunnerScaleSet(ctx, autoscalingRunnerSet, log)
if err != nil {
log.Error(err, "Failed to delete runner scale set")
return false, err
}
return true, nil
}
// Update the status of autoscaling runner set if necessary
func (r *AutoscalingRunnerSetReconciler) updateStatus(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, phase v1alpha1.AutoscalingRunnerSetPhase, log logr.Logger) error {
countDiff := ephemeralRunnerSet != nil && ephemeralRunnerSet.Status.CurrentReplicas != autoscalingRunnerSet.Status.CurrentRunners
phaseDiff := phase != autoscalingRunnerSet.Status.Phase
if countDiff || phaseDiff {
if err := patchSubResource(ctx, r.Status(), autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
obj.Status.Phase = phase
var ephemeralRunnerSetStatus v1alpha1.EphemeralRunnerSetStatus
if ephemeralRunnerSet != nil {
ephemeralRunnerSetStatus = ephemeralRunnerSet.Status
}
obj.Status.CurrentRunners = ephemeralRunnerSetStatus.CurrentReplicas
obj.Status.PendingEphemeralRunners = ephemeralRunnerSetStatus.PendingEphemeralRunners
obj.Status.RunningEphemeralRunners = ephemeralRunnerSetStatus.RunningEphemeralRunners
obj.Status.FailedEphemeralRunners = ephemeralRunnerSetStatus.FailedEphemeralRunners
}); err != nil {
log.Error(err, "Failed to update autoscaling runner set status with current runner count")
return err
}
}
return nil
}
// Prevents overprovisioning of runners.
// We reach this code path when runner scale set has been patched with a new runner spec but there are still running ephemeral runners.
// The safest approach is to wait for the running ephemeral runners to finish before creating a new runner set.
@@ -424,17 +505,35 @@ func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Contex
}
if runnerScaleSet == nil {
labels := []scaleset.Label{
{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
Type: "System",
},
}
if labelCount := len(autoscalingRunnerSet.Spec.RunnerScaleSetLabels); labelCount > 0 {
unique := make(map[string]bool, labelCount+1)
unique[autoscalingRunnerSet.Spec.RunnerScaleSetName] = true
for _, label := range autoscalingRunnerSet.Spec.RunnerScaleSetLabels {
if _, exists := unique[label]; exists {
logger.Info("Duplicate label found. Skipping adding duplicate label to runner scale set", "label", label)
continue
}
labels = append(labels, scaleset.Label{
Name: label,
Type: "System",
})
unique[label] = true
}
}
runnerScaleSet, err = actionsClient.CreateRunnerScaleSet(
ctx,
&scaleset.RunnerScaleSet{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
RunnerGroupID: runnerGroupID,
Labels: []scaleset.Label{
{
Name: autoscalingRunnerSet.Spec.RunnerScaleSetName,
Type: "System",
},
},
Labels: labels,
RunnerSetting: scaleset.RunnerSetting{
DisableUpdate: true,
},

View File

@@ -171,10 +171,18 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
return "", nil
}
return fmt.Sprintf("%s_%s", created.Annotations[runnerScaleSetIDAnnotationKey], created.Annotations[AnnotationKeyGitHubRunnerGroupName]), nil
return fmt.Sprintf(
"%s_%s",
created.Annotations[runnerScaleSetIDAnnotationKey],
created.Annotations[AnnotationKeyGitHubRunnerGroupName],
), nil
},
autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).Should(BeEquivalentTo("1_testgroup"), "RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation")
autoscalingRunnerSetTestInterval,
).Should(
BeEquivalentTo("1_testgroup"),
"RunnerScaleSet should be created/fetched and update the AutoScalingRunnerSet's annotation",
)
Eventually(
func() (string, error) {
@@ -548,7 +556,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{
CurrentRunners: activeRunnerSet.Status.CurrentReplicas,
State: "",
Phase: v1alpha1.AutoscalingRunnerSetPhaseRunning,
PendingEphemeralRunners: activeRunnerSet.Status.PendingEphemeralRunners,
RunningEphemeralRunners: activeRunnerSet.Status.RunningEphemeralRunners,
FailedEphemeralRunners: activeRunnerSet.Status.FailedEphemeralRunners,
@@ -654,7 +662,7 @@ var _ = Describe("Test AutoScalingRunnerSet controller", Ordered, func() {
desiredStatus := v1alpha1.AutoscalingRunnerSetStatus{
CurrentRunners: statusUpdate.Status.CurrentReplicas,
State: "",
Phase: v1alpha1.AutoscalingRunnerSetPhaseRunning,
PendingEphemeralRunners: statusUpdate.Status.PendingEphemeralRunners,
RunningEphemeralRunners: statusUpdate.Status.RunningEphemeralRunners,
FailedEphemeralRunners: statusUpdate.Status.FailedEphemeralRunners,

View File

@@ -12,6 +12,9 @@ const (
const (
EnvVarRunnerJITConfig = "ACTIONS_RUNNER_INPUT_JITCONFIG"
EnvVarRunnerExtraUserAgent = "GITHUB_ACTIONS_RUNNER_EXTRA_USER_AGENT"
// Environment variable setting the exit code to return when the runner version is deprecated.
// This is used by the runner to signal to the controller that it should switch off the scaleset.
EnvVarRunnerDeprecatedExitCode = "ACTIONS_RUNNER_RETURN_VERSION_DEPRECATED_EXIT_CODE"
)
// Environment variable names used to set proxy variables for containers

View File

@@ -201,7 +201,7 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
}
if ephemeralRunner.Status.RunnerId == 0 {
if ephemeralRunner.Status.RunnerID == 0 {
log.Info("Updating ephemeral runner status with runnerId and runnerName")
runnerID, err := strconv.Atoi(string(secret.Data["runnerId"]))
if err != nil {
@@ -216,12 +216,12 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
runnerName := string(secret.Data["runnerName"])
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.RunnerId = runnerID
obj.Status.RunnerID = runnerID
obj.Status.RunnerName = runnerName
}); err != nil {
return ctrl.Result{}, fmt.Errorf("failed to update runner status for RunnerId/RunnerName/RunnerJITConfig: %w", err)
}
ephemeralRunner.Status.RunnerId = runnerID
ephemeralRunner.Status.RunnerID = runnerID
ephemeralRunner.Status.RunnerName = runnerName
log.Info("Updated ephemeral runner status with runnerId and runnerName")
}
@@ -324,7 +324,8 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
}
if cs.State.Terminated.ExitCode == 0 {
switch cs.State.Terminated.ExitCode {
case 0:
log.Info("Runner container has succeeded but pod is in failed phase; Assume successful exit")
// If the pod is in a failed state, that means that at least one container exited with non-zero exit code.
// If the runner container exits with 0, we assume that the runner has finished successfully.
@@ -335,6 +336,12 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
case 7:
if err := r.markAsOutdated(ctx, ephemeralRunner, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Outdated")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
}
log.Error(
@@ -357,6 +364,13 @@ func (r *EphemeralRunnerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
}
return ctrl.Result{}, nil
case cs.State.Terminated.ExitCode == 7: // outdated
if err := r.markAsOutdated(ctx, ephemeralRunner, log); err != nil {
log.Error(err, "Failed to set ephemeral runner to phase Outdated")
return ctrl.Result{}, err
}
return ctrl.Result{}, nil
case cs.State.Terminated.ExitCode != 0: // failed
log.Info("Ephemeral runner container failed", "exitCode", cs.State.Terminated.ExitCode)
return ctrl.Result{}, r.deleteEphemeralRunnerOrPod(ctx, ephemeralRunner, pod, log)
@@ -390,7 +404,7 @@ func (r *EphemeralRunnerReconciler) deleteEphemeralRunnerOrPod(ctx context.Conte
log.Error(err, "Failed to get actions client for removing the runner from the service")
return nil
}
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerID)); err != nil {
log.Error(err, "Failed to remove the runner from the service")
return nil
}
@@ -549,7 +563,7 @@ func (r *EphemeralRunnerReconciler) cleanupRunnerLinkedSecrets(ctx context.Conte
func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, errMessage string, reason string, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Failed")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = corev1.PodFailed
obj.Status.Phase = v1alpha1.EphemeralRunnerPhaseFailed
obj.Status.Reason = reason
obj.Status.Message = errMessage
}); err != nil {
@@ -565,6 +579,24 @@ func (r *EphemeralRunnerReconciler) markAsFailed(ctx context.Context, ephemeralR
return nil
}
func (r *EphemeralRunnerReconciler) markAsOutdated(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, log logr.Logger) error {
log.Info("Updating ephemeral runner status to Outdated")
if err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = v1alpha1.EphemeralRunnerPhaseOutdated
obj.Status.Reason = "Outdated"
obj.Status.Message = "Runner is deprecated"
}); err != nil {
return fmt.Errorf("failed to update ephemeral runner status Phase/Message: %w", err)
}
log.Info("Removing the runner from the service")
if err := r.deleteRunnerFromService(ctx, ephemeralRunner, log); err != nil {
return fmt.Errorf("failed to remove the runner from service: %w", err)
}
return nil
}
// deletePodAsFailed is responsible for deleting the pod and updating the .Status.Failures for tracking failure count.
// It should not be responsible for setting the status to Failed.
//
@@ -722,7 +754,7 @@ func (r *EphemeralRunnerReconciler) createPod(ctx context.Context, runner *v1alp
log.Info("Created ephemeral runner pod",
"runnerScaleSetId", runner.Spec.RunnerScaleSetID,
"runnerName", runner.Status.RunnerName,
"runnerId", runner.Status.RunnerId,
"runnerId", runner.Status.RunnerID,
"configUrl", runner.Spec.GitHubConfigUrl,
"podName", newPod.Name)
@@ -765,7 +797,8 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
}
}
phaseChanged := ephemeralRunner.Status.Phase != pod.Status.Phase
phase := v1alpha1.EphemeralRunnerPhase(pod.Status.Phase)
phaseChanged := ephemeralRunner.Status.Phase != phase
readyChanged := ready != ephemeralRunner.Status.Ready
if !phaseChanged && !readyChanged {
@@ -780,7 +813,7 @@ func (r *EphemeralRunnerReconciler) updateRunStatusFromPod(ctx context.Context,
"ready", ready,
)
err := patchSubResource(ctx, r.Status(), ephemeralRunner, func(obj *v1alpha1.EphemeralRunner) {
obj.Status.Phase = pod.Status.Phase
obj.Status.Phase = phase
obj.Status.Ready = ready
obj.Status.Reason = pod.Status.Reason
obj.Status.Message = pod.Status.Message
@@ -799,13 +832,13 @@ func (r *EphemeralRunnerReconciler) deleteRunnerFromService(ctx context.Context,
return fmt.Errorf("failed to get actions client for runner: %w", err)
}
log.Info("Removing runner from the service", "runnerId", ephemeralRunner.Status.RunnerId)
err = client.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId))
log.Info("Removing runner from the service", "runnerId", ephemeralRunner.Status.RunnerID)
err = client.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerID))
if err != nil {
return fmt.Errorf("failed to remove runner from the service: %w", err)
}
log.Info("Removed runner from the service", "runnerId", ephemeralRunner.Status.RunnerId)
log.Info("Removed runner from the service", "runnerId", ephemeralRunner.Status.RunnerID)
return nil
}

View File

@@ -511,7 +511,7 @@ var _ = Describe("EphemeralRunner", func() {
updated := new(v1alpha1.EphemeralRunner)
Eventually(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
err := k8sClient.Get(
ctx,
client.ObjectKey{Name: invalideEphemeralRunner.Name, Namespace: invalideEphemeralRunner.Namespace},
@@ -524,7 +524,7 @@ var _ = Describe("EphemeralRunner", func() {
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodFailed))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerPhaseFailed))
Expect(updated.Status.Reason).Should(Equal("InvalidPod"))
Expect(updated.Status.Message).Should(Equal("Failed to create the pod: pods \"invalid-ephemeral-runner\" is forbidden: no PriorityClass with name notexist was found"))
@@ -676,7 +676,7 @@ var _ = Describe("EphemeralRunner", func() {
if err != nil {
return 0, err
}
return updatedEphemeralRunner.Status.RunnerId, nil
return updatedEphemeralRunner.Status.RunnerID, nil
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
@@ -711,7 +711,7 @@ var _ = Describe("EphemeralRunner", func() {
var updated *v1alpha1.EphemeralRunner
Eventually(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated = new(v1alpha1.EphemeralRunner)
err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated)
if err != nil {
@@ -833,10 +833,10 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil(), "failed to patch pod status")
Consistently(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
return corev1.PodUnknown, err
return "Unknown", err
}
return updated.Status.Phase, nil
},
@@ -1059,7 +1059,7 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil())
Eventually(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
return "", err
@@ -1068,7 +1068,7 @@ var _ = Describe("EphemeralRunner", func() {
},
ephemeralRunnerTimeout,
ephemeralRunnerInterval,
).Should(BeEquivalentTo(corev1.PodRunning))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerPhaseRunning))
// set phase to succeeded
pod.Status.Phase = corev1.PodSucceeded
@@ -1076,7 +1076,7 @@ var _ = Describe("EphemeralRunner", func() {
Expect(err).To(BeNil())
Consistently(
func() (corev1.PodPhase, error) {
func() (v1alpha1.EphemeralRunnerPhase, error) {
updated := new(v1alpha1.EphemeralRunner)
if err := k8sClient.Get(ctx, client.ObjectKey{Name: ephemeralRunner.Name, Namespace: ephemeralRunner.Namespace}, updated); err != nil {
return "", err
@@ -1084,7 +1084,7 @@ var _ = Describe("EphemeralRunner", func() {
return updated.Status.Phase, nil
},
ephemeralRunnerTimeout,
).Should(BeEquivalentTo(corev1.PodRunning))
).Should(BeEquivalentTo(v1alpha1.EphemeralRunnerPhaseRunning))
})
})

View File

@@ -125,6 +125,11 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
return ctrl.Result{}, nil
}
if ephemeralRunnerSet.Status.Phase == v1alpha1.EphemeralRunnerSetPhaseOutdated {
log.Info("ephemeral runner set is outdated, waiting for autoscaling runner set to remove it")
return ctrl.Result{}, nil
}
// Create proxy secret if not present
if ephemeralRunnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
proxySecret := new(corev1.Secret)
@@ -145,25 +150,25 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
// Find all EphemeralRunner with matching namespace and own by this EphemeralRunnerSet.
ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
err := r.List(
if err := r.List(
ctx,
ephemeralRunnerList,
client.InNamespace(req.Namespace),
client.MatchingFields{resourceOwnerKey: req.Name},
)
if err != nil {
); err != nil {
log.Error(err, "Unable to list child ephemeral runners")
return ctrl.Result{}, err
}
ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)
ephemeralRunnersByState := newEphemeralRunnersByStates(ephemeralRunnerList)
log.Info("Ephemeral runner counts",
"pending", len(ephemeralRunnerState.pending),
"running", len(ephemeralRunnerState.running),
"finished", len(ephemeralRunnerState.finished),
"failed", len(ephemeralRunnerState.failed),
"deleting", len(ephemeralRunnerState.deleting),
"outdated", len(ephemeralRunnersByState.outdated),
"pending", len(ephemeralRunnersByState.pending),
"running", len(ephemeralRunnersByState.running),
"finished", len(ephemeralRunnersByState.finished),
"failed", len(ephemeralRunnersByState.failed),
"deleting", len(ephemeralRunnersByState.deleting),
)
if r.PublishMetrics {
@@ -183,16 +188,16 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
Organization: parsedURL.Organization,
Enterprise: parsedURL.Enterprise,
},
len(ephemeralRunnerState.pending),
len(ephemeralRunnerState.running),
len(ephemeralRunnerState.failed),
len(ephemeralRunnersByState.pending),
len(ephemeralRunnersByState.running),
len(ephemeralRunnersByState.failed),
)
}
total := ephemeralRunnerState.scaleTotal()
if ephemeralRunnerSet.Spec.PatchID == 0 || ephemeralRunnerSet.Spec.PatchID != ephemeralRunnerState.latestPatchID {
total := ephemeralRunnersByState.scaleTotal()
if ephemeralRunnerSet.Spec.PatchID == 0 || ephemeralRunnerSet.Spec.PatchID != ephemeralRunnersByState.latestPatchID {
defer func() {
if err := r.cleanupFinishedEphemeralRunners(ctx, ephemeralRunnerState.finished, log); err != nil {
if err := r.cleanupFinishedEphemeralRunners(ctx, ephemeralRunnersByState.finished, log); err != nil {
log.Error(err, "failed to cleanup finished ephemeral runners")
}
}()
@@ -217,8 +222,8 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
if err := r.deleteIdleEphemeralRunners(
ctx,
ephemeralRunnerSet,
ephemeralRunnerState.pending,
ephemeralRunnerState.running,
ephemeralRunnersByState.pending,
ephemeralRunnersByState.running,
count,
log,
); err != nil {
@@ -228,25 +233,41 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
}
}
return ctrl.Result{}, r.updateStatus(ctx, ephemeralRunnerSet, ephemeralRunnersByState, log)
}
func (r *EphemeralRunnerSetReconciler) updateStatus(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, state *ephemeralRunnersByState, log logr.Logger) error {
total := state.scaleTotal()
var phase v1alpha1.EphemeralRunnerSetPhase
switch {
case len(state.outdated) > 0:
phase = v1alpha1.EphemeralRunnerSetPhaseOutdated
case ephemeralRunnerSet.Status.Phase == "":
phase = v1alpha1.EphemeralRunnerSetPhaseRunning
default:
phase = ephemeralRunnerSet.Status.Phase
}
desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: total,
PendingEphemeralRunners: len(ephemeralRunnerState.pending),
RunningEphemeralRunners: len(ephemeralRunnerState.running),
FailedEphemeralRunners: len(ephemeralRunnerState.failed),
Phase: phase,
PendingEphemeralRunners: len(state.pending),
RunningEphemeralRunners: len(state.running),
FailedEphemeralRunners: len(state.failed),
}
// Update the status if needed.
if ephemeralRunnerSet.Status != desiredStatus {
log.Info("Updating status with current runners count", "count", total)
ephemeralRunnerSet := ephemeralRunnerSet.DeepCopy()
ephemeralRunnerSet.Status.CurrentReplicas = -1 // ALWAYS update current replicas
if err := patchSubResource(ctx, r.Status(), ephemeralRunnerSet, func(obj *v1alpha1.EphemeralRunnerSet) {
obj.Status = desiredStatus
}); err != nil {
log.Error(err, "Failed to update status with current runners count")
return ctrl.Result{}, err
return err
}
}
return ctrl.Result{}, nil
return nil
}
func (r *EphemeralRunnerSetReconciler) cleanupFinishedEphemeralRunners(ctx context.Context, finishedEphemeralRunners []*v1alpha1.EphemeralRunner, log logr.Logger) error {
@@ -290,7 +311,6 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return false, fmt.Errorf("failed to list child ephemeral runners: %w", err)
}
log.Info("Actual Ephemeral runner counts", "count", len(ephemeralRunnerList.Items))
// only if there are no ephemeral runners left, return true
if len(ephemeralRunnerList.Items) == 0 {
err := r.cleanUpProxySecret(ctx, ephemeralRunnerSet, log)
@@ -301,7 +321,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
return true, nil
}
ephemeralRunnerState := newEphemeralRunnerState(ephemeralRunnerList)
ephemeralRunnerState := newEphemeralRunnersByStates(ephemeralRunnerList)
log.Info("Clean up runner counts",
"pending", len(ephemeralRunnerState.pending),
@@ -309,11 +329,12 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
"finished", len(ephemeralRunnerState.finished),
"failed", len(ephemeralRunnerState.failed),
"deleting", len(ephemeralRunnerState.deleting),
"outdated", len(ephemeralRunnerState.outdated),
)
log.Info("Cleanup finished or failed ephemeral runners")
log.Info("Cleanup terminated ephemeral runners")
var errs []error
for _, ephemeralRunner := range append(ephemeralRunnerState.finished, ephemeralRunnerState.failed...) {
for _, ephemeralRunner := range ephemeralRunnerState.terminated() {
log.Info("Deleting ephemeral runner", "name", ephemeralRunner.Name)
if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
errs = append(errs, err)
@@ -359,7 +380,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Conte
func (r *EphemeralRunnerSetReconciler) createEphemeralRunners(ctx context.Context, runnerSet *v1alpha1.EphemeralRunnerSet, count int, log logr.Logger) error {
// Track multiple errors at once and return the bundle.
errs := make([]error, 0)
for i := 0; i < count; i++ {
for i := range count {
ephemeralRunner := r.newEphemeralRunner(runnerSet)
if runnerSet.Spec.EphemeralRunnerSpec.Proxy != nil {
ephemeralRunner.Spec.ProxySecretRef = proxyEphemeralRunnerSetSecretName(runnerSet)
@@ -448,7 +469,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
for runners.next() {
ephemeralRunner := runners.object()
isDone := ephemeralRunner.IsDone()
if !isDone && ephemeralRunner.Status.RunnerId == 0 {
if !isDone && ephemeralRunner.Status.RunnerID == 0 {
log.Info("Skipping ephemeral runner since it is not registered yet", "name", ephemeralRunner.Name)
continue
}
@@ -457,7 +478,7 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
log.Info(
"Skipping ephemeral runner since it is running a job",
"name", ephemeralRunner.Name,
"workflowRunId", ephemeralRunner.Status.WorkflowRunId,
"workflowRunId", ephemeralRunner.Status.WorkflowRunID,
"jobId", ephemeralRunner.Status.JobID,
)
continue
@@ -482,21 +503,21 @@ func (r *EphemeralRunnerSetReconciler) deleteIdleEphemeralRunners(ctx context.Co
}
func (r *EphemeralRunnerSetReconciler) deleteEphemeralRunnerWithActionsClient(ctx context.Context, ephemeralRunner *v1alpha1.EphemeralRunner, actionsClient multiclient.Client, log logr.Logger) (bool, error) {
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerId)); err != nil {
if err := actionsClient.RemoveRunner(ctx, int64(ephemeralRunner.Status.RunnerID)); err != nil {
if errors.Is(err, scaleset.JobStillRunningError) {
log.Info("Runner is still running a job, skipping deletion", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
log.Info("Runner is still running a job, skipping deletion", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerID)
return false, nil
}
return false, err
}
log.Info("Deleting ephemeral runner after removing from the service", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
log.Info("Deleting ephemeral runner after removing from the service", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerID)
if err := r.Delete(ctx, ephemeralRunner); err != nil && !kerrors.IsNotFound(err) {
return false, err
}
log.Info("Deleted ephemeral runner", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerId)
log.Info("Deleted ephemeral runner", "name", ephemeralRunner.Name, "runnerId", ephemeralRunner.Status.RunnerID)
return true, nil
}
@@ -553,18 +574,19 @@ func (s *ephemeralRunnerStepper) len() int {
return len(s.items)
}
type ephemeralRunnerState struct {
type ephemeralRunnersByState struct {
pending []*v1alpha1.EphemeralRunner
running []*v1alpha1.EphemeralRunner
finished []*v1alpha1.EphemeralRunner
failed []*v1alpha1.EphemeralRunner
deleting []*v1alpha1.EphemeralRunner
outdated []*v1alpha1.EphemeralRunner
latestPatchID int
}
func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) *ephemeralRunnerState {
var ephemeralRunnerState ephemeralRunnerState
func newEphemeralRunnersByStates(ephemeralRunnerList *v1alpha1.EphemeralRunnerList) *ephemeralRunnersByState {
var ephemeralRunnerState ephemeralRunnersByState
for i := range ephemeralRunnerList.Items {
r := &ephemeralRunnerList.Items[i]
@@ -578,12 +600,14 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
}
switch r.Status.Phase {
case corev1.PodRunning:
case v1alpha1.EphemeralRunnerPhaseRunning:
ephemeralRunnerState.running = append(ephemeralRunnerState.running, r)
case corev1.PodSucceeded:
case v1alpha1.EphemeralRunnerPhaseSucceeded:
ephemeralRunnerState.finished = append(ephemeralRunnerState.finished, r)
case corev1.PodFailed:
case v1alpha1.EphemeralRunnerPhaseFailed:
ephemeralRunnerState.failed = append(ephemeralRunnerState.failed, r)
case v1alpha1.EphemeralRunnerPhaseOutdated:
ephemeralRunnerState.outdated = append(ephemeralRunnerState.outdated, r)
default:
// Pending or no phase should be considered as pending.
//
@@ -595,6 +619,10 @@ func newEphemeralRunnerState(ephemeralRunnerList *v1alpha1.EphemeralRunnerList)
return &ephemeralRunnerState
}
func (s *ephemeralRunnerState) scaleTotal() int {
func (s *ephemeralRunnersByState) terminated() []*v1alpha1.EphemeralRunner {
return append(s.finished, append(s.failed, s.outdated...)...)
}
func (s *ephemeralRunnersByState) scaleTotal() int {
return len(s.pending) + len(s.running) + len(s.failed)
}

View File

@@ -159,10 +159,10 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Set status to simulate a configured EphemeralRunner
refetch := false
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
if runner.Status.RunnerID == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.RunnerId = i + 100
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
updatedRunner.Status.RunnerID = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
refetch = true
@@ -219,10 +219,10 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Set status to simulate a configured EphemeralRunner
refetch := false
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
if runner.Status.RunnerID == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.RunnerId = i + 100
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
updatedRunner.Status.RunnerID = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
refetch = true
@@ -382,12 +382,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -460,12 +460,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodPending
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhasePending
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -512,12 +512,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -547,7 +547,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
for _, runner := range runnerList.Items {
if runner.Status.Phase == corev1.PodSucceeded {
if runner.Status.Phase == v1alpha1.EphemeralRunnerPhaseSucceeded {
return fmt.Errorf("Runner %s is in Succeeded phase", runner.Name)
}
}
@@ -586,12 +586,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeEquivalentTo(2), "2 EphemeralRunner should be created")
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodPending
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhasePending
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -607,9 +607,9 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
succeeded := 0
for _, runner := range runnerList.Items {
switch runner.Status.Phase {
case corev1.PodSucceeded:
case v1alpha1.EphemeralRunnerPhaseSucceeded:
succeeded++
case corev1.PodPending:
case v1alpha1.EphemeralRunnerPhasePending:
pending++
}
}
@@ -649,7 +649,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
return fmt.Errorf("Expected 1 runner, got %d", len(runnerList.Items))
}
if runnerList.Items[0].Status.Phase != corev1.PodPending {
if runnerList.Items[0].Status.Phase != v1alpha1.EphemeralRunnerPhasePending {
return fmt.Errorf("Expected runner to be in Pending, got %s", runnerList.Items[0].Status.Phase)
}
@@ -661,7 +661,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Now, the ephemeral runner finally is done and we can scale down to 0
updatedRunner = runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -707,12 +707,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Put one runner in Pending and one in Running
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodPending
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhasePending
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -730,9 +730,9 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
for _, runner := range runnerList.Items {
switch runner.Status.Phase {
case corev1.PodPending:
case v1alpha1.EphemeralRunnerPhasePending:
pending++
case corev1.PodRunning:
case v1alpha1.EphemeralRunnerPhaseRunning:
running++
}
@@ -777,12 +777,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Now, let's say ephemeral runner controller patched these ephemeral runners with the registration.
updatedRunner = runnerList.Items[0].DeepCopy()
updatedRunner.Status.RunnerId = 1
updatedRunner.Status.RunnerID = 1
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.RunnerId = 2
updatedRunner.Status.RunnerID = 2
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -830,12 +830,12 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
// Put one runner in Succeeded and one in Running
updatedRunner := runnerList.Items[0].DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
updatedRunner = runnerList.Items[1].DeepCopy()
updatedRunner.Status.Phase = corev1.PodRunning
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runnerList.Items[1]))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
@@ -854,9 +854,9 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
for _, runner := range runnerList.Items {
switch runner.Status.Phase {
case corev1.PodSucceeded:
case v1alpha1.EphemeralRunnerPhaseSucceeded:
succeeded++
case corev1.PodRunning:
case v1alpha1.EphemeralRunnerPhaseRunning:
running++
}
}
@@ -898,7 +898,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
}
for _, runner := range runnerList.Items {
if runner.Status.Phase == corev1.PodSucceeded {
if runner.Status.Phase == v1alpha1.EphemeralRunnerPhaseSucceeded {
return fmt.Errorf("Expected no runners in Succeeded phase, got one")
}
}
@@ -943,7 +943,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
var failedOriginal *v1alpha1.EphemeralRunner
var empty []*v1alpha1.EphemeralRunner
for _, runner := range runnerList.Items {
switch runner.Status.RunnerId {
switch runner.Status.RunnerID {
case 101:
pendingOriginal = runner.DeepCopy()
case 102:
@@ -962,8 +962,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
empty = empty[1:]
pending := pendingOriginal.DeepCopy()
pending.Status.RunnerId = 101
pending.Status.Phase = corev1.PodPending
pending.Status.RunnerID = 101
pending.Status.Phase = v1alpha1.EphemeralRunnerPhasePending
err = k8sClient.Status().Patch(ctx, pending, client.MergeFrom(pendingOriginal))
if err != nil {
@@ -976,8 +976,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
runningOriginal = empty[0]
empty = empty[1:]
running := runningOriginal.DeepCopy()
running.Status.RunnerId = 102
running.Status.Phase = corev1.PodRunning
running.Status.RunnerID = 102
running.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
err = k8sClient.Status().Patch(ctx, running, client.MergeFrom(runningOriginal))
if err != nil {
@@ -990,8 +990,8 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
failedOriginal = empty[0]
failed := pendingOriginal.DeepCopy()
failed.Status.RunnerId = 103
failed.Status.Phase = corev1.PodFailed
failed.Status.RunnerID = 103
failed.Status.Phase = v1alpha1.EphemeralRunnerPhaseFailed
err = k8sClient.Status().Patch(ctx, failed, client.MergeFrom(failedOriginal))
if err != nil {
@@ -1006,6 +1006,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
).Should(BeTrue(), "Failed to eventually update to one pending, one running and one failed")
desiredStatus := v1alpha1.EphemeralRunnerSetStatus{
Phase: v1alpha1.EphemeralRunnerSetPhaseRunning,
CurrentReplicas: 3,
PendingEphemeralRunners: 1,
RunningEphemeralRunners: 1,
@@ -1052,6 +1053,7 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
PendingEphemeralRunners: 0,
RunningEphemeralRunners: 0,
FailedEphemeralRunners: 1,
Phase: v1alpha1.EphemeralRunnerSetPhaseRunning,
}
Eventually(
@@ -1070,7 +1072,13 @@ var _ = Describe("Test EphemeralRunnerSet controller", func() {
err = k8sClient.Delete(ctx, &runnerList.Items[0])
Expect(err).To(BeNil(), "Failed to delete failed ephemeral runner")
desiredStatus = v1alpha1.EphemeralRunnerSetStatus{} // empty
desiredStatus = v1alpha1.EphemeralRunnerSetStatus{
CurrentReplicas: 0,
PendingEphemeralRunners: 0,
RunningEphemeralRunners: 0,
FailedEphemeralRunners: 0,
Phase: v1alpha1.EphemeralRunnerSetPhaseRunning,
}
Eventually(
func() (v1alpha1.EphemeralRunnerSetStatus, error) {
updated := new(v1alpha1.EphemeralRunnerSet)
@@ -1222,10 +1230,10 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
// Set status to simulate a configured EphemeralRunner
refetch := false
for i, runner := range runnerList.Items {
if runner.Status.RunnerId == 0 {
if runner.Status.RunnerID == 0 {
updatedRunner := runner.DeepCopy()
updatedRunner.Status.Phase = corev1.PodSucceeded
updatedRunner.Status.RunnerId = i + 100
updatedRunner.Status.Phase = v1alpha1.EphemeralRunnerPhaseSucceeded
updatedRunner.Status.RunnerID = i + 100
err = k8sClient.Status().Patch(ctx, updatedRunner, client.MergeFrom(&runner))
Expect(err).NotTo(HaveOccurred(), "failed to update EphemeralRunner")
refetch = true
@@ -1355,8 +1363,8 @@ var _ = Describe("Test EphemeralRunnerSet controller with proxy settings", func(
).Should(BeEquivalentTo(1), "failed to create ephemeral runner")
runner := runnerList.Items[0].DeepCopy()
runner.Status.Phase = corev1.PodRunning
runner.Status.RunnerId = 100
runner.Status.Phase = v1alpha1.EphemeralRunnerPhaseRunning
runner.Status.RunnerID = 100
err = k8sClient.Status().Patch(ctx, runner, client.MergeFrom(&runnerList.Items[0]))
Expect(err).NotTo(HaveOccurred(), "failed to update ephemeral runner status")

View File

@@ -716,6 +716,10 @@ func (b *ResourceBuilder) newEphemeralRunnerPod(runner *v1alpha1.EphemeralRunner
Name: EnvVarRunnerExtraUserAgent,
Value: fmt.Sprintf("actions-runner-controller/%s", build.Version),
},
corev1.EnvVar{
Name: EnvVarRunnerDeprecatedExitCode,
Value: "1",
},
)
c.Env = append(c.Env, envs...)
}

View File

@@ -43,6 +43,24 @@ You can follow [this troubleshooting guide](https://docs.github.com/en/actions/h
## Changelog
### 0.14.0
1. Fix ActivityId typo in error strings [#4359](https://github.com/actions/actions-runner-controller/pull/4359)
1. Include the HTTP status code in jit error [#4361](https://github.com/actions/actions-runner-controller/pull/4361)
1. Fix tests and generate mocks [#4384](https://github.com/actions/actions-runner-controller/pull/4384)
1. Switch client to scaleset library for the listener and update mocks [#4383](https://github.com/actions/actions-runner-controller/pull/4383)
1. feat: add default linux nodeSelector to listener pod [#4377](https://github.com/actions/actions-runner-controller/pull/4377)
1. Bump Go version [#4398](https://github.com/actions/actions-runner-controller/pull/4398)
1. Allow users to apply labels and annotations to internal resources [#4400](https://github.com/actions/actions-runner-controller/pull/4400)
1. Moving to scaleset client for the controller [#4390](https://github.com/actions/actions-runner-controller/pull/4390)
1. Introduce experimental chart release [#4373](https://github.com/actions/actions-runner-controller/pull/4373)
1. Manually bump dependencies since it needs fixes related to the controller runtime API [#4406](https://github.com/actions/actions-runner-controller/pull/4406)
1. Regenerate manifests for experimental charts [#4407](https://github.com/actions/actions-runner-controller/pull/4407)
1. Remove actions client [#4405](https://github.com/actions/actions-runner-controller/pull/4405)
1. Add chart-level API to customize internal resources [#4410](https://github.com/actions/actions-runner-controller/pull/4410)
1. Shutdown the scaleset when runner is deprecated [#4404](https://github.com/actions/actions-runner-controller/pull/4404)
1. Add multi-label support to scalesets [#4408](https://github.com/actions/actions-runner-controller/pull/4408)
### 0.13.1
1. Make restart pod more flexible to different failure scenarios [#4340](https://github.com/actions/actions-runner-controller/pull/4340)

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -22,9 +22,6 @@ function install_arc() {
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -22,9 +22,6 @@ function install_arc() {
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -0,0 +1,113 @@
#!/bin/bash
# E2E test: install the experimental ARC charts and verify that a scale set
# configured with a custom runner label picks up a workflow targeting it.
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
# Assign before exporting so a failing chart_version is not masked by the
# export statement under `set -e`.
VERSION="$(chart_version "${ROOT_DIR}/charts/gha-runner-scale-set-controller-experimental/Chart.yaml")" || exit 1
export VERSION
# Unique-ish names/labels so concurrent CI runs do not collide.
SCALE_SET_NAME="custom-label-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
SCALE_SET_LABEL="custom-$(date +'%s')${RANDOM}"
WORKFLOW_FILE="arc-custom-label.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
# Install the experimental ARC controller chart into ARC_NAMESPACE and wait
# for it to become ready; dumps controller logs and fails on timeout.
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set controller.manager.container.image="${IMAGE_NAME}:${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller-experimental" \
--debug
# wait_for_arc / log_arc come from helper.sh; parameters are passed via
# per-invocation environment variables.
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
# Install the experimental scale set chart pointed at the target repo, with a
# single custom runner label, then wait for the scale set to come up.
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME} with label ${SCALE_SET_LABEL}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set controllerServiceAccount.name="${ARC_NAME}-gha-rs-controller" \
--set controllerServiceAccount.namespace="${ARC_NAMESPACE}" \
--set auth.url="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set auth.githubToken="${GITHUB_TOKEN}" \
--set scaleset.labels[0]="${SCALE_SET_LABEL}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-experimental" \
--version="${VERSION}"
# NOTE(review): waits in ARC_NAMESPACE rather than SCALE_SET_NAMESPACE —
# presumably the listener pod runs next to the controller; confirm.
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
# Assert that the AutoscalingRunnerSet created by the chart carries the custom
# runner label the chart was installed with; prints a diagnostic and returns
# non-zero on mismatch.
function verify_scale_set_label() {
  local actual_label
  actual_label="$(kubectl get autoscalingrunnersets.actions.github.com -n "${SCALE_SET_NAMESPACE}" -l app.kubernetes.io/instance="${SCALE_SET_NAME}" -o jsonpath='{.items[0].spec.runnerScaleSetLabels[0]}')"
  # Early-success: matching label means nothing more to do.
  if [[ "${actual_label}" == "${SCALE_SET_LABEL}" ]]; then
    return 0
  fi
  echo "Expected scale set label '${SCALE_SET_LABEL}', got '${actual_label}'" >&2
  return 1
}
# Dispatch the custom-label workflow and wait for its run to finish.
# Polls `gh run list` (12 attempts, 5s apart ≈ 60s) for a run created after
# the dispatch timestamp, then watches it; returns non-zero if the dispatch,
# the discovery, or the run itself fails.
function run_custom_label_workflow() {
  local repo="${TARGET_ORG}/${TARGET_REPO}"
  local queue_time
  queue_time="$(date -u +%FT%TZ)"
  gh workflow run -R "${repo}" "${WORKFLOW_FILE}" \
    -f scaleset-label="${SCALE_SET_LABEL}" || return 1
  local count=0
  local run_id=
  while true; do
    if [[ "${count}" -ge 12 ]]; then
      echo "Timeout waiting for custom label workflow to start" >&2
      return 1
    fi
    # When no run matches yet, `--jq '.[0].databaseId'` evaluates on an empty
    # list and can emit the literal string "null" rather than nothing, so an
    # `-n` check alone would break out and watch run id "null".
    run_id="$(gh run list -R "${repo}" --workflow "${WORKFLOW_FILE}" --created ">${queue_time}" --json databaseId --jq '.[0].databaseId' | head -n1)"
    if [[ -n "${run_id}" && "${run_id}" != "null" ]]; then
      break
    fi
    sleep 5
    count=$((count + 1))
  done
  gh run watch "${run_id}" -R "${repo}" --exit-status
}
# Orchestrate the test: provision image/cluster/ARC, run the checks, always
# attempt cleanup and log collection, then report aggregated failures.
function main() {
local failed=()
build_image
create_cluster
# Setup steps intentionally have no `|| failed+=`: under `set -e` a failing
# install aborts the script immediately (fail fast).
install_arc
install_scale_set
verify_scale_set_label || failed+=("verify_scale_set_label")
run_custom_label_workflow || failed+=("run_custom_label_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
# NOTE(review): with an empty `failed` array, "${failed[@]}" trips `set -u`
# on bash < 4.4 — confirm the CI bash version.
print_results "${failed[@]}"
}
main

View File

@@ -0,0 +1,112 @@
#!/bin/bash
# E2E test: install ARC and verify that a scale set configured with a custom
# runner label picks up a workflow targeting it.
set -euo pipefail
DIR="$(realpath "$(dirname "${BASH_SOURCE[0]}")")"
ROOT_DIR="$(realpath "${DIR}/../..")"
source "${DIR}/helper.sh"
# Assign and export separately: `export VAR="$(cmd)"` masks a failing
# chart_version exit status under `set -e` (ShellCheck SC2155). This also
# matches the experimental variant of this script.
VERSION="$(chart_version "${ROOT_DIR}/charts/gha-runner-scale-set-controller/Chart.yaml")" || exit 1
export VERSION
# Unique-ish names/labels so concurrent CI runs do not collide.
SCALE_SET_NAME="custom-label-$(date +'%M%S')$(((RANDOM + 100) % 100 + 1))"
SCALE_SET_NAMESPACE="arc-runners"
SCALE_SET_LABEL="custom-$(date +'%s')${RANDOM}"
WORKFLOW_FILE="arc-custom-label.yaml"
ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
# Install the ARC controller chart into ARC_NAMESPACE and wait for it to
# become ready; dumps controller logs and fails on timeout.
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \
--set image.repository="${IMAGE_NAME}" \
--set image.tag="${IMAGE_TAG}" \
"${ROOT_DIR}/charts/gha-runner-scale-set-controller" \
--debug
# wait_for_arc / log_arc come from helper.sh; parameters are passed via
# per-invocation environment variables.
if ! NAME="${ARC_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_arc; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
# Install the scale set chart pointed at the target repo, with a single custom
# runner label, then wait for the scale set to come up.
function install_scale_set() {
echo "Installing scale set ${SCALE_SET_NAMESPACE}/${SCALE_SET_NAME} with label ${SCALE_SET_LABEL}"
helm install "${SCALE_SET_NAME}" \
--namespace "${SCALE_SET_NAMESPACE}" \
--create-namespace \
--set githubConfigUrl="https://github.com/${TARGET_ORG}/${TARGET_REPO}" \
--set githubConfigSecret.github_token="${GITHUB_TOKEN}" \
--set scaleSetLabels[0]="${SCALE_SET_LABEL}" \
"${ROOT_DIR}/charts/gha-runner-scale-set" \
--version="${VERSION}" \
--debug
# NOTE(review): waits in ARC_NAMESPACE rather than SCALE_SET_NAMESPACE —
# presumably the listener pod runs next to the controller; confirm.
if ! NAME="${SCALE_SET_NAME}" NAMESPACE="${ARC_NAMESPACE}" wait_for_scale_set; then
NAMESPACE="${ARC_NAMESPACE}" log_arc
return 1
fi
}
# Assert that the AutoscalingRunnerSet created by the chart carries the custom
# runner label the chart was installed with; prints a diagnostic and returns
# non-zero on mismatch.
function verify_scale_set_label() {
local actual_label
actual_label="$(kubectl get autoscalingrunnersets.actions.github.com -n "${SCALE_SET_NAMESPACE}" -l app.kubernetes.io/instance="${SCALE_SET_NAME}" -o jsonpath='{.items[0].spec.runnerScaleSetLabels[0]}')"
if [[ "${actual_label}" != "${SCALE_SET_LABEL}" ]]; then
echo "Expected scale set label '${SCALE_SET_LABEL}', got '${actual_label}'" >&2
return 1
fi
}
# Dispatch the custom-label workflow and wait for its run to finish.
# Polls `gh run list` (12 attempts, 5s apart ≈ 60s) for a run created after
# the dispatch timestamp, then watches it; returns non-zero if the dispatch,
# the discovery, or the run itself fails.
function run_custom_label_workflow() {
  local repo="${TARGET_ORG}/${TARGET_REPO}"
  local queue_time
  queue_time="$(date -u +%FT%TZ)"
  gh workflow run -R "${repo}" "${WORKFLOW_FILE}" \
    -f scaleset-label="${SCALE_SET_LABEL}" || return 1
  local count=0
  local run_id=
  while true; do
    if [[ "${count}" -ge 12 ]]; then
      echo "Timeout waiting for custom label workflow to start" >&2
      return 1
    fi
    # When no run matches yet, `--jq '.[0].databaseId'` evaluates on an empty
    # list and can emit the literal string "null" rather than nothing, so an
    # `-n` check alone would break out and watch run id "null".
    run_id="$(gh run list -R "${repo}" --workflow "${WORKFLOW_FILE}" --created ">${queue_time}" --json databaseId --jq '.[0].databaseId' | head -n1)"
    if [[ -n "${run_id}" && "${run_id}" != "null" ]]; then
      break
    fi
    sleep 5
    count=$((count + 1))
  done
  gh run watch "${run_id}" -R "${repo}" --exit-status
}
# Orchestrate the test: provision image/cluster/ARC, run the checks, always
# attempt cleanup and log collection, then report aggregated failures.
function main() {
local failed=()
build_image
create_cluster
# Setup steps intentionally have no `|| failed+=`: under `set -e` a failing
# install aborts the script immediately (fail fast).
install_arc
install_scale_set
verify_scale_set_label || failed+=("verify_scale_set_label")
run_custom_label_workflow || failed+=("run_custom_label_workflow")
INSTALLATION_NAME="${SCALE_SET_NAME}" NAMESPACE="${SCALE_SET_NAMESPACE}" cleanup_scale_set || failed+=("cleanup_scale_set")
NAMESPACE="${ARC_NAMESPACE}" log_arc || failed+=("log_arc")
delete_cluster
# NOTE(review): with an empty `failed` array, "${failed[@]}" trips `set -u`
# on bash < 4.4 — confirm the CI bash version.
print_results "${failed[@]}"
}
main

View File

@@ -18,9 +18,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -22,9 +22,6 @@ function install_arc() {
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -22,9 +22,6 @@ function install_arc() {
return 1
}
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="${SCALE_SET_NAMESPACE}"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -17,9 +17,6 @@ ARC_NAME="arc"
ARC_NAMESPACE="${SCALE_SET_NAMESPACE}"
function install_arc() {
echo "Creating namespace ${ARC_NAMESPACE}"
kubectl create namespace "${SCALE_SET_NAMESPACE}"
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \

View File

@@ -21,7 +21,6 @@ ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \

View File

@@ -21,7 +21,6 @@ ARC_NAMESPACE="arc-systems"
function install_arc() {
echo "Installing ARC"
helm install "${ARC_NAME}" \
--namespace "${ARC_NAMESPACE}" \
--create-namespace \