Mirror of https://github.com/actions/actions-runner-controller.git, synced 2025-12-10 11:41:27 +00:00.

Compare commits: 11 commits (actions-ru… → gha-runner…)
e80bc21fa5, 56754094ea, 8fa4520376, a804bf8b00, 5dea6db412, 2a0b770a63, a7ef871248, e45e4c53f1, a608abd124, 02d9add322, f5ac134787
Changes to .github/workflows/e2e-test-linux-vm.yaml (vendored, 116 lines):
@@ -565,3 +565,119 @@ jobs:
           arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
           arc-namespace: "arc-runners"
           arc-controller-namespace: "arc-systems"
+
+  self-signed-ca-setup:
+    runs-on: ubuntu-latest
+    if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
+    env:
+      WORKFLOW_FILE: "arc-test-workflow.yaml"
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          ref: ${{github.head_ref}}
+
+      - uses: ./.github/actions/setup-arc-e2e
+        id: setup
+        with:
+          app-id: ${{secrets.E2E_TESTS_ACCESS_APP_ID}}
+          app-pk: ${{secrets.E2E_TESTS_ACCESS_PK}}
+          image-name: ${{env.IMAGE_NAME}}
+          image-tag: ${{env.IMAGE_VERSION}}
+          target-org: ${{env.TARGET_ORG}}
+
+      - name: Install gha-runner-scale-set-controller
+        id: install_arc_controller
+        run: |
+          helm install arc \
+            --namespace "arc-systems" \
+            --create-namespace \
+            --set image.repository=${{ env.IMAGE_NAME }} \
+            --set image.tag=${{ env.IMAGE_VERSION }} \
+            ./charts/gha-runner-scale-set-controller \
+            --debug
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 10 ]; then
+              echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
+              exit 1
+            fi
+            count=$((count + 1))
+            sleep 1
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
+          kubectl get pod -n arc-systems
+          kubectl describe deployment arc-gha-runner-scale-set-controller -n arc-systems
+
+      - name: Install gha-runner-scale-set
+        id: install_arc
+        run: |
+          docker run -d \
+            --rm \
+            --name mitmproxy \
+            --publish 8080:8080 \
+            -v ${{ github.workspace }}/mitmproxy:/home/mitmproxy/.mitmproxy \
+            mitmproxy/mitmproxy:latest \
+            mitmdump
+          count=0
+          while true; do
+            if [ -f "${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem" ]; then
+              echo "CA cert generated"
+              cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
+              break
+            fi
+            if [ "$count" -ge 10 ]; then
+              echo "Timeout waiting for mitmproxy to generate its CA cert"
+              exit 1
+            fi
+            count=$((count + 1))
+            sleep 1
+          done
+          sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
+          sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
+          kubectl create namespace arc-runners
+          kubectl -n arc-runners create configmap ca-cert --from-file="${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt"
+          kubectl -n arc-runners get configmap ca-cert -o yaml
+          ARC_NAME=${{github.job}}-$(date +'%M%S')$((($RANDOM + 100) % 100 + 1))
+          helm install "$ARC_NAME" \
+            --namespace "arc-runners" \
+            --create-namespace \
+            --set githubConfigUrl="https://github.com/${{ env.TARGET_ORG }}/${{env.TARGET_REPO}}" \
+            --set githubConfigSecret.github_token="${{ steps.setup.outputs.token }}" \
+            --set proxy.https.url="http://host.minikube.internal:8080" \
+            --set "proxy.noProxy[0]=10.96.0.1:443" \
+            --set "githubServerTLS.certificateFrom.configMapKeyRef.name=ca-cert" \
+            --set "githubServerTLS.certificateFrom.configMapKeyRef.key=mitmproxy-ca-cert.crt" \
+            --set "githubServerTLS.runnerMountPath=/usr/local/share/ca-certificates/" \
+            ./charts/gha-runner-scale-set \
+            --debug
+          echo "ARC_NAME=$ARC_NAME" >> $GITHUB_OUTPUT
+          count=0
+          while true; do
+            POD_NAME=$(kubectl get pods -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME -o name)
+            if [ -n "$POD_NAME" ]; then
+              echo "Pod found: $POD_NAME"
+              break
+            fi
+            if [ "$count" -ge 10 ]; then
+              echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
+              exit 1
+            fi
+            count=$((count + 1))
+            sleep 1
+          done
+          kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
+          kubectl get pod -n arc-systems
+
+      - name: Test ARC E2E
+        uses: ./.github/actions/execute-assert-arc-e2e
+        timeout-minutes: 10
+        with:
+          auth-token: ${{ steps.setup.outputs.token }}
+          repo-owner: ${{ env.TARGET_ORG }}
+          repo-name: ${{env.TARGET_REPO}}
+          workflow-file: ${{env.WORKFLOW_FILE}}
+          arc-name: ${{steps.install_arc.outputs.ARC_NAME}}
+          arc-namespace: "arc-runners"
+          arc-controller-namespace: "arc-systems"
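Both install steps share a poll-then-wait pattern: loop until a pod matching a label appears, then let `kubectl wait` handle readiness. For reference, a minimal client-go sketch of the same polling loop, assuming an already-configured clientset; the package and helper names here are illustrative, not part of this PR:

package e2eutil

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// waitForPodByLabel polls once per second until a pod matching selector
// exists in namespace, giving up after attempts tries, the same budget
// as the shell loops in the workflow above.
func waitForPodByLabel(ctx context.Context, cs kubernetes.Interface, namespace, selector string, attempts int) error {
	for count := 0; ; count++ {
		pods, err := cs.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{LabelSelector: selector})
		if err != nil {
			return err
		}
		if len(pods.Items) > 0 {
			fmt.Printf("Pod found: %s\n", pods.Items[0].Name)
			return nil
		}
		if count >= attempts {
			return fmt.Errorf("timeout waiting for pod with label %s", selector)
		}
		time.Sleep(time.Second)
	}
}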
Changes to .github/workflows/publish-chart.yaml (vendored, 24 lines):
@@ -20,7 +20,7 @@ env:
   HELM_VERSION: v3.8.0

 permissions:
-  contents: read
+  contents: write

 jobs:
   lint-chart:
@@ -173,10 +173,28 @@ jobs:
             --pages-branch 'gh-pages' \
             --pages-index-path 'index.yaml'

+      # This step is required to not throw away changes made to the index.yaml on every new chart release.
+      #
+      # We update the index.yaml in the actions-runner-controller.github.io repo
+      # by appending the new chart version to the index.yaml saved in the actions-runner-controller repo
+      # and copying and committing the updated index.yaml to the github.io one.
+      # See below for more context:
+      # - https://github.com/actions-runner-controller/actions-runner-controller.github.io/pull/2
+      # - https://github.com/actions/actions-runner-controller/pull/2452
+      - name: Commit and push to actions/actions-runner-controller
+        run: |
+          git checkout gh-pages
+          git config user.name "$GITHUB_ACTOR"
+          git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
+          git add .
+          git commit -m "Update index.yaml"
+          git push
+        working-directory: ${{ github.workspace }}
+
       # Chart Release was never intended to publish to a different repo
       # this workaround is intended to move the index.yaml to the target repo
       # where the github pages are hosted
-      - name: Checkout pages repository
+      - name: Checkout target repository
         uses: actions/checkout@v3
         with:
           repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
@@ -188,7 +206,7 @@ jobs:
         run: |
           cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml

-      - name: Commit and push
+      - name: Commit and push to target repository
         run: |
           git config user.name "$GITHUB_ACTOR"
           git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
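The commit-and-push workaround keeps the gh-pages index.yaml in sync by appending each new chart version before copying it to the pages repo. The same merge can be expressed with Helm's own repo package; this is a hedged sketch of the idea, not what the workflow actually runs (it shells out to git and chart-releaser), and the file paths are illustrative:

package main

import (
	"log"

	"helm.sh/helm/v3/pkg/repo"
)

func main() {
	// Load the index kept on this repo's gh-pages branch and the index
	// hosted in the github.io pages repository.
	src, err := repo.LoadIndexFile("index.yaml")
	if err != nil {
		log.Fatal(err)
	}
	dst, err := repo.LoadIndexFile("actions-runner-controller/index.yaml")
	if err != nil {
		log.Fatal(err)
	}
	// Merge appends entries from src that dst does not already have,
	// which is the "append the new chart version" step described above.
	dst.Merge(src)
	dst.SortEntries()
	if err := dst.WriteFile("actions-runner-controller/index.yaml", 0o644); err != nil {
		log.Fatal(err)
	}
}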
Changes to .github/workflows/validate-gha-chart.yaml (vendored, 4 lines):
@@ -71,7 +71,7 @@ jobs:
           git clone https://github.com/helm/chart-testing
           cd chart-testing
           unset CT_CONFIG_DIR
           goreleaser build --clean --skip-validate
           ./dist/chart-testing_linux_amd64_v1/ct version
           echo 'Adding ct directory to PATH...'
           echo "$RUNNER_TEMP/chart-testing/dist/chart-testing_linux_amd64_v1" >> "$GITHUB_PATH"
@@ -107,7 +107,7 @@ jobs:
           load: true
           build-args: |
             DOCKER_IMAGE_NAME=test-arc
             VERSION=dev
           tags: |
             test-arc:dev
           cache-from: type=gha
@@ -52,6 +52,9 @@ type AutoscalingListenerSpec struct {
 	// Required
 	Image string `json:"image,omitempty"`

+	// Required
+	ImagePullPolicy corev1.PullPolicy `json:"imagePullPolicy,omitempty"`
+
 	// Required
 	ImagePullSecrets []corev1.LocalObjectReference `json:"imagePullSecrets,omitempty"`
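The new field is commented Required but tagged omitempty, so the controller has to supply a default when the chart does not; the tests further down pin that default to IfNotPresent and note it must align with controllers/actions.github.com/resourcebuilder.go. A hedged sketch of such a defaulting helper (illustrative only, not the repo's actual resourcebuilder code):

package builder

import corev1 "k8s.io/api/core/v1"

// effectivePullPolicy falls back to IfNotPresent, the default the chart
// tests in this diff assert on, when no policy is configured.
func effectivePullPolicy(p corev1.PullPolicy) corev1.PullPolicy {
	if p == "" {
		return corev1.PullIfNotPresent
	}
	return p
}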
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.0
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.3.0"
+appVersion: "0.4.0"

 home: https://github.com/actions/actions-runner-controller
|
||||
@@ -80,6 +80,9 @@ spec:
|
||||
image:
|
||||
description: Required
|
||||
type: string
|
||||
imagePullPolicy:
|
||||
description: Required
|
||||
type: string
|
||||
imagePullSecrets:
|
||||
description: Required
|
||||
items:
|
||||
|
||||
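The CRD types imagePullPolicy as a plain string, so the API server will accept any value, while Kubernetes itself only honors three. A small sketch of the check a webhook or controller could apply; as far as this diff shows, no such validation is added by the PR:

package builder

import corev1 "k8s.io/api/core/v1"

// validPullPolicy reports whether p is one of the three policies the
// kubelet understands: Always, IfNotPresent, or Never.
func validPullPolicy(p corev1.PullPolicy) bool {
	switch p {
	case corev1.PullAlways, corev1.PullIfNotPresent, corev1.PullNever:
		return true
	default:
		return false
	}
}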
@@ -68,14 +68,11 @@ spec:
             valueFrom:
               fieldRef:
                 fieldPath: metadata.namespace
+        - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
+          value: "{{ .Values.image.pullPolicy | default "IfNotPresent" }}"
         {{- with .Values.env }}
-        {{- if kindIs "slice" .Values.env }}
-        {{- toYaml .Values.env | nindent 8 }}
-        {{- else }}
-        {{- range $key, $val := .Values.env }}
-        - name: {{ $key }}
-          value: {{ $val | quote }}
-        {{- end }}
+        {{- if kindIs "slice" . }}
+        {{- toYaml . | nindent 8 }}
         {{- end }}
         {{- end }}
         {{- with .Values.resources }}
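With the kindIs "slice" guard, only list-form env values render; the old map form (env.FOO=bar) is silently dropped now that the range branch is gone. A hedged terratest sketch of that negative case, written in the same style as the chart tests later in this diff; the test name is mine and not part of the PR:

package tests

import (
	"path/filepath"
	"strings"
	"testing"

	"github.com/gruntwork-io/terratest/modules/helm"
	"github.com/gruntwork-io/terratest/modules/k8s"
	"github.com/gruntwork-io/terratest/modules/random"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	appsv1 "k8s.io/api/apps/v1"
)

func TestEnvMapFormIsIgnored(t *testing.T) {
	t.Parallel()

	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
	require.NoError(t, err)

	namespaceName := "test-" + strings.ToLower(random.UniqueId())
	options := &helm.Options{
		// Map form, which the removed range branch used to expand.
		SetValues:      map[string]string{"env.SOME_VAR": "some-value"},
		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
	}

	output := helm.RenderTemplate(t, options, helmChartPath, "test-arc", []string{"templates/deployment.yaml"})

	var deployment appsv1.Deployment
	helm.UnmarshalK8SYaml(t, output, &deployment)

	// Only the three built-in env vars should remain.
	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
}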
@@ -133,4 +133,5 @@ rules:
   verbs:
     - list
     - watch
+    - patch
 {{- end }}

@@ -81,4 +81,4 @@ rules:
   verbs:
     - list
     - watch
 {{- end }}

@@ -114,4 +114,5 @@ rules:
   verbs:
     - list
     - watch
+    - patch
 {{- end }}
@@ -349,13 +349,16 @@ func TestTemplate_ControllerDeployment_Defaults(t *testing.T) {
 	assert.Equal(t, "--auto-scaling-runner-set-only", deployment.Spec.Template.Spec.Containers[0].Args[0])
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])

-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
 	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

 	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
 	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
 	assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
 	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
@@ -390,6 +393,8 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 		"imagePullSecrets[0].name":   "dockerhub",
 		"nameOverride":               "gha-runner-scale-set-controller-override",
 		"fullnameOverride":           "gha-runner-scale-set-controller-fullname-override",
+		"env[0].name":                "ENV_VAR_NAME_1",
+		"env[0].value":               "ENV_VAR_VALUE_1",
 		"serviceAccount.name":        "gha-runner-scale-set-controller-sa",
 		"podAnnotations.foo":         "bar",
 		"podSecurityContext.fsGroup": "1000",
@@ -432,6 +437,9 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 	assert.Equal(t, "bar", deployment.Spec.Template.Annotations["foo"])
 	assert.Equal(t, "manager", deployment.Spec.Template.Annotations["kubectl.kubernetes.io/default-container"])

+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+
 	assert.Len(t, deployment.Spec.Template.Spec.ImagePullSecrets, 1)
 	assert.Equal(t, "dockerhub", deployment.Spec.Template.Spec.ImagePullSecrets[0].Name)
 	assert.Equal(t, "gha-runner-scale-set-controller-sa", deployment.Spec.Template.Spec.ServiceAccountName)
@@ -467,10 +475,16 @@ func TestTemplate_ControllerDeployment_Customize(t *testing.T) {
 	assert.Equal(t, "--auto-scaler-image-pull-secrets=dockerhub", deployment.Spec.Template.Spec.Containers[0].Args[1])
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[2])

-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 4)
 	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "Always", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+
 	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)
@@ -690,13 +704,16 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 	assert.Equal(t, "--log-level=debug", deployment.Spec.Template.Spec.Containers[0].Args[1])
 	assert.Equal(t, "--watch-single-namespace=demo", deployment.Spec.Template.Spec.Containers[0].Args[2])

-	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 2)
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 3)
 	assert.Equal(t, "CONTROLLER_MANAGER_CONTAINER_IMAGE", deployment.Spec.Template.Spec.Containers[0].Env[0].Name)
 	assert.Equal(t, managerImage, deployment.Spec.Template.Spec.Containers[0].Env[0].Value)

 	assert.Equal(t, "CONTROLLER_MANAGER_POD_NAMESPACE", deployment.Spec.Template.Spec.Containers[0].Env[1].Name)
 	assert.Equal(t, "metadata.namespace", deployment.Spec.Template.Spec.Containers[0].Env[1].ValueFrom.FieldRef.FieldPath)

+	assert.Equal(t, "CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY", deployment.Spec.Template.Spec.Containers[0].Env[2].Name)
+	assert.Equal(t, "IfNotPresent", deployment.Spec.Template.Spec.Containers[0].Env[2].Value) // default value. Needs to align with controllers/actions.github.com/resourcebuilder.go
+
 	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Resources)
 	assert.Nil(t, deployment.Spec.Template.Spec.Containers[0].SecurityContext)
 	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
@@ -704,6 +721,52 @@ func TestTemplate_ControllerDeployment_WatchSingleNamespace(t *testing.T) {
 	assert.Equal(t, "/tmp", deployment.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
 }

+func TestTemplate_ControllerContainerEnvironmentVariables(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set-controller")
+	require.NoError(t, err)
+
+	releaseName := "test-arc"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"env[0].Name":                            "ENV_VAR_NAME_1",
+			"env[0].Value":                           "ENV_VAR_VALUE_1",
+			"env[1].Name":                            "ENV_VAR_NAME_2",
+			"env[1].ValueFrom.SecretKeyRef.Key":      "ENV_VAR_NAME_2",
+			"env[1].ValueFrom.SecretKeyRef.Name":     "secret-name",
+			"env[1].ValueFrom.SecretKeyRef.Optional": "true",
+			"env[2].Name":                            "ENV_VAR_NAME_3",
+			"env[2].Value":                           "",
+			"env[3].Name":                            "ENV_VAR_NAME_4",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/deployment.yaml"})
+
+	var deployment appsv1.Deployment
+	helm.UnmarshalK8SYaml(t, output, &deployment)
+
+	assert.Equal(t, namespaceName, deployment.Namespace)
+	assert.Equal(t, "test-arc-gha-runner-scale-set-controller", deployment.Name)
+
+	assert.Len(t, deployment.Spec.Template.Spec.Containers[0].Env, 7)
+	assert.Equal(t, "ENV_VAR_NAME_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Name)
+	assert.Equal(t, "ENV_VAR_VALUE_1", deployment.Spec.Template.Spec.Containers[0].Env[3].Value)
+	assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].Name)
+	assert.Equal(t, "secret-name", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Name)
+	assert.Equal(t, "ENV_VAR_NAME_2", deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Key)
+	assert.True(t, *deployment.Spec.Template.Spec.Containers[0].Env[4].ValueFrom.SecretKeyRef.Optional)
+	assert.Equal(t, "ENV_VAR_NAME_3", deployment.Spec.Template.Spec.Containers[0].Env[5].Name)
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[5].Value)
+	assert.Equal(t, "ENV_VAR_NAME_4", deployment.Spec.Template.Spec.Containers[0].Env[6].Name)
+	assert.Empty(t, deployment.Spec.Template.Spec.Containers[0].Env[6].ValueFrom)
+}
+
 func TestTemplate_WatchSingleNamespace_NotCreateManagerClusterRole(t *testing.T) {
 	t.Parallel()
@@ -18,6 +18,17 @@ imagePullSecrets: []
 nameOverride: ""
 fullnameOverride: ""

+env:
+  ## Define environment variables for the controller pod
+  # - name: "ENV_VAR_NAME_1"
+  #   value: "ENV_VAR_VALUE_1"
+  # - name: "ENV_VAR_NAME_2"
+  #   valueFrom:
+  #     secretKeyRef:
+  #       key: ENV_VAR_NAME_2
+  #       name: secret-name
+  #       optional: true
+
 serviceAccount:
   # Specifies whether a service account should be created for running the controller pod
   create: true
@@ -31,27 +42,27 @@ serviceAccount:
 podAnnotations: {}

 podSecurityContext: {}
   # fsGroup: 2000

 securityContext: {}
   # capabilities:
   #   drop:
   #   - ALL
   # readOnlyRootFilesystem: true
   # runAsNonRoot: true
   # runAsUser: 1000

 resources: {}
-  # We usually recommend not to specify default resources and to leave this as a conscious
-  # choice for the user. This also increases chances charts run on environments with little
-  # resources, such as Minikube. If you do want to specify resources, uncomment the following
-  # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+  ## We usually recommend not to specify default resources and to leave this as a conscious
+  ## choice for the user. This also increases chances charts run on environments with little
+  ## resources, such as Minikube. If you do want to specify resources, uncomment the following
+  ## lines, adjust them as necessary, and remove the curly braces after 'resources:'.
   # limits:
   #   cpu: 100m
   #   memory: 128Mi
   # requests:
   #   cpu: 100m
   #   memory: 128Mi

 nodeSelector: {}
@@ -69,6 +80,6 @@ flags:
   # Defaults to "debug".
   logLevel: "debug"

-  # Restricts the controller to only watch resources in the desired namespace.
-  # Defaults to watch all namespaces when unset.
+  ## Restricts the controller to only watch resources in the desired namespace.
+  ## Defaults to watch all namespaces when unset.
   # watchSingleNamespace: ""
@@ -15,13 +15,13 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 0.3.0
+version: 0.4.0

 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
 # follow Semantic Versioning. They should reflect the version the application is using.
 # It is recommended to use it with quotes.
-appVersion: "0.3.0"
+appVersion: "0.4.0"

 home: https://github.com/actions/dev-arc
@@ -11,17 +11,9 @@ We truncate at 63 chars because some Kubernetes name fields are limited to this
 If release name contains chart name it will be used as a full name.
 */}}
 {{- define "gha-runner-scale-set.fullname" -}}
-{{- if .Values.fullnameOverride }}
-{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
-{{- else }}
-{{- $name := default .Chart.Name .Values.nameOverride }}
-{{- if contains $name .Release.Name }}
-{{- .Release.Name | trunc 63 | trimSuffix "-" }}
-{{- else }}
+{{- $name := default .Chart.Name }}
 {{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
 {{- end }}
-{{- end }}
-{{- end }}

 {{/*
 Create chart name and version as used by the chart label.
@@ -41,6 +33,8 @@ app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
 {{- end }}
 app.kubernetes.io/managed-by: {{ .Release.Service }}
 app.kubernetes.io/part-of: gha-runner-scale-set
+actions.github.com/scale-set-name: {{ .Release.Name }}
+actions.github.com/scale-set-namespace: {{ .Release.Namespace }}
 {{- end }}

 {{/*
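These two labels give every resource of a release a stable selector; the e2e workflow above already leans on actions.github.com/scale-set-name to find the listener pod. A minimal sketch of building the same selector in Go with apimachinery; the package and helper names are illustrative:

package e2eutil

import "k8s.io/apimachinery/pkg/labels"

// scaleSetSelector builds the label selector that these template helpers
// stamp onto every resource of a release, e.g. for a pod List call.
func scaleSetSelector(releaseName, releaseNamespace string) string {
	return labels.Set{
		"actions.github.com/scale-set-name":      releaseName,
		"actions.github.com/scale-set-namespace": releaseNamespace,
	}.AsSelector().String()
}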
@@ -71,6 +65,10 @@ app.kubernetes.io/instance: {{ .Release.Name }}
 {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role
 {{- end }}

+{{- define "gha-runner-scale-set.kubeModeRoleBindingName" -}}
+{{- include "gha-runner-scale-set.fullname" . }}-kube-mode-role-binding
+{{- end }}
+
 {{- define "gha-runner-scale-set.kubeModeServiceAccountName" -}}
 {{- include "gha-runner-scale-set.fullname" . }}-kube-mode-service-account
 {{- end }}
@@ -433,7 +431,7 @@ volumeMounts:
 {{- include "gha-runner-scale-set.fullname" . }}-manager-role
 {{- end }}

-{{- define "gha-runner-scale-set.managerRoleBinding" -}}
+{{- define "gha-runner-scale-set.managerRoleBindingName" -}}
 {{- include "gha-runner-scale-set.fullname" . }}-manager-role-binding
 {{- end }}
|
||||
@@ -12,6 +12,21 @@ metadata:
|
||||
labels:
|
||||
app.kubernetes.io/component: "autoscaling-runner-set"
|
||||
{{- include "gha-runner-scale-set.labels" . | nindent 4 }}
|
||||
annotations:
|
||||
{{- $containerMode := .Values.containerMode }}
|
||||
{{- if not (kindIs "string" .Values.githubConfigSecret) }}
|
||||
actions.github.com/cleanup-github-secret-name: {{ include "gha-runner-scale-set.githubsecret" . }}
|
||||
{{- end }}
|
||||
actions.github.com/cleanup-manager-role-binding: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
|
||||
actions.github.com/cleanup-manager-role-name: {{ include "gha-runner-scale-set.managerRoleName" . }}
|
||||
{{- if and $containerMode (eq $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
actions.github.com/cleanup-kubernetes-mode-role-binding-name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
|
||||
actions.github.com/cleanup-kubernetes-mode-role-name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
|
||||
actions.github.com/cleanup-kubernetes-mode-service-account-name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
|
||||
{{- end }}
|
||||
{{- if and (ne $containerMode.type "kubernetes") (not .Values.template.spec.serviceAccountName) }}
|
||||
actions.github.com/cleanup-no-permission-service-account-name: {{ include "gha-runner-scale-set.noPermissionServiceAccountName" . }}
|
||||
{{- end }}
|
||||
spec:
|
||||
githubConfigUrl: {{ required ".Values.githubConfigUrl is required" (trimSuffix "/" .Values.githubConfigUrl) }}
|
||||
githubConfigSecret: {{ include "gha-runner-scale-set.githubsecret" . }}
|
||||
|
||||
@@ -7,7 +7,7 @@ metadata:
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
   finalizers:
-    - actions.github.com/secret-protection
+    - actions.github.com/cleanup-protection
 data:
 {{- $hasToken := false }}
 {{- $hasAppId := false }}
@@ -36,4 +36,4 @@ data:
 {{- if and $hasAppId (or (not $hasInstallationId) (not $hasPrivateKey)) }}
 {{- fail "A valid .Values.githubConfigSecret is required for setting auth with GitHub server, provide .Values.githubConfigSecret.github_app_installation_id and .Values.githubConfigSecret.github_app_private_key." }}
 {{- end }}
 {{- end}}
@@ -6,6 +6,8 @@ kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 rules:
 - apiGroups: [""]
   resources: ["pods"]
@@ -3,8 +3,10 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: {{ include "gha-runner-scale-set.kubeModeRoleName" . }}
+  name: {{ include "gha-runner-scale-set.kubeModeRoleBindingName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -5,6 +5,8 @@ kind: ServiceAccount
 metadata:
   name: {{ include "gha-runner-scale-set.kubeModeServiceAccountName" . }}
   namespace: {{ .Release.Namespace }}
+  finalizers:
+    - actions.github.com/cleanup-protection
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
 {{- end }}
@@ -3,6 +3,11 @@ kind: Role
 metadata:
   name: {{ include "gha-runner-scale-set.managerRoleName" . }}
   namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+    app.kubernetes.io/component: manager-role
+  finalizers:
+    - actions.github.com/cleanup-protection
 rules:
 - apiGroups:
   - ""
@@ -29,6 +34,17 @@ rules:
   - list
   - patch
   - update
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
 - apiGroups:
   - rbac.authorization.k8s.io
   resources:
@@ -56,4 +72,4 @@ rules:
   - configmaps
   verbs:
   - get
 {{- end }}
@@ -1,8 +1,13 @@
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: {{ include "gha-runner-scale-set.managerRoleBinding" . }}
+  name: {{ include "gha-runner-scale-set.managerRoleBindingName" . }}
   namespace: {{ .Release.Namespace }}
+  labels:
+    {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+    app.kubernetes.io/component: manager-role-binding
+  finalizers:
+    - actions.github.com/cleanup-protection
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
@@ -10,4 +15,4 @@ roleRef:
 subjects:
 - kind: ServiceAccount
   name: {{ include "gha-runner-scale-set.managerServiceAccountName" . | nindent 4 }}
   namespace: {{ include "gha-runner-scale-set.managerServiceAccountNamespace" . | nindent 4 }}
@@ -7,4 +7,6 @@ metadata:
   namespace: {{ .Release.Namespace }}
   labels:
     {{- include "gha-runner-scale-set.labels" . | nindent 4 }}
+  finalizers:
+    - actions.github.com/cleanup-protection
 {{- end }}
@@ -1,11 +1,13 @@
 package tests

 import (
+	"fmt"
 	"path/filepath"
 	"strings"
 	"testing"

 	v1alpha1 "github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
+	actionsgithubcom "github.com/actions/actions-runner-controller/controllers/actions.github.com"
 	"github.com/gruntwork-io/terratest/modules/helm"
 	"github.com/gruntwork-io/terratest/modules/k8s"
 	"github.com/gruntwork-io/terratest/modules/random"
@@ -43,7 +45,7 @@ func TestTemplateRenderedGitHubSecretWithGitHubToken(t *testing.T) {
 	assert.Equal(t, namespaceName, githubSecret.Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-github-secret", githubSecret.Name)
 	assert.Equal(t, "gh_token12345", string(githubSecret.Data["github_token"]))
-	assert.Equal(t, "actions.github.com/secret-protection", githubSecret.Finalizers[0])
+	assert.Equal(t, "actions.github.com/cleanup-protection", githubSecret.Finalizers[0])
 }

 func TestTemplateRenderedGitHubSecretWithGitHubApp(t *testing.T) {
@@ -188,6 +190,7 @@ func TestTemplateRenderedSetServiceAccountToNoPermission(t *testing.T) {
 	helm.UnmarshalK8SYaml(t, output, &ars)

 	assert.Equal(t, "test-runners-gha-runner-scale-set-no-permission-service-account", ars.Spec.Template.Spec.ServiceAccountName)
+	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName]) // no finalizer protections in place
 }

 func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
@@ -217,6 +220,7 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {

 	assert.Equal(t, namespaceName, serviceAccount.Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", serviceAccount.Name)
+	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/kube_mode_role.yaml"})
 	var role rbacv1.Role
@@ -224,6 +228,9 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {

 	assert.Equal(t, namespaceName, role.Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", role.Name)

+	assert.Equal(t, "actions.github.com/cleanup-protection", role.Finalizers[0])
+
 	assert.Len(t, role.Rules, 5, "kube mode role should have 5 rules")
 	assert.Equal(t, "pods", role.Rules[0].Resources[0])
 	assert.Equal(t, "pods/exec", role.Rules[1].Resources[0])
@@ -236,18 +243,21 @@ func TestTemplateRenderedSetServiceAccountToKubeMode(t *testing.T) {
 	helm.UnmarshalK8SYaml(t, output, &roleBinding)

 	assert.Equal(t, namespaceName, roleBinding.Namespace)
-	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.Name)
+	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role-binding", roleBinding.Name)
 	assert.Len(t, roleBinding.Subjects, 1)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", roleBinding.Subjects[0].Name)
 	assert.Equal(t, namespaceName, roleBinding.Subjects[0].Namespace)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-role", roleBinding.RoleRef.Name)
 	assert.Equal(t, "Role", roleBinding.RoleRef.Kind)
+	assert.Equal(t, "actions.github.com/cleanup-protection", serviceAccount.Finalizers[0])

 	output = helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
 	var ars v1alpha1.AutoscalingRunnerSet
 	helm.UnmarshalK8SYaml(t, output, &ars)

-	assert.Equal(t, "test-runners-gha-runner-scale-set-kube-mode-service-account", ars.Spec.Template.Spec.ServiceAccountName)
+	expectedServiceAccountName := "test-runners-gha-runner-scale-set-kube-mode-service-account"
+	assert.Equal(t, expectedServiceAccountName, ars.Spec.Template.Spec.ServiceAccountName)
+	assert.Equal(t, expectedServiceAccountName, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
 }

 func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
@@ -279,6 +289,7 @@ func TestTemplateRenderedUserProvideSetServiceAccount(t *testing.T) {
 	helm.UnmarshalK8SYaml(t, output, &ars)

 	assert.Equal(t, "test-service-account", ars.Spec.Template.Spec.ServiceAccountName)
+	assert.Empty(t, ars.Annotations[actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName])
 }

 func TestTemplateRenderedAutoScalingRunnerSet(t *testing.T) {
@@ -1458,7 +1469,11 @@ func TestTemplate_CreateManagerRole(t *testing.T) {

 	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
-	assert.Equal(t, 5, len(managerRole.Rules))
+	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
+	assert.Equal(t, 6, len(managerRole.Rules))
+
+	var ars v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &ars)
 }

 func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {
@@ -1489,8 +1504,9 @@ func TestTemplate_CreateManagerRole_UseConfigMaps(t *testing.T) {

 	assert.Equal(t, namespaceName, managerRole.Namespace, "namespace should match the namespace of the Helm release")
 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRole.Name)
-	assert.Equal(t, 6, len(managerRole.Rules))
-	assert.Equal(t, "configmaps", managerRole.Rules[5].Resources[0])
+	assert.Equal(t, "actions.github.com/cleanup-protection", managerRole.Finalizers[0])
+	assert.Equal(t, 7, len(managerRole.Rules))
+	assert.Equal(t, "configmaps", managerRole.Rules[6].Resources[0])
 }

 func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
@@ -1521,6 +1537,7 @@ func TestTemplate_CreateManagerRoleBinding(t *testing.T) {
 	assert.Equal(t, namespaceName, managerRoleBinding.Namespace, "namespace should match the namespace of the Helm release")
 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role-binding", managerRoleBinding.Name)
 	assert.Equal(t, "test-runners-gha-runner-scale-set-manager-role", managerRoleBinding.RoleRef.Name)
+	assert.Equal(t, "actions.github.com/cleanup-protection", managerRoleBinding.Finalizers[0])
 	assert.Equal(t, "arc", managerRoleBinding.Subjects[0].Name)
 	assert.Equal(t, "arc-system", managerRoleBinding.Subjects[0].Namespace)
 }
@@ -1692,3 +1709,103 @@ func TestTemplateRenderedAutoScalingRunnerSet_KubeModeMergePodSpec(t *testing.T)
 	assert.Equal(t, "others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].Name, "VolumeMount name should be others")
 	assert.Equal(t, "/others", ars.Spec.Template.Spec.Containers[0].VolumeMounts[1].MountPath, "VolumeMount mountPath should be /others")
 }

+func TestTemplateRenderedAutoscalingRunnerSetAnnotation_GitHubSecret(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	annotationExpectedTests := map[string]*helm.Options{
+		"GitHub token": {
+			SetValues: map[string]string{
+				"githubConfigUrl":                    "https://github.com/actions",
+				"githubConfigSecret.github_token":    "gh_token12345",
+				"controllerServiceAccount.name":      "arc",
+				"controllerServiceAccount.namespace": "arc-system",
+			},
+			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+		},
+		"GitHub app": {
+			SetValues: map[string]string{
+				"githubConfigUrl":                                "https://github.com/actions",
+				"githubConfigSecret.github_app_id":               "10",
+				"githubConfigSecret.github_app_installation_id":  "100",
+				"githubConfigSecret.github_app_private_key":      "private_key",
+				"controllerServiceAccount.name":                  "arc",
+				"controllerServiceAccount.namespace":             "arc-system",
+			},
+			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+		},
+	}
+
+	for name, options := range annotationExpectedTests {
+		t.Run("Annotation set: "+name, func(t *testing.T) {
+			output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+			var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+			helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+
+			assert.NotEmpty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
+		})
+	}
+
+	t.Run("Annotation should not be set", func(t *testing.T) {
+		options := &helm.Options{
+			SetValues: map[string]string{
+				"githubConfigUrl":                    "https://github.com/actions",
+				"githubConfigSecret":                 "pre-defined-secret",
+				"controllerServiceAccount.name":      "arc",
+				"controllerServiceAccount.namespace": "arc-system",
+			},
+			KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+		}
+		output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+		var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+		helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)

+		assert.Empty(t, autoscalingRunnerSet.Annotations[actionsgithubcom.AnnotationKeyGitHubSecretName])
+	})
+}
+
+func TestTemplateRenderedAutoscalingRunnerSetAnnotation_KubernetesModeCleanup(t *testing.T) {
+	t.Parallel()
+
+	// Path to the helm chart we will test
+	helmChartPath, err := filepath.Abs("../../gha-runner-scale-set")
+	require.NoError(t, err)
+
+	releaseName := "test-runners"
+	namespaceName := "test-" + strings.ToLower(random.UniqueId())
+
+	options := &helm.Options{
+		SetValues: map[string]string{
+			"githubConfigUrl":                    "https://github.com/actions",
+			"githubConfigSecret.github_token":    "gh_token12345",
+			"controllerServiceAccount.name":      "arc",
+			"controllerServiceAccount.namespace": "arc-system",
+			"containerMode.type":                 "kubernetes",
+		},
+		KubectlOptions: k8s.NewKubectlOptions("", "", namespaceName),
+	}
+
+	output := helm.RenderTemplate(t, options, helmChartPath, releaseName, []string{"templates/autoscalingrunnerset.yaml"})
+	var autoscalingRunnerSet v1alpha1.AutoscalingRunnerSet
+	helm.UnmarshalK8SYaml(t, output, &autoscalingRunnerSet)
+
+	annotationValues := map[string]string{
+		actionsgithubcom.AnnotationKeyGitHubSecretName:                 "test-runners-gha-runner-scale-set-github-secret",
+		actionsgithubcom.AnnotationKeyManagerRoleName:                  "test-runners-gha-runner-scale-set-manager-role",
+		actionsgithubcom.AnnotationKeyManagerRoleBindingName:           "test-runners-gha-runner-scale-set-manager-role-binding",
+		actionsgithubcom.AnnotationKeyKubernetesModeServiceAccountName: "test-runners-gha-runner-scale-set-kube-mode-service-account",
+		actionsgithubcom.AnnotationKeyKubernetesModeRoleName:           "test-runners-gha-runner-scale-set-kube-mode-role",
+		actionsgithubcom.AnnotationKeyKubernetesModeRoleBindingName:    "test-runners-gha-runner-scale-set-kube-mode-role-binding",
+	}
+
+	for annotation, value := range annotationValues {
+		assert.Equal(t, value, autoscalingRunnerSet.Annotations[annotation], fmt.Sprintf("Annotation %q does not match the expected value", annotation))
+	}
+}
@@ -65,7 +65,7 @@ githubConfigSecret:
 #   certificateFrom:
 #     configMapKeyRef:
 #       name: config-map-name
-#       key: ca.pem
+#       key: ca.crt
 #   runnerMountPath: /usr/local/share/ca-certificates/

 # containerMode:
@@ -80,6 +80,9 @@ spec:
               image:
                 description: Required
                 type: string
+              imagePullPolicy:
+                description: Required
+                type: string
               imagePullSecrets:
                 description: Required
                 items:
@@ -56,6 +56,8 @@ spec:
         valueFrom:
           fieldRef:
             fieldPath: metadata.namespace
+      - name: CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY
+        value: IfNotPresent
       volumeMounts:
       - name: controller-manager
         mountPath: "/etc/actions-runner-controller"
@@ -27,6 +27,7 @@ import (
 	"github.com/actions/actions-runner-controller/github/actions"
 	"github.com/go-logr/logr"
 	corev1 "k8s.io/api/core/v1"
+	rbacv1 "k8s.io/api/rbac/v1"
 	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -42,11 +43,12 @@ import (

 const (
 	// TODO: Replace with shared image.
-	autoscalingRunnerSetOwnerKey      = ".metadata.controller"
-	LabelKeyRunnerSpecHash            = "runner-spec-hash"
-	autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
-	runnerScaleSetIdAnnotationKey     = "runner-scale-set-id"
-	runnerScaleSetNameAnnotationKey   = "runner-scale-set-name"
+	autoscalingRunnerSetOwnerKey             = ".metadata.controller"
+	LabelKeyRunnerSpecHash                   = "runner-spec-hash"
+	autoscalingRunnerSetFinalizerName        = "autoscalingrunnerset.actions.github.com/finalizer"
+	runnerScaleSetIdAnnotationKey            = "runner-scale-set-id"
+	runnerScaleSetNameAnnotationKey          = "runner-scale-set-name"
+	autoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
 )

 // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
@@ -113,6 +115,17 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
 			return ctrl.Result{}, err
 		}

+		requeue, err := r.removeFinalizersFromDependentResources(ctx, autoscalingRunnerSet, log)
+		if err != nil {
+			log.Error(err, "Failed to remove finalizers on dependent resources")
+			return ctrl.Result{}, err
+		}
+
+		if requeue {
+			log.Info("Waiting for dependent resources to be deleted")
+			return ctrl.Result{Requeue: true}, nil
+		}
+
 		log.Info("Removing finalizer")
 		err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
 			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetFinalizerName)
@@ -305,6 +318,29 @@ func (r *AutoscalingRunnerSetReconciler) deleteEphemeralRunnerSets(ctx context.C
 	return nil
 }

+func (r *AutoscalingRunnerSetReconciler) removeFinalizersFromDependentResources(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (requeue bool, err error) {
+	c := autoscalingRunnerSetFinalizerDependencyCleaner{
+		client:               r.Client,
+		autoscalingRunnerSet: autoscalingRunnerSet,
+		logger:               logger,
+	}
+
+	c.removeKubernetesModeRoleBindingFinalizer(ctx)
+	c.removeKubernetesModeRoleFinalizer(ctx)
+	c.removeKubernetesModeServiceAccountFinalizer(ctx)
+	c.removeNoPermissionServiceAccountFinalizer(ctx)
+	c.removeGitHubSecretFinalizer(ctx)
+	c.removeManagerRoleBindingFinalizer(ctx)
+	c.removeManagerRoleFinalizer(ctx)
+
+	requeue, err = c.result()
+	if err != nil {
+		logger.Error(err, "Failed to cleanup finalizer from dependent resource")
+		return true, err
+	}
+	return requeue, nil
+}
+
 func (r *AutoscalingRunnerSetReconciler) createRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) (ctrl.Result, error) {
 	logger.Info("Creating a new runner scale set")
 	actionsClient, err := r.actionsClientFor(ctx, autoscalingRunnerSet)
@@ -467,12 +503,28 @@ func (r *AutoscalingRunnerSetReconciler) updateRunnerScaleSetName(ctx context.Co
 }

 func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, logger logr.Logger) error {
+	scaleSetId, ok := autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]
+	if !ok {
+		// The annotation not being present can occur in 3 scenarios:
+		// 1. The scale set was never created.
+		//    In this case, we don't need to fetch the actions client to delete a scale set that does not exist.
+		//
+		// 2. The scale set has been deleted by the controller.
+		//    In that case, the controller cleans up the annotation because the scale set no longer exists.
+		//    Removing the scale set id is also useful because the permission cleanup will eventually remove the
+		//    permission assigned to it on the GitHub secret, which would cause an actions client built from that
+		//    secret to fail with permission denied.
+		//
+		// 3. The annotation was removed manually.
+		//    In this case, the controller treats the scale set as already removed from the Actions service,
+		//    and manual deletion of the scale set is then required.
+		return nil
+	}
 	logger.Info("Deleting the runner scale set from Actions service")
-	runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
+	runnerScaleSetId, err := strconv.Atoi(scaleSetId)
 	if err != nil {
-		// If the annotation is not set correctly, or if it does not exist, we are going to get stuck in a loop trying to parse the scale set id.
-		// If the configuration is invalid (secret does not exist for example), we never get to the point to create runner set. But then, manual cleanup
-		// would get stuck finalizing the resource trying to parse annotation indefinitely
+		// If the annotation is not set correctly, we are going to get stuck in a loop trying to parse the scale set id.
+		// If the configuration is invalid (secret does not exist, for example), we never got to the point of creating the runner set.
+		// But then, manual cleanup would get stuck finalizing the resource, trying to parse the annotation indefinitely.
 		logger.Info("autoscaling runner set does not have annotation describing scale set id. Skip deletion", "err", err.Error())
 		return nil
 	}
@@ -489,6 +541,14 @@ func (r *AutoscalingRunnerSetReconciler) deleteRunnerScaleSet(ctx context.Contex
 		return err
 	}

+	err = patch(ctx, r.Client, autoscalingRunnerSet, func(obj *v1alpha1.AutoscalingRunnerSet) {
+		delete(obj.Annotations, runnerScaleSetIdAnnotationKey)
+	})
+	if err != nil {
+		logger.Error(err, "Failed to patch autoscaling runner set with annotation removed", "annotation", runnerScaleSetIdAnnotationKey)
+		return err
+	}
+
 	logger.Info("Deleted the runner scale set from Actions service")
 	return nil
 }
@@ -658,6 +718,328 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
 		Complete(r)
 }

+type autoscalingRunnerSetFinalizerDependencyCleaner struct {
+	// configuration fields
+	client               client.Client
+	autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet
+	logger               logr.Logger
+
+	// fields to operate on
+	requeue bool
+	err     error
+}
+
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) result() (requeue bool, err error) {
+	return c.requeue, c.err
+}
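Every remove*Finalizer method below opens with the same `if c.requeue || c.err != nil { return }` guard, so the first patch that requests a requeue, or the first error, short-circuits the rest of the chain without error plumbing at each call site in removeFinalizersFromDependentResources. Distilled into a hypothetical helper (not part of this diff), the pattern looks like:

// step runs one cleanup action unless an earlier step already requested a
// requeue or failed; errors accumulate on the cleaner rather than being
// returned at each call site.
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) step(ctx context.Context, clean func(context.Context) error) {
	if c.requeue || c.err != nil {
		return
	}
	if err := clean(ctx); err != nil {
		c.err = err
	}
}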
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleBindingFinalizer(ctx context.Context) {
+	if c.requeue || c.err != nil {
+		return
+	}
+
+	roleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName]
+	if !ok {
+		c.logger.Info(
+			"Skipping cleaning up kubernetes mode role binding",
+			"reason",
+			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleBindingName),
+		)
+		return
+	}
+
+	c.logger.Info("Removing finalizer from container mode kubernetes role binding", "name", roleBindingName)
+
+	roleBinding := new(rbacv1.RoleBinding)
+	err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
+	switch {
+	case err == nil:
+		if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) {
+			c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
+			return
+		}
+		err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
+			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
+		})
+		if err != nil {
+			c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
+			return
+		}
+		c.requeue = true
+		c.logger.Info("Removed finalizer from container mode kubernetes role binding", "name", roleBindingName)
+		return
+	case err != nil && !kerrors.IsNotFound(err):
+		c.err = fmt.Errorf("failed to fetch kubernetes mode role binding: %w", err)
+		return
+	default:
+		c.logger.Info("Container mode kubernetes role binding has already been deleted", "name", roleBindingName)
+		return
+	}
+}
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRoleFinalizer(ctx context.Context) {
+	if c.requeue || c.err != nil {
+		return
+	}
+
+	roleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName]
+	if !ok {
+		c.logger.Info(
+			"Skipping cleaning up kubernetes mode role",
+			"reason",
+			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeRoleName),
+		)
+		return
+	}
+
+	c.logger.Info("Removing finalizer from container mode kubernetes role", "name", roleName)
+	role := new(rbacv1.Role)
+	err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
+	switch {
+	case err == nil:
+		if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) {
+			c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
+			return
+		}
+		err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
+			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
+		})
+		if err != nil {
+			c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
+			return
+		}
+		c.requeue = true
+		c.logger.Info("Removed finalizer from container mode kubernetes role")
+		return
+	case err != nil && !kerrors.IsNotFound(err):
+		c.err = fmt.Errorf("failed to fetch kubernetes mode role: %w", err)
+		return
+	default:
+		c.logger.Info("Container mode kubernetes role has already been deleted", "name", roleName)
+		return
+	}
+}
+func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeServiceAccountFinalizer(ctx context.Context) {
+	if c.requeue || c.err != nil {
+		return
+	}
+
+	serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName]
+	if !ok {
+		c.logger.Info(
+			"Skipping cleaning up kubernetes mode service account",
+			"reason",
+			fmt.Sprintf("annotation key %q not present", AnnotationKeyKubernetesModeServiceAccountName),
+		)
+		return
+	}
+
+	c.logger.Info("Removing finalizer from container mode kubernetes service account", "name", serviceAccountName)
+
+	serviceAccount := new(corev1.ServiceAccount)
+	err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
+	switch {
+	case err == nil:
+		if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) {
+			c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
+			return
+		}
+		err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
+			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
+		})
+		if err != nil {
+			c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
+			return
+		}
+		c.requeue = true
+		c.logger.Info("Removed finalizer from container mode kubernetes service account")
+		return
+	case err != nil && !kerrors.IsNotFound(err):
+		c.err = fmt.Errorf("failed to fetch kubernetes mode service account: %w", err)
+		return
+	default:
+		c.logger.Info("Container mode kubernetes service account has already been deleted", "name", serviceAccountName)
+		return
+	}
+}
func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServiceAccountFinalizer(ctx context.Context) {
|
||||
if c.requeue || c.err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
serviceAccountName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName]
|
||||
if !ok {
|
||||
c.logger.Info(
|
||||
"Skipping cleaning up no permission service account",
|
||||
"reason",
|
||||
fmt.Sprintf("annotation key %q not present", AnnotationKeyNoPermissionServiceAccountName),
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
c.logger.Info("Removing finalizer from no permission service account", "name", serviceAccountName)
|
||||
|
||||
serviceAccount := new(corev1.ServiceAccount)
|
||||
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
|
||||
switch {
|
||||
case err == nil:
|
||||
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) {
|
||||
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
|
||||
return
|
||||
}
|
||||
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
|
||||
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
|
||||
})
|
||||
if err != nil {
|
||||
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
|
||||
return
|
||||
}
|
||||
c.requeue = true
|
||||
c.logger.Info("Removed finalizer from no permission service account", "name", serviceAccountName)
|
||||
return
|
||||
case err != nil && !kerrors.IsNotFound(err):
|
||||
c.err = fmt.Errorf("failed to fetch service account: %w", err)
|
||||
return
|
||||
default:
|
||||
c.logger.Info("No permission service account has already been deleted", "name", serviceAccountName)
|
||||
return
|
||||
}
|
||||
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	githubSecretName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up GitHub secret",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyGitHubSecretName),
		)
		return
	}

	c.logger.Info("Removing finalizer from GitHub secret", "name", githubSecretName)

	githubSecret := new(corev1.Secret)
	err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(githubSecret, autoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
			return
		}
		err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from GitHub secret", "name", githubSecretName)
		return
	case err != nil && !kerrors.IsNotFound(err) && !kerrors.IsForbidden(err):
		c.err = fmt.Errorf("failed to fetch GitHub secret: %w", err)
		return
	default:
		c.logger.Info("GitHub secret has already been deleted", "name", githubSecretName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindingFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	managerRoleBindingName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up manager role binding",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleBindingName),
		)
		return
	}

	c.logger.Info("Removing finalizer from manager role binding", "name", managerRoleBindingName)

	roleBinding := new(rbacv1.RoleBinding)
	err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
			return
		}
		err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from manager role binding", "name", managerRoleBindingName)
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch manager role binding: %w", err)
		return
	default:
		c.logger.Info("Manager role binding has already been deleted", "name", managerRoleBindingName)
		return
	}
}

func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinalizer(ctx context.Context) {
	if c.requeue || c.err != nil {
		return
	}

	managerRoleName, ok := c.autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName]
	if !ok {
		c.logger.Info(
			"Skipping cleaning up manager role",
			"reason",
			fmt.Sprintf("annotation key %q not present", AnnotationKeyManagerRoleName),
		)
		return
	}

	c.logger.Info("Removing finalizer from manager role", "name", managerRoleName)

	role := new(rbacv1.Role)
	err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
	switch {
	case err == nil:
		if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) {
			c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
			return
		}
		err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
			controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName)
		})
		if err != nil {
			c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)
			return
		}
		c.requeue = true
		c.logger.Info("Removed finalizer from manager role", "name", managerRoleName)
		return
	case err != nil && !kerrors.IsNotFound(err):
		c.err = fmt.Errorf("failed to fetch manager role: %w", err)
		return
	default:
		c.logger.Info("Manager role has already been deleted", "name", managerRoleName)
		return
	}
}
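
// The remove*Finalizer methods above all repeat one fetch/check/patch shape.
// A minimal sketch of the generics-based helper that the NOTE below alludes
// to (hypothetical; assumes Go 1.18+ and the generic patch helper already
// used in this file):
//
//	func removeFinalizer[T client.Object](ctx context.Context, c client.Client, obj T) (requeue bool, err error) {
//		if !controllerutil.ContainsFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) {
//			return false, nil
//		}
//		err = patch(ctx, c, obj, func(o T) {
//			controllerutil.RemoveFinalizer(o, autoscalingRunnerSetCleanupFinalizerName)
//		})
//		return err == nil, err
//	}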

// NOTE: if this logic should be used for other resources,
// consider using generics
type EphemeralRunnerSets struct {

@@ -13,6 +13,7 @@ import (
	"time"

	corev1 "k8s.io/api/core/v1"
	rbacv1 "k8s.io/api/rbac/v1"
	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -23,6 +24,7 @@ import (
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"

	"github.com/actions/actions-runner-controller/apis/actions.github.com/v1alpha1"
	"github.com/actions/actions-runner-controller/github/actions"

@@ -571,6 +573,7 @@ var _ = Describe("Test AutoScalingController updates", func() {

		update := autoscalingRunnerSet.DeepCopy()
		update.Spec.RunnerScaleSetName = "testset_update"

		err = k8sClient.Patch(ctx, update, client.MergeFrom(autoscalingRunnerSet))
		Expect(err).NotTo(HaveOccurred(), "failed to update AutoScalingRunnerSet")

@@ -1036,7 +1039,7 @@ var _ = Describe("Test Client optional configuration", func() {
				g.Expect(listener.Spec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "listener does not have TLS config")
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingListenerTestInterval,
			autoscalingRunnerSetTestInterval,
		).Should(Succeed(), "tls config is incorrect")
	})

@@ -1093,8 +1096,372 @@ var _ = Describe("Test Client optional configuration", func() {
				g.Expect(runnerSet.Spec.EphemeralRunnerSpec.GitHubServerTLS).To(BeEquivalentTo(autoscalingRunnerSet.Spec.GitHubServerTLS), "EphemeralRunnerSpec does not have TLS config")
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingListenerTestInterval,
			autoscalingRunnerSetTestInterval,
		).Should(Succeed())
	})
	})
	})

var _ = Describe("Test external permissions cleanup", func() {
	It("Should clean up kubernetes mode permissions", func() {
		ctx := context.Background()
		autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)

		configSecret := createDefaultSecret(GinkgoT(), k8sClient, autoscalingNS.Name)

		controller := &AutoscalingRunnerSetReconciler{
			Client:                             mgr.GetClient(),
			Scheme:                             mgr.GetScheme(),
			Log:                                logf.Log,
			ControllerNamespace:                autoscalingNS.Name,
			DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
			ActionsClient:                      fake.NewMultiClient(),
		}
		err := controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		startManagers(GinkgoT(), mgr)

		min := 1
		max := 10
		autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoscalingNS.Name,
				Labels: map[string]string{
					"app.kubernetes.io/name": "gha-runner-scale-set",
				},
				Annotations: map[string]string{
					AnnotationKeyKubernetesModeRoleBindingName:    "kube-mode-role-binding",
					AnnotationKeyKubernetesModeRoleName:           "kube-mode-role",
					AnnotationKeyKubernetesModeServiceAccountName: "kube-mode-service-account",
				},
			},
			Spec: v1alpha1.AutoscalingRunnerSetSpec{
				GitHubConfigUrl:    "https://github.com/owner/repo",
				GitHubConfigSecret: configSecret.Name,
				MaxRunners:         &max,
				MinRunners:         &min,
				RunnerGroup:        "testgroup",
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Name:  "runner",
								Image: "ghcr.io/actions/runner",
							},
						},
					},
				},
			},
		}

		role := &rbacv1.Role{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role")

		serviceAccount := &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, serviceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode service account")

		roleBinding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
			Subjects: []rbacv1.Subject{
				{
					Kind:      "ServiceAccount",
					Name:      serviceAccount.Name,
					Namespace: serviceAccount.Namespace,
				},
			},
			RoleRef: rbacv1.RoleRef{
				APIGroup: rbacv1.GroupName,
				// Kind is the type of resource being referenced
				Kind: "Role",
				Name: role.Name,
			},
		}

		err = k8sClient.Create(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to create kubernetes mode role binding")

		err = k8sClient.Create(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")

		Eventually(
			func() (string, error) {
				created := new(v1alpha1.AutoscalingRunnerSet)
				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
				if err != nil {
					return "", err
				}
				if len(created.Finalizers) == 0 {
					return "", nil
				}
				return created.Finalizers[0], nil
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")

		err = k8sClient.Delete(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")

		err = k8sClient.Delete(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role binding")

		err = k8sClient.Delete(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode role")

		err = k8sClient.Delete(ctx, serviceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to delete kubernetes mode service account")

		Eventually(
			func() bool {
				r := new(rbacv1.RoleBinding)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      roleBinding.Name,
					Namespace: roleBinding.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role binding to be cleaned up")

		Eventually(
			func() bool {
				r := new(rbacv1.Role)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      role.Name,
					Namespace: role.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role to be cleaned up")
	})

	It("Should clean up manager permissions and no-permission service account", func() {
		ctx := context.Background()
		autoscalingNS, mgr := createNamespace(GinkgoT(), k8sClient)

		controller := &AutoscalingRunnerSetReconciler{
			Client:                             mgr.GetClient(),
			Scheme:                             mgr.GetScheme(),
			Log:                                logf.Log,
			ControllerNamespace:                autoscalingNS.Name,
			DefaultRunnerScaleSetListenerImage: "ghcr.io/actions/arc",
			ActionsClient:                      fake.NewMultiClient(),
		}
		err := controller.SetupWithManager(mgr)
		Expect(err).NotTo(HaveOccurred(), "failed to setup controller")

		startManagers(GinkgoT(), mgr)

		min := 1
		max := 10
		autoscalingRunnerSet := &v1alpha1.AutoscalingRunnerSet{
			ObjectMeta: metav1.ObjectMeta{
				Name:      "test-asrs",
				Namespace: autoscalingNS.Name,
				Labels: map[string]string{
					"app.kubernetes.io/name": "gha-runner-scale-set",
				},
				Annotations: map[string]string{
					AnnotationKeyManagerRoleName:                "manager-role",
					AnnotationKeyManagerRoleBindingName:         "manager-role-binding",
					AnnotationKeyGitHubSecretName:               "gh-secret-name",
					AnnotationKeyNoPermissionServiceAccountName: "no-permission-sa",
				},
			},
			Spec: v1alpha1.AutoscalingRunnerSetSpec{
				GitHubConfigUrl: "https://github.com/owner/repo",
				MaxRunners:      &max,
				MinRunners:      &min,
				RunnerGroup:     "testgroup",
				Template: corev1.PodTemplateSpec{
					Spec: corev1.PodSpec{
						Containers: []corev1.Container{
							{
								Name:  "runner",
								Image: "ghcr.io/actions/runner",
							},
						},
					},
				},
			},
		}

		secret := &corev1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
			Data: map[string][]byte{
				"github_token": []byte(defaultGitHubToken),
			},
		}

		err = k8sClient.Create(context.Background(), secret)
		Expect(err).NotTo(HaveOccurred(), "failed to create github secret")

		autoscalingRunnerSet.Spec.GitHubConfigSecret = secret.Name

		role := &rbacv1.Role{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to create manager role")

		roleBinding := &rbacv1.RoleBinding{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
			RoleRef: rbacv1.RoleRef{
				APIGroup: rbacv1.GroupName,
				Kind:     "Role",
				Name:     role.Name,
			},
		}

		err = k8sClient.Create(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to create manager role binding")

		noPermissionServiceAccount := &corev1.ServiceAccount{
			ObjectMeta: metav1.ObjectMeta{
				Name:       autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
				Namespace:  autoscalingRunnerSet.Namespace,
				Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName},
			},
		}

		err = k8sClient.Create(ctx, noPermissionServiceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to create no permission service account")

		err = k8sClient.Create(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to create AutoScalingRunnerSet")

		Eventually(
			func() (string, error) {
				created := new(v1alpha1.AutoscalingRunnerSet)
				err := k8sClient.Get(ctx, client.ObjectKey{Name: autoscalingRunnerSet.Name, Namespace: autoscalingRunnerSet.Namespace}, created)
				if err != nil {
					return "", err
				}
				if len(created.Finalizers) == 0 {
					return "", nil
				}
				return created.Finalizers[0], nil
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeEquivalentTo(autoscalingRunnerSetFinalizerName), "AutoScalingRunnerSet should have a finalizer")

		err = k8sClient.Delete(ctx, autoscalingRunnerSet)
		Expect(err).NotTo(HaveOccurred(), "failed to delete autoscaling runner set")

		err = k8sClient.Delete(ctx, noPermissionServiceAccount)
		Expect(err).NotTo(HaveOccurred(), "failed to delete no permission service account")

		err = k8sClient.Delete(ctx, secret)
		Expect(err).NotTo(HaveOccurred(), "failed to delete GitHub secret")

		err = k8sClient.Delete(ctx, roleBinding)
		Expect(err).NotTo(HaveOccurred(), "failed to delete manager role binding")

		err = k8sClient.Delete(ctx, role)
		Expect(err).NotTo(HaveOccurred(), "failed to delete manager role")

		Eventually(
			func() bool {
				r := new(corev1.ServiceAccount)
				err := k8sClient.Get(
					ctx,
					types.NamespacedName{
						Name:      noPermissionServiceAccount.Name,
						Namespace: noPermissionServiceAccount.Namespace,
					},
					r,
				)
				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected no permission service account to be cleaned up")

		Eventually(
			func() bool {
				r := new(corev1.Secret)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      secret.Name,
					Namespace: secret.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected GitHub secret to be cleaned up")

		Eventually(
			func() bool {
				r := new(rbacv1.RoleBinding)
				err := k8sClient.Get(ctx, types.NamespacedName{
					Name:      roleBinding.Name,
					Namespace: roleBinding.Namespace,
				}, r)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role binding to be cleaned up")

		Eventually(
			func() bool {
				r := new(rbacv1.Role)
				err := k8sClient.Get(
					ctx,
					types.NamespacedName{
						Name:      role.Name,
						Namespace: role.Namespace,
					},
					r,
				)

				return errors.IsNotFound(err)
			},
			autoscalingRunnerSetTestTimeout,
			autoscalingRunnerSetTestInterval,
		).Should(BeTrue(), "Expected role to be cleaned up")
	})
})

@@ -43,6 +43,17 @@ const (
	labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)

// Annotations applied for later cleanup of resources
const (
	AnnotationKeyManagerRoleBindingName           = "actions.github.com/cleanup-manager-role-binding"
	AnnotationKeyManagerRoleName                  = "actions.github.com/cleanup-manager-role-name"
	AnnotationKeyKubernetesModeRoleName           = "actions.github.com/cleanup-kubernetes-mode-role-name"
	AnnotationKeyKubernetesModeRoleBindingName    = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
	AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
	AnnotationKeyGitHubSecretName                 = "actions.github.com/cleanup-github-secret-name"
	AnnotationKeyNoPermissionServiceAccountName   = "actions.github.com/cleanup-no-permission-service-account-name"
)

var commonLabelKeys = [...]string{
	LabelKeyKubernetesPartOf,
	LabelKeyKubernetesComponent,
@@ -56,6 +67,21 @@ var commonLabelKeys = [...]string{

const labelValueKubernetesPartOf = "gha-runner-scale-set"

const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent

// scaleSetListenerImagePullPolicy is applied to all listeners
var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy

func SetListenerImagePullPolicy(pullPolicy string) bool {
	switch p := corev1.PullPolicy(pullPolicy); p {
	case corev1.PullAlways, corev1.PullNever, corev1.PullIfNotPresent:
		scaleSetListenerImagePullPolicy = p
		return true
	default:
		return false
	}
}
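
// Example wiring (mirrors the main.go hunk later in this diff; the env var
// name comes from that hunk):
//
//	if ok := SetListenerImagePullPolicy(os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")); !ok {
//		// invalid or empty value: DefaultScaleSetListenerImagePullPolicy stays in effect
//	}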

type resourceBuilder struct{}

func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
@@ -150,7 +176,7 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
			Name:            autoscalingListenerContainerName,
			Image:           autoscalingListener.Spec.Image,
			Env:             listenerEnv,
			ImagePullPolicy: corev1.PullIfNotPresent,
			ImagePullPolicy: autoscalingListener.Spec.ImagePullPolicy,
			Command: []string{
				"/github-runnerscaleset-listener",
			},
@@ -364,6 +390,7 @@ func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.
		MinRunners:       effectiveMinRunners,
		MaxRunners:       effectiveMaxRunners,
		Image:            image,
		ImagePullPolicy:  scaleSetListenerImagePullPolicy,
		ImagePullSecrets: imagePullSecrets,
		Proxy:            autoscalingRunnerSet.Spec.Proxy,
		GitHubServerTLS:  autoscalingRunnerSet.Spec.GitHubServerTLS,

@@ -285,16 +285,20 @@ func secretDataToGitHubClientConfig(data map[string][]byte) (*github.Config, err

	appID := string(data["github_app_id"])

	conf.AppID, err = strconv.ParseInt(appID, 10, 64)
	if err != nil {
		return nil, err
	if appID != "" {
		conf.AppID, err = strconv.ParseInt(appID, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	instID := string(data["github_app_installation_id"])

	conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
	if err != nil {
		return nil, err
	if instID != "" {
		conf.AppInstallationID, err = strconv.ParseInt(instID, 10, 64)
		if err != nil {
			return nil, err
		}
	}

	conf.AppPrivateKey = string(data["github_app_private_key"])

@@ -36,7 +36,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
  --namespace "${NAMESPACE}" \
  --create-namespace \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
  --version 0.3.0
  --version 0.4.0
```

1. Generate a Personal Access Token (PAT) or create and install a GitHub App. See [Creating a personal access token](https://docs.github.com/en/github/authenticating-to-github/creating-a-personal-access-token) and [Creating a GitHub App](https://docs.github.com/en/developers/apps/creating-a-github-app).
@@ -57,7 +57,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
  --create-namespace \
  --set githubConfigUrl="${GITHUB_CONFIG_URL}" \
  --set githubConfigSecret.github_token="${GITHUB_PAT}" \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
```

```bash
@@ -75,7 +75,7 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
  --set githubConfigSecret.github_app_id="${GITHUB_APP_ID}" \
  --set githubConfigSecret.github_app_installation_id="${GITHUB_APP_INSTALLATION_ID}" \
  --set githubConfigSecret.github_app_private_key="${GITHUB_APP_PRIVATE_KEY}" \
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.3.0
  oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set --version 0.4.0
```

1. Check your installation. If everything went well, you should see the following:
@@ -84,8 +84,8 @@ https://user-images.githubusercontent.com/568794/212668313-8946ddc5-60c1-461f-a7
$ helm list -n "${NAMESPACE}"

NAME           NAMESPACE   REVISION  UPDATED                                 STATUS    CHART                                  APP VERSION
arc            arc-systems 1         2023-01-18 10:03:36.610534934 +0000 UTC deployed  gha-runner-scale-set-controller-0.3.0  preview
arc-runner-set arc-systems 1         2023-01-18 10:20:14.795285645 +0000 UTC deployed  gha-runner-scale-set-0.3.0             0.3.0
arc            arc-systems 1         2023-01-18 10:03:36.610534934 +0000 UTC deployed  gha-runner-scale-set-controller-0.4.0  preview
arc-runner-set arc-systems 1         2023-01-18 10:20:14.795285645 +0000 UTC deployed  gha-runner-scale-set-0.4.0             0.4.0
```

```bash
@@ -140,7 +140,7 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will

```bash
helm pull oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller \
  --version 0.3.0 \
  --version 0.4.0 \
  --untar && \
kubectl replace -f <PATH>/gha-runner-scale-set-controller/crds/
```

@@ -149,6 +149,24 @@ Upgrading actions-runner-controller requires a few extra steps because CRDs will

## Troubleshooting

### I'm using the charts from the `master` branch and the controller is not working

The `master` branch is highly unstable! We offer no guarantees that the charts in the `master` branch will work at any given time. If you're using the charts from the `master` branch, you should expect to encounter issues. Please use the latest release instead.

### Controller pod is running but the runner set listener pod is not

Inspect the logs of the controller first and check for errors. If there are no errors and the runner set listener pod is still not running, make sure that the **controller pod has access to the Kubernetes API server in your cluster!**

You'll see something similar to the following in the logs of the controller pod:

```log
kubectl logs <controller_pod_name> -c manager
17:35:28.661069       1 request.go:690] Waited for 1.032376652s due to client-side throttling, not priority and fairness, request: GET:https://10.0.0.1:443/apis/monitoring.coreos.com/v1alpha1?timeout=32s
2023-03-15T17:35:29Z    INFO    starting manager
```

If you have a proxy configured or you're using a sidecar proxy that's automatically injected (think [Istio](https://istio.io/)), you need to make sure it's configured appropriately to allow traffic from the controller container (manager) to the Kubernetes API server.
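
For a quick connectivity check, you can run a throwaway pod next to the controller and probe the API server directly. This is a sketch: the `arc-systems` namespace matches the quickstart above, and default RBAC is assumed to allow anonymous access to `/version`.

```bash
# Launch a one-off curl pod in the controller's namespace and hit the
# in-cluster API server endpoint; expect a JSON version payload back.
kubectl run api-check -n arc-systems --rm -it --restart=Never \
  --image=curlimages/curl --command -- \
  curl -sk https://kubernetes.default.svc/version
```

If this call hangs or is rejected, traffic from that namespace to the API server is being blocked (for example by a network policy or proxy), which would also affect the controller.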

### Check the logs

You can check the logs of the controller pod using the following command:

@@ -219,6 +237,35 @@ To fix this, you can either:

## Changelog

### v0.4.0

#### ⚠️ Warning

This release contains a major change related to the way permissions are
applied to the manager ([#2276](https://github.com/actions/actions-runner-controller/pull/2276) and [#2363](https://github.com/actions/actions-runner-controller/pull/2363)).

Please evaluate these changes carefully before upgrading.

#### Major changes

1. Surfaced EphemeralRunnerSet stats to AutoscalingRunnerSet [#2382](https://github.com/actions/actions-runner-controller/pull/2382)
1. Improved security posture by removing list/watch secrets permission from manager cluster role
   [#2276](https://github.com/actions/actions-runner-controller/pull/2276)
1. Improved security posture by delaying role/rolebinding creation to gha-runner-scale-set during installation
   [#2363](https://github.com/actions/actions-runner-controller/pull/2363)
1. Improved security posture by supporting watching a single namespace from the controller
   [#2374](https://github.com/actions/actions-runner-controller/pull/2374)
1. Added labels to AutoscalingRunnerSet subresources to allow easier inspection [#2391](https://github.com/actions/actions-runner-controller/pull/2391)
1. Fixed a bug preventing env variables from being specified
   [#2450](https://github.com/actions/actions-runner-controller/pull/2450)
1. Enhanced quickstart troubleshooting guides
   [#2435](https://github.com/actions/actions-runner-controller/pull/2435)
1. Fixed handling so the extra dind container is ignored when container mode type is "dind"
   [#2418](https://github.com/actions/actions-runner-controller/pull/2418)
1. Added additional cleanup finalizers [#2433](https://github.com/actions/actions-runner-controller/pull/2433)
1. The gha-runner-scale-set listener pod now inherits the ImagePullPolicy from the manager pod [#2477](https://github.com/actions/actions-runner-controller/pull/2477)
1. Treated `.ghe.com` domains as hosted environments [#2480](https://github.com/actions/actions-runner-controller/pull/2480)

### v0.3.0

#### Major changes

@@ -3,6 +3,7 @@ package actions
import (
	"fmt"
	"net/url"
	"os"
	"strings"
)

@@ -34,9 +35,7 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) {
		return nil, err
	}

	isHosted := u.Host == "github.com" ||
		u.Host == "www.github.com" ||
		u.Host == "github.localhost"
	isHosted := isHostedGitHubURL(u)

	configURL := &GitHubConfig{
		ConfigURL: u,

@@ -76,23 +75,35 @@ func ParseGitHubConfigFromURL(in string) (*GitHubConfig, error) {
func (c *GitHubConfig) GitHubAPIURL(path string) *url.URL {
	result := &url.URL{
		Scheme: c.ConfigURL.Scheme,
		Host:   c.ConfigURL.Host, // default for Enterprise mode
		Path:   "/api/v3",        // default for Enterprise mode
	}

	switch c.ConfigURL.Host {
	// Hosted
	case "github.com", "github.localhost":
		result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host)
	// re-routing www.github.com to api.github.com
	case "www.github.com":
		result.Host = "api.github.com"
	isHosted := isHostedGitHubURL(c.ConfigURL)

	// Enterprise
	default:
		result.Host = c.ConfigURL.Host
		result.Path = "/api/v3"
	if isHosted {
		result.Host = fmt.Sprintf("api.%s", c.ConfigURL.Host)
		result.Path = ""

		if strings.EqualFold("www.github.com", c.ConfigURL.Host) {
			// re-routing www.github.com to api.github.com
			result.Host = "api.github.com"
		}
	}

	result.Path += path

	return result
}

func isHostedGitHubURL(u *url.URL) bool {
	_, forceGhes := os.LookupEnv("GITHUB_ACTIONS_FORCE_GHES")
	if forceGhes {
		return false
	}

	return strings.EqualFold(u.Host, "github.com") ||
		strings.EqualFold(u.Host, "www.github.com") ||
		strings.EqualFold(u.Host, "github.localhost") ||
		strings.HasSuffix(u.Host, ".ghe.com")
}
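
// Routing summary, as exercised by the tests below:
//
//	https://github.com/org/repo     -> https://api.github.com<path>
//	https://github.ghe.com/org/repo -> https://api.github.ghe.com<path>
//	https://ghes.com/org/repo       -> https://ghes.com/api/v3<path>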

@@ -3,6 +3,7 @@ package actions_test
import (
	"errors"
	"net/url"
	"os"
	"strings"
	"testing"

@@ -117,6 +118,16 @@ func TestGitHubConfig(t *testing.T) {
			IsHosted: false,
		},
	},
	{
		configURL: "https://my-ghes.ghe.com/org/",
		expected: &actions.GitHubConfig{
			Scope:        actions.GitHubScopeOrganization,
			Enterprise:   "",
			Organization: "org",
			Repository:   "",
			IsHosted:     true,
		},
	},
	}

	for _, test := range tests {
@@ -151,9 +162,35 @@ func TestGitHubConfig_GitHubAPIURL(t *testing.T) {
	t.Run("when hosted", func(t *testing.T) {
		config, err := actions.ParseGitHubConfigFromURL("https://github.com/org/repo")
		require.NoError(t, err)
		assert.True(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://api.github.com/some/path", result.String())
	})
	t.Run("when not hosted", func(t *testing.T) {})
	t.Run("when hosted with ghe.com", func(t *testing.T) {
		config, err := actions.ParseGitHubConfigFromURL("https://github.ghe.com/org/repo")
		require.NoError(t, err)
		assert.True(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://api.github.ghe.com/some/path", result.String())
	})
	t.Run("when not hosted", func(t *testing.T) {
		config, err := actions.ParseGitHubConfigFromURL("https://ghes.com/org/repo")
		require.NoError(t, err)
		assert.False(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://ghes.com/api/v3/some/path", result.String())
	})
	t.Run("when not hosted with ghe.com", func(t *testing.T) {
		os.Setenv("GITHUB_ACTIONS_FORCE_GHES", "1")
		defer os.Unsetenv("GITHUB_ACTIONS_FORCE_GHES")
		config, err := actions.ParseGitHubConfigFromURL("https://test.ghe.com/org/repo")
		require.NoError(t, err)
		assert.False(t, config.IsHosted)

		result := config.GitHubAPIURL("/some/path")
		assert.Equal(t, "https://test.ghe.com/api/v3/some/path", result.String())
	})
}

7
main.go
@@ -170,6 +170,13 @@ func main() {
		}
	}

	listenerPullPolicy := os.Getenv("CONTROLLER_MANAGER_LISTENER_IMAGE_PULL_POLICY")
	if ok := actionsgithubcom.SetListenerImagePullPolicy(listenerPullPolicy); ok {
		log.Info("AutoscalingListener image pull policy changed", "ImagePullPolicy", listenerPullPolicy)
	} else {
		log.Info("Using default AutoscalingListener image pull policy", "ImagePullPolicy", actionsgithubcom.DefaultScaleSetListenerImagePullPolicy)
	}

	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Scheme:   scheme,
		NewCache: newCache,

@@ -136,12 +136,27 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
	// job_conclusion -> (neutral, success, skipped, cancelled, timed_out, action_required, failure)
	githubWorkflowJobConclusionsTotal.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Inc()

	parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
	if err != nil {
		log.Error(err, "reading workflow job log")
		return
	} else {
		log.Info("reading workflow_job logs", keysAndValues...)
	var (
		exitCode       = "na"
		runTimeSeconds *float64
	)

	// We need to do our best not to fail the whole event processing
	// when the user provided no GitHub API credentials.
	// See https://github.com/actions/actions-runner-controller/issues/2424
	if reader.GitHubClient != nil {
		parseResult, err := reader.fetchAndParseWorkflowJobLogs(ctx, e)
		if err != nil {
			log.Error(err, "reading workflow job log")
			return
		}

		exitCode = parseResult.ExitCode

		s := parseResult.RunTime.Seconds()
		runTimeSeconds = &s

		log.WithValues(keysAndValues...).Info("reading workflow_job logs", "exit_code", exitCode)
	}

	if *e.WorkflowJob.Conclusion == "failure" {
@@ -167,18 +182,20 @@ func (reader *EventReader) ProcessWorkflowJobEvent(ctx context.Context, event in
			}
			if *conclusion == "timed_out" {
				failedStep = fmt.Sprint(i)
				parseResult.ExitCode = "timed_out"
				exitCode = "timed_out"
				break
			}
		}
		githubWorkflowJobFailuresTotal.With(
			extraLabel("failed_step", failedStep,
				extraLabel("exit_code", parseResult.ExitCode, labels),
				extraLabel("exit_code", exitCode, labels),
			),
		).Inc()
	}

	githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(parseResult.RunTime.Seconds())
	if runTimeSeconds != nil {
		githubWorkflowJobRunDurationSeconds.With(extraLabel("job_conclusion", *e.WorkflowJob.Conclusion, labels)).Observe(*runTimeSeconds)
	}
	}
}

@@ -5,17 +5,17 @@ import (
	"time"
)

func init() {
	rand.Seed(time.Now().UnixNano())
}

const letterBytes = "abcdefghijklmnopqrstuvwxyz"
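
// Note: the dedicated rand.Source below replaces the global-seeding init
// above; rand.Seed is deprecated as of Go 1.20, and a local source avoids
// mutating package-global state.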
var (
	random = rand.New(rand.NewSource(time.Now().UnixNano()))
)

// Copied from https://stackoverflow.com/a/31832326 with thanks
func RandStringBytesRmndr(n int) string {
	b := make([]byte, n)
	for i := range b {
		b[i] = letterBytes[rand.Int63()%int64(len(letterBytes))]
		b[i] = letterBytes[random.Int63()%int64(len(letterBytes))]
	}
	return string(b)
}