Compare commits

...

8 Commits

Author SHA1 Message Date
Yusuke Kuoka
3e0bc3f7be Fix docker.sock permission error for non-dind Ubuntu 20.04 runners since v0.27.2 (#2499)
#2490 has been happening since v0.27.2 for non-dind runners based on Ubuntu 20.04 runner images. It does not affect Ubuntu 22.04 non-dind runners(i.e. runners with dockerd sidecars) and Ubuntu 20.04/22.04 dind runners(i.e. runners without dockerd sidecars). However, presuming many folks are still using Ubuntu 20.04 runners and non-dind runners, we should fix it.

This change tries to fix it by defaulting to the docker group id 1001 used by Ubuntu 20.04 runners, and use gid 121 for Ubuntu 22.04 runners. We use the image tag to see which Ubuntu version the runner is based on. The algorithm is so simple- we assume it's Ubuntu-22.04-based if the image tag contains "22.04".

This might be a breaking change for folks who have already upgraded to Ubuntu 22.04 runners using their own custom runner images. Note again; we rely on the image tag to detect Ubuntu 22.04 runner images and use the proper docker gid- Folks using our official Ubuntu 22.04 runner images are not affected. It is a breaking change anyway, so I have added a remedy-

ARC got a new flag, `--docker-gid`, which defaults to `1001` but can be set to `121` or whatever gid the operator/admin likes. This can be set to `--docker-gid=121`, for example, if you are using your own custom runner image based on Ubuntu 22.04 and the image tag does not contain "22.04".

Fixes #2490
2023-04-17 21:30:41 +09:00
Nikola Jokic
ba1ac0990b Reordering methods and constants so it is easier to look it up (#2501) 2023-04-12 09:50:23 +02:00
Nikola Jokic
76fe43e8e0 Update limit manager role permissions ADR (#2500)
Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2023-04-11 16:25:43 +02:00
Nikola Jokic
8869ad28bb Fix e2e tests infinite looping when waiting for resources (#2496)
Co-authored-by: Tingluo Huang <tingluohuang@github.com>
2023-04-10 21:03:02 +02:00
Nikola Jokic
b86af190f7 Extend manager roles to accept ephemeralrunnerset/finalizers (#2493) 2023-04-10 08:49:32 +02:00
Bassem Dghaidi
1a491cbfe5 Fix the publish chart workflow (#2489)
Co-authored-by: Nikola Jokic <jokicnikola07@gmail.com>
2023-04-06 08:01:48 -04:00
Yusuke Kuoka
087f20fd5d Fix chart publishing workflow (#2487) 2023-04-05 12:20:12 -04:00
Hidetake Iwata
a880114e57 chart: Bump version to 0.23.1 (#2483) 2023-04-05 22:39:29 +09:00
24 changed files with 767 additions and 481 deletions

View File

@@ -5,7 +5,7 @@ on:
branches: branches:
- master - master
pull_request: pull_request:
branches: branches:
- master - master
workflow_dispatch: workflow_dispatch:
@@ -21,6 +21,7 @@ env:
jobs: jobs:
default-setup: default-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -55,11 +56,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -84,11 +86,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -107,6 +110,7 @@ jobs:
single-namespace-setup: single-namespace-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -143,11 +147,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -172,11 +177,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -195,6 +201,7 @@ jobs:
dind-mode-setup: dind-mode-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: arc-test-dind-workflow.yaml WORKFLOW_FILE: arc-test-dind-workflow.yaml
@@ -229,11 +236,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -259,11 +267,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -282,6 +291,7 @@ jobs:
kubernetes-mode-setup: kubernetes-mode-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml" WORKFLOW_FILE: "arc-test-kubernetes-workflow.yaml"
@@ -321,11 +331,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -355,11 +366,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -378,6 +390,7 @@ jobs:
auth-proxy-setup: auth-proxy-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -412,11 +425,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -453,11 +467,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -476,6 +491,7 @@ jobs:
anonymous-proxy-setup: anonymous-proxy-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -510,11 +526,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -545,11 +562,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -568,6 +586,7 @@ jobs:
self-signed-ca-setup: self-signed-ca-setup:
runs-on: ubuntu-latest runs-on: ubuntu-latest
timeout-minutes: 20
if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.id == github.repository_id
env: env:
WORKFLOW_FILE: "arc-test-workflow.yaml" WORKFLOW_FILE: "arc-test-workflow.yaml"
@@ -602,11 +621,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller" echo "Timeout waiting for controller pod with label app.kubernetes.io/name=gha-runner-scale-set-controller"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l app.kubernetes.io/name=gha-runner-scale-set-controller
kubectl get pod -n arc-systems kubectl get pod -n arc-systems
@@ -629,11 +649,12 @@ jobs:
cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem cat ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for mitmproxy generate its CA cert" echo "Timeout waiting for mitmproxy generate its CA cert"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt sudo cp ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.pem ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt sudo chown runner ${{ github.workspace }}/mitmproxy/mitmproxy-ca-cert.crt
@@ -661,11 +682,12 @@ jobs:
echo "Pod found: $POD_NAME" echo "Pod found: $POD_NAME"
break break
fi fi
if [ "$count" -ge 10 ]; then if [ "$count" -ge 60 ]; then
echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME" echo "Timeout waiting for listener pod with label actions.github.com/scale-set-name=$ARC_NAME"
exit 1 exit 1
fi fi
sleep 1 sleep 1
count=$((count+1))
done done
kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME kubectl wait --timeout=30s --for=condition=ready pod -n arc-systems -l actions.github.com/scale-set-name=$ARC_NAME
kubectl get pod -n arc-systems kubectl get pod -n arc-systems

View File

@@ -5,15 +5,21 @@ name: Publish Helm Chart
on: on:
push: push:
branches: branches:
- master - master
paths: paths:
- 'charts/**' - 'charts/**'
- '.github/workflows/publish-chart.yaml' - '.github/workflows/publish-chart.yaml'
- '!charts/actions-runner-controller/docs/**' - '!charts/actions-runner-controller/docs/**'
- '!charts/gha-runner-scale-set-controller/**' - '!charts/gha-runner-scale-set-controller/**'
- '!charts/gha-runner-scale-set/**' - '!charts/gha-runner-scale-set/**'
- '!**.md' - '!**.md'
workflow_dispatch: workflow_dispatch:
inputs:
force:
description: 'Force publish even if the chart version is not bumped'
type: boolean
required: true
default: false
env: env:
KUBE_SCORE_VERSION: 1.10.0 KUBE_SCORE_VERSION: 1.10.0
@@ -29,91 +35,86 @@ jobs:
outputs: outputs:
publish-chart: ${{ steps.publish-chart-step.outputs.publish }} publish-chart: ${{ steps.publish-chart-step.outputs.publish }}
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Set up Helm - name: Set up Helm
uses: azure/setup-helm@v3.4 uses: azure/setup-helm@v3.4
with: with:
version: ${{ env.HELM_VERSION }} version: ${{ env.HELM_VERSION }}
- name: Set up kube-score - name: Set up kube-score
run: | run: |
wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score wget https://github.com/zegl/kube-score/releases/download/v${{ env.KUBE_SCORE_VERSION }}/kube-score_${{ env.KUBE_SCORE_VERSION }}_linux_amd64 -O kube-score
chmod 755 kube-score chmod 755 kube-score
- name: Kube-score generated manifests - name: Kube-score generated manifests
run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - run: helm template --values charts/.ci/values-kube-score.yaml charts/* | ./kube-score score - --ignore-test pod-networkpolicy --ignore-test deployment-has-poddisruptionbudget --ignore-test deployment-has-host-podantiaffinity --ignore-test container-security-context --ignore-test pod-probes --ignore-test container-image-tag --enable-optional-test container-security-context-privileged --enable-optional-test container-security-context-readonlyrootfilesystem
--ignore-test pod-networkpolicy
--ignore-test deployment-has-poddisruptionbudget
--ignore-test deployment-has-host-podantiaffinity
--ignore-test container-security-context
--ignore-test pod-probes
--ignore-test container-image-tag
--enable-optional-test container-security-context-privileged
--enable-optional-test container-security-context-readonlyrootfilesystem
# python is a requirement for the chart-testing action below (supports yamllint among other tests) # python is a requirement for the chart-testing action below (supports yamllint among other tests)
- uses: actions/setup-python@v4 - uses: actions/setup-python@v4
with: with:
python-version: '3.7' python-version: '3.11'
- name: Set up chart-testing - name: Set up chart-testing
uses: helm/chart-testing-action@v2.3.1 uses: helm/chart-testing-action@v2.3.1
- name: Run chart-testing (list-changed) - name: Run chart-testing (list-changed)
id: list-changed id: list-changed
run: | run: |
changed=$(ct list-changed --config charts/.ci/ct-config.yaml) changed=$(ct list-changed --config charts/.ci/ct-config.yaml)
if [[ -n "$changed" ]]; then if [[ -n "$changed" ]]; then
echo "::set-output name=changed::true" echo "::set-output name=changed::true"
fi fi
- name: Run chart-testing (lint) - name: Run chart-testing (lint)
run: | run: |
ct lint --config charts/.ci/ct-config.yaml ct lint --config charts/.ci/ct-config.yaml
- name: Create kind cluster - name: Create kind cluster
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
uses: helm/kind-action@v1.4.0 uses: helm/kind-action@v1.4.0
# We need cert-manager already installed in the cluster because we assume the CRDs exist # We need cert-manager already installed in the cluster because we assume the CRDs exist
- name: Install cert-manager - name: Install cert-manager
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
run: | run: |
helm repo add jetstack https://charts.jetstack.io --force-update helm repo add jetstack https://charts.jetstack.io --force-update
helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait helm install cert-manager jetstack/cert-manager --set installCRDs=true --wait
- name: Run chart-testing (install) - name: Run chart-testing (install)
if: steps.list-changed.outputs.changed == 'true' if: steps.list-changed.outputs.changed == 'true'
run: ct install --config charts/.ci/ct-config.yaml run: ct install --config charts/.ci/ct-config.yaml
# WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml # WARNING: This relies on the latest release being at the top of the JSON from GitHub and a clean chart.yaml
- name: Check if Chart Publish is Needed - name: Check if Chart Publish is Needed
id: publish-chart-step id: publish-chart-step
run: | run: |
CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml) CHART_TEXT=$(curl -fs https://raw.githubusercontent.com/${{ github.repository }}/master/charts/actions-runner-controller/Chart.yaml)
NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2) NEW_CHART_VERSION=$(echo "$CHART_TEXT" | grep version: | cut -d ' ' -f 2)
RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4) RELEASE_LIST=$(curl -fs https://api.github.com/repos/${{ github.repository }}/releases | jq .[].tag_name | grep actions-runner-controller | cut -d '"' -f 2 | cut -d '-' -f 4)
LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1) LATEST_RELEASED_CHART_VERSION=$(echo $RELEASE_LIST | cut -d ' ' -f 1)
echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION ]]; then
echo "publish=true" >> $GITHUB_OUTPUT
else
echo "publish=false" >> $GITHUB_OUTPUT
fi
- name: Job summary echo "CHART_VERSION_IN_MASTER=$NEW_CHART_VERSION" >> $GITHUB_ENV
run: | echo "LATEST_CHART_VERSION=$LATEST_RELEASED_CHART_VERSION" >> $GITHUB_ENV
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY # Always publish if force is true
echo "**Status:**" >> $GITHUB_STEP_SUMMARY if [[ $NEW_CHART_VERSION != $LATEST_RELEASED_CHART_VERSION || "${{ inputs.force }}" == "true" ]]; then
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY echo "publish=true" >> $GITHUB_OUTPUT
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY else
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY echo "publish=false" >> $GITHUB_OUTPUT
fi
- name: Job summary
run: |
echo "Chart linting has been completed." >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- chart version in master: ${{ env.CHART_VERSION_IN_MASTER }}" >> $GITHUB_STEP_SUMMARY
echo "- latest chart version: ${{ env.LATEST_CHART_VERSION }}" >> $GITHUB_STEP_SUMMARY
echo "- publish new chart: ${{ steps.publish-chart-step.outputs.publish }}" >> $GITHUB_STEP_SUMMARY
publish-chart: publish-chart:
if: needs.lint-chart.outputs.publish-chart == 'true' if: needs.lint-chart.outputs.publish-chart == 'true'
@@ -121,103 +122,86 @@ jobs:
name: Publish Chart name: Publish Chart
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions: permissions:
contents: write # for helm/chart-releaser-action to push chart release and create a release contents: write # for helm/chart-releaser-action to push chart release and create a release
env: env:
CHART_TARGET_ORG: actions-runner-controller CHART_TARGET_ORG: actions-runner-controller
CHART_TARGET_REPO: actions-runner-controller.github.io CHART_TARGET_REPO: actions-runner-controller.github.io
CHART_TARGET_BRANCH: master CHART_TARGET_BRANCH: master
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v3
with: with:
fetch-depth: 0 fetch-depth: 0
- name: Configure Git - name: Configure Git
run: | run: |
git config user.name "$GITHUB_ACTOR" git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com" git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Get Token - name: Get Token
id: get_workflow_token id: get_workflow_token
uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db uses: peter-murray/workflow-application-token-action@8e1ba3bf1619726336414f1014e37f17fbadf1db
with: with:
application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }} application_id: ${{ secrets.ACTIONS_ACCESS_APP_ID }}
application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }} application_private_key: ${{ secrets.ACTIONS_ACCESS_PK }}
organization: ${{ env.CHART_TARGET_ORG }} organization: ${{ env.CHART_TARGET_ORG }}
- name: Install chart-releaser - name: Install chart-releaser
uses: helm/chart-releaser-action@v1.4.1 uses: helm/chart-releaser-action@v1.4.1
with: with:
install_only: true install_only: true
install_dir: ${{ github.workspace }}/bin install_dir: ${{ github.workspace }}/bin
- name: Package and upload release assets - name: Package and upload release assets
run: | run: |
cr package \ cr package \
${{ github.workspace }}/charts/actions-runner-controller/ \ ${{ github.workspace }}/charts/actions-runner-controller/ \
--package-path .cr-release-packages --package-path .cr-release-packages
cr upload \ cr upload \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--package-path .cr-release-packages \ --package-path .cr-release-packages \
--token ${{ secrets.GITHUB_TOKEN }} --token ${{ secrets.GITHUB_TOKEN }}
- name: Generate updated index.yaml - name: Generate updated index.yaml
run: | run: |
cr index \ cr index \
--owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \ --owner "$(echo ${{ github.repository }} | cut -d '/' -f 1)" \
--git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \ --git-repo "$(echo ${{ github.repository }} | cut -d '/' -f 2)" \
--index-path ${{ github.workspace }}/index.yaml \ --index-path ${{ github.workspace }}/index.yaml \
--pages-branch 'gh-pages' \ --push \
--pages-index-path 'index.yaml' --pages-branch 'gh-pages' \
--pages-index-path 'index.yaml'
# This step is required to not throw away changes made to the index.yaml on every new chart release. # Chart Release was never intended to publish to a different repo
# # this workaround is intended to move the index.yaml to the target repo
# We update the index.yaml in the actions-runner-controller.github.io repo # where the github pages are hosted
# by appending the new chart version to the index.yaml saved in actions-runner-controller repo - name: Checkout target repository
# and copying and commiting the updated index.yaml to the github.io one. uses: actions/checkout@v3
# See below for more context: with:
# - https://github.com/actions-runner-controller/actions-runner-controller.github.io/pull/2 repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
# - https://github.com/actions/actions-runner-controller/pull/2452 path: ${{ env.CHART_TARGET_REPO }}
- name: Commit and push to actions/actions-runner-controller ref: ${{ env.CHART_TARGET_BRANCH }}
run: | token: ${{ steps.get_workflow_token.outputs.token }}
git checkout gh-pages
git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}
# Chart Release was never intended to publish to a different repo - name: Copy index.yaml
# this workaround is intended to move the index.yaml to the target repo run: |
# where the github pages are hosted cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml
- name: Checkout target repository
uses: actions/checkout@v3
with:
repository: ${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}
path: ${{ env.CHART_TARGET_REPO }}
ref: ${{ env.CHART_TARGET_BRANCH }}
token: ${{ steps.get_workflow_token.outputs.token }}
- name: Copy index.yaml - name: Commit and push to target repository
run: | run: |
cp ${{ github.workspace }}/index.yaml ${{ env.CHART_TARGET_REPO }}/actions-runner-controller/index.yaml git config user.name "$GITHUB_ACTOR"
git config user.email "$GITHUB_ACTOR@users.noreply.github.com"
- name: Commit and push to target repository git add .
run: | git commit -m "Update index.yaml"
git config user.name "$GITHUB_ACTOR" git push
git config user.email "$GITHUB_ACTOR@users.noreply.github.com" working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
git add .
git commit -m "Update index.yaml"
git push
working-directory: ${{ github.workspace }}/${{ env.CHART_TARGET_REPO }}
- name: Job summary - name: Job summary
run: | run: |
echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY echo "New helm chart has been published" >> $GITHUB_STEP_SUMMARY
echo "" >> $GITHUB_STEP_SUMMARY echo "" >> $GITHUB_STEP_SUMMARY
echo "**Status:**" >> $GITHUB_STEP_SUMMARY echo "**Status:**" >> $GITHUB_STEP_SUMMARY
echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY echo "- New [index.yaml](https://github.com/${{ env.CHART_TARGET_ORG }}/${{ env.CHART_TARGET_REPO }}/tree/main/actions-runner-controller) pushed" >> $GITHUB_STEP_SUMMARY

View File

@@ -15,10 +15,10 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes # This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version. # to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/) # Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.23.0 version: 0.23.1
# Used as the default manager tag value when no tag property is provided in the values.yaml # Used as the default manager tag value when no tag property is provided in the values.yaml
appVersion: 0.27.1 appVersion: 0.27.2
home: https://github.com/actions/actions-runner-controller home: https://github.com/actions/actions-runner-controller

View File

@@ -78,6 +78,13 @@ rules:
- get - get
- patch - patch
- update - update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
resources: resources:

View File

@@ -52,6 +52,13 @@ rules:
- get - get
- patch - patch
- update - update
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
resources: resources:

View File

@@ -169,7 +169,7 @@ func TestTemplate_CreateManagerClusterRole(t *testing.T) {
assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace") assert.Empty(t, managerClusterRole.Namespace, "ClusterRole should not have a namespace")
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-cluster-role", managerClusterRole.Name)
assert.Equal(t, 15, len(managerClusterRole.Rules)) assert.Equal(t, 16, len(managerClusterRole.Rules))
_, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"}) _, err = helm.RenderTemplateE(t, options, helmChartPath, releaseName, []string{"templates/manager_single_namespace_controller_role.yaml"})
assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped") assert.ErrorContains(t, err, "could not find template templates/manager_single_namespace_controller_role.yaml in chart", "We should get an error because the template should be skipped")
@@ -843,7 +843,7 @@ func TestTemplate_CreateManagerSingleNamespaceRole(t *testing.T) {
assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name) assert.Equal(t, "test-arc-gha-runner-scale-set-controller-manager-single-namespace-role", managerSingleNamespaceWatchRole.Name)
assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace) assert.Equal(t, "demo", managerSingleNamespaceWatchRole.Namespace)
assert.Equal(t, 13, len(managerSingleNamespaceWatchRole.Rules)) assert.Equal(t, 14, len(managerSingleNamespaceWatchRole.Rules))
} }
func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) { func TestTemplate_ManagerSingleNamespaceRoleBinding(t *testing.T) {

View File

@@ -102,6 +102,13 @@ rules:
- patch - patch
- update - update
- watch - watch
- apiGroups:
- actions.github.com
resources:
- ephemeralrunnersets/finalizers
verbs:
- patch
- update
- apiGroups: - apiGroups:
- actions.github.com - actions.github.com
resources: resources:

View File

@@ -41,7 +41,6 @@ import (
const ( const (
autoscalingListenerContainerName = "autoscaler" autoscalingListenerContainerName = "autoscaler"
autoscalingListenerOwnerKey = ".metadata.controller"
autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer" autoscalingListenerFinalizerName = "autoscalinglistener.actions.github.com/finalizer"
) )
@@ -246,65 +245,6 @@ func (r *AutoscalingListenerReconciler) Reconcile(ctx context.Context, req ctrl.
return ctrl.Result{}, nil return ctrl.Result{}, nil
} }
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, autoscalingListenerOwnerKey, groupVersionIndexer); err != nil {
return err
}
labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
var requests []reconcile.Request
labels := obj.GetLabels()
namespace, ok := labels["auto-scaling-listener-namespace"]
if !ok {
return nil
}
name, ok := labels["auto-scaling-listener-name"]
if !ok {
return nil
}
requests = append(requests,
reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
},
)
return requests
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingListener{}).
Owns(&corev1.Pod{}).
Owns(&corev1.ServiceAccount{}).
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}
func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) { func (r *AutoscalingListenerReconciler) cleanupResources(ctx context.Context, autoscalingListener *v1alpha1.AutoscalingListener, logger logr.Logger) (done bool, err error) {
logger.Info("Cleaning up the listener pod") logger.Info("Cleaning up the listener pod")
listenerPod := new(corev1.Pod) listenerPod := new(corev1.Pod)
@@ -615,3 +555,62 @@ func (r *AutoscalingListenerReconciler) createRoleBindingForListener(ctx context
"serviceAccount", serviceAccount.Name) "serviceAccount", serviceAccount.Name)
return ctrl.Result{Requeue: true}, nil return ctrl.Result{Requeue: true}, nil
} }
// SetupWithManager sets up the controller with the Manager.
func (r *AutoscalingListenerReconciler) SetupWithManager(mgr ctrl.Manager) error {
groupVersionIndexer := func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String()
owner := metav1.GetControllerOf(rawObj)
if owner == nil {
return nil
}
// ...make sure it is owned by this controller
if owner.APIVersion != groupVersion || owner.Kind != "AutoscalingListener" {
return nil
}
// ...and if so, return it
return []string{owner.Name}
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.Pod{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &corev1.ServiceAccount{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err
}
labelBasedWatchFunc := func(obj client.Object) []reconcile.Request {
var requests []reconcile.Request
labels := obj.GetLabels()
namespace, ok := labels["auto-scaling-listener-namespace"]
if !ok {
return nil
}
name, ok := labels["auto-scaling-listener-name"]
if !ok {
return nil
}
requests = append(requests,
reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: namespace,
},
},
)
return requests
}
return ctrl.NewControllerManagedBy(mgr).
For(&v1alpha1.AutoscalingListener{}).
Owns(&corev1.Pod{}).
Owns(&corev1.ServiceAccount{}).
Watches(&source.Kind{Type: &rbacv1.Role{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
Watches(&source.Kind{Type: &rbacv1.RoleBinding{}}, handler.EnqueueRequestsFromMapFunc(labelBasedWatchFunc)).
WithEventFilter(predicate.ResourceVersionChangedPredicate{}).
Complete(r)
}

View File

@@ -213,7 +213,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually( Eventually(
func() error { func() error {
podList := new(corev1.PodList) podList := new(corev1.PodList)
err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name}) err := k8sClient.List(ctx, podList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
if err != nil { if err != nil {
return err return err
} }
@@ -231,7 +231,7 @@ var _ = Describe("Test AutoScalingListener controller", func() {
Eventually( Eventually(
func() error { func() error {
serviceAccountList := new(corev1.ServiceAccountList) serviceAccountList := new(corev1.ServiceAccountList)
err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingListener.Name}) err := k8sClient.List(ctx, serviceAccountList, client.InNamespace(autoscalingListener.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingListener.Name})
if err != nil { if err != nil {
return err return err
} }

View File

@@ -42,13 +42,10 @@ import (
) )
const ( const (
// TODO: Replace with shared image. labelKeyRunnerSpecHash = "runner-spec-hash"
autoscalingRunnerSetOwnerKey = ".metadata.controller" autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer"
LabelKeyRunnerSpecHash = "runner-spec-hash" runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
autoscalingRunnerSetFinalizerName = "autoscalingrunnerset.actions.github.com/finalizer" runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
runnerScaleSetIdAnnotationKey = "runner-scale-set-id"
runnerScaleSetNameAnnotationKey = "runner-scale-set-name"
autoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
) )
// AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object // AutoscalingRunnerSetReconciler reconciles a AutoscalingRunnerSet object
@@ -201,10 +198,10 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash() desiredSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
for _, runnerSet := range existingRunnerSets.all() { for _, runnerSet := range existingRunnerSets.all() {
log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[LabelKeyRunnerSpecHash]) log.Info("Find existing ephemeral runner set", "name", runnerSet.Name, "specHash", runnerSet.Labels[labelKeyRunnerSpecHash])
} }
if desiredSpecHash != latestRunnerSet.Labels[LabelKeyRunnerSpecHash] { if desiredSpecHash != latestRunnerSet.Labels[labelKeyRunnerSpecHash] {
log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set") log.Info("Latest runner set spec hash does not match the current autoscaling runner set. Creating a new runner set")
return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log) return r.createEphemeralRunnerSet(ctx, autoscalingRunnerSet, log)
} }
@@ -232,7 +229,7 @@ func (r *AutoscalingRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl
} }
// Our listener pod is out of date, so we need to delete it to get a new recreate. // Our listener pod is out of date, so we need to delete it to get a new recreate.
if listener.Labels[LabelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() { if listener.Labels[labelKeyRunnerSpecHash] != autoscalingRunnerSet.ListenerSpecHash() {
log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name) log.Info("RunnerScaleSetListener is out of date. Deleting it so that it is recreated", "name", listener.Name)
if err := r.Delete(ctx, listener); err != nil { if err := r.Delete(ctx, listener); err != nil {
if kerrors.IsNotFound(err) { if kerrors.IsNotFound(err) {
@@ -601,7 +598,7 @@ func (r *AutoscalingRunnerSetReconciler) createAutoScalingListenerForRunnerSet(c
func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) { func (r *AutoscalingRunnerSetReconciler) listEphemeralRunnerSets(ctx context.Context, autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*EphemeralRunnerSets, error) {
list := new(v1alpha1.EphemeralRunnerSetList) list := new(v1alpha1.EphemeralRunnerSetList)
if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{autoscalingRunnerSetOwnerKey: autoscalingRunnerSet.Name}); err != nil { if err := r.List(ctx, list, client.InNamespace(autoscalingRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: autoscalingRunnerSet.Name}); err != nil {
return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err) return nil, fmt.Errorf("failed to list ephemeral runner sets: %v", err)
} }
@@ -694,7 +691,7 @@ func (r *AutoscalingRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) erro
return []string{owner.Name} return []string{owner.Name}
} }
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, autoscalingRunnerSetOwnerKey, groupVersionIndexer); err != nil { if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunnerSet{}, resourceOwnerKey, groupVersionIndexer); err != nil {
return err return err
} }
@@ -754,12 +751,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding) err := c.client.Get(ctx, types.NamespacedName{Name: roleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName) c.logger.Info("Kubernetes mode role binding finalizer has already been removed", "name", roleBindingName)
return return
} }
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) { err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err) c.err = fmt.Errorf("failed to patch kubernetes mode role binding without finalizer: %w", err)
@@ -797,12 +794,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeRol
err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role) err := c.client.Get(ctx, types.NamespacedName{Name: roleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName) c.logger.Info("Kubernetes mode role finalizer has already been removed", "name", roleName)
return return
} }
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) { err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err) c.err = fmt.Errorf("failed to patch kubernetes mode role without finalizer: %w", err)
@@ -841,12 +838,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeKubernetesModeSer
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount) err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName) c.logger.Info("Kubernetes mode service account finalizer has already been removed", "name", serviceAccountName)
return return
} }
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) { err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err) c.err = fmt.Errorf("failed to patch kubernetes mode service account without finalizer: %w", err)
@@ -885,12 +882,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeNoPermissionServi
err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount) err := c.client.Get(ctx, types.NamespacedName{Name: serviceAccountName, Namespace: c.autoscalingRunnerSet.Namespace}, serviceAccount)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(serviceAccount, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(serviceAccount, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName) c.logger.Info("No permission service account finalizer has already been removed", "name", serviceAccountName)
return return
} }
err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) { err = patch(ctx, c.client, serviceAccount, func(obj *corev1.ServiceAccount) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err) c.err = fmt.Errorf("failed to patch service account without finalizer: %w", err)
@@ -929,12 +926,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeGitHubSecretFinal
err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret) err := c.client.Get(ctx, types.NamespacedName{Name: githubSecretName, Namespace: c.autoscalingRunnerSet.Namespace}, githubSecret)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(githubSecret, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(githubSecret, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName) c.logger.Info("GitHub secret finalizer has already been removed", "name", githubSecretName)
return return
} }
err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) { err = patch(ctx, c.client, githubSecret, func(obj *corev1.Secret) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err) c.err = fmt.Errorf("failed to patch GitHub secret without finalizer: %w", err)
@@ -973,12 +970,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleBindin
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding) err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleBindingName, Namespace: c.autoscalingRunnerSet.Namespace}, roleBinding)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(roleBinding, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(roleBinding, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName) c.logger.Info("Manager role binding finalizer has already been removed", "name", managerRoleBindingName)
return return
} }
err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) { err = patch(ctx, c.client, roleBinding, func(obj *rbacv1.RoleBinding) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err) c.err = fmt.Errorf("failed to patch manager role binding without finalizer: %w", err)
@@ -1017,12 +1014,12 @@ func (c *autoscalingRunnerSetFinalizerDependencyCleaner) removeManagerRoleFinali
err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role) err := c.client.Get(ctx, types.NamespacedName{Name: managerRoleName, Namespace: c.autoscalingRunnerSet.Namespace}, role)
switch { switch {
case err == nil: case err == nil:
if !controllerutil.ContainsFinalizer(role, autoscalingRunnerSetCleanupFinalizerName) { if !controllerutil.ContainsFinalizer(role, AutoscalingRunnerSetCleanupFinalizerName) {
c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName) c.logger.Info("Manager role finalizer has already been removed", "name", managerRoleName)
return return
} }
err = patch(ctx, c.client, role, func(obj *rbacv1.Role) { err = patch(ctx, c.client, role, func(obj *rbacv1.Role) {
controllerutil.RemoveFinalizer(obj, autoscalingRunnerSetCleanupFinalizerName) controllerutil.RemoveFinalizer(obj, AutoscalingRunnerSetCleanupFinalizerName)
}) })
if err != nil { if err != nil {
c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err) c.err = fmt.Errorf("failed to patch manager role without finalizer: %w", err)

View File

@@ -280,10 +280,10 @@ var _ = Describe("Test AutoScalingRunnerSet controller", func() {
return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items)) return "", fmt.Errorf("We should have only 1 EphemeralRunnerSet, but got %v", len(runnerSetList.Items))
} }
return runnerSetList.Items[0].Labels[LabelKeyRunnerSpecHash], nil return runnerSetList.Items[0].Labels[labelKeyRunnerSpecHash], nil
}, },
autoscalingRunnerSetTestTimeout, autoscalingRunnerSetTestTimeout,
autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[LabelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created") autoscalingRunnerSetTestInterval).ShouldNot(BeEquivalentTo(runnerSet.Labels[labelKeyRunnerSpecHash]), "New EphemeralRunnerSet should be created")
// We should create a new listener // We should create a new listener
Eventually( Eventually(
@@ -1160,7 +1160,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
} }
@@ -1171,7 +1171,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeServiceAccountName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
} }
@@ -1182,7 +1182,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyKubernetesModeRoleBindingName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
Subjects: []rbacv1.Subject{ Subjects: []rbacv1.Subject{
{ {
@@ -1317,7 +1317,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubSecretName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
Data: map[string][]byte{ Data: map[string][]byte{
"github_token": []byte(defaultGitHubToken), "github_token": []byte(defaultGitHubToken),
@@ -1333,7 +1333,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
} }
@@ -1344,7 +1344,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyManagerRoleBindingName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
RoleRef: rbacv1.RoleRef{ RoleRef: rbacv1.RoleRef{
APIGroup: rbacv1.GroupName, APIGroup: rbacv1.GroupName,
@@ -1360,7 +1360,7 @@ var _ = Describe("Test external permissions cleanup", func() {
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName], Name: autoscalingRunnerSet.Annotations[AnnotationKeyNoPermissionServiceAccountName],
Namespace: autoscalingRunnerSet.Namespace, Namespace: autoscalingRunnerSet.Namespace,
Finalizers: []string{autoscalingRunnerSetCleanupFinalizerName}, Finalizers: []string{AutoscalingRunnerSetCleanupFinalizerName},
}, },
} }

View File

@@ -1,5 +1,7 @@
package actionsgithubcom package actionsgithubcom
import corev1 "k8s.io/api/core/v1"
const ( const (
LabelKeyRunnerTemplateHash = "runner-template-hash" LabelKeyRunnerTemplateHash = "runner-template-hash"
LabelKeyPodTemplateHash = "pod-template-hash" LabelKeyPodTemplateHash = "pod-template-hash"
@@ -16,3 +18,47 @@ const (
EnvVarHTTPSProxy = "https_proxy" EnvVarHTTPSProxy = "https_proxy"
EnvVarNoProxy = "no_proxy" EnvVarNoProxy = "no_proxy"
) )
// Labels applied to resources
const (
// Kubernetes labels
LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
LabelKeyKubernetesComponent = "app.kubernetes.io/component"
LabelKeyKubernetesVersion = "app.kubernetes.io/version"
// Github labels
LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
LabelKeyGitHubOrganization = "actions.github.com/organization"
LabelKeyGitHubRepository = "actions.github.com/repository"
)
// Finalizer used to protect resources from deletion while AutoscalingRunnerSet is running
const AutoscalingRunnerSetCleanupFinalizerName = "actions.github.com/cleanup-protection"
const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
// Labels applied to listener roles
const (
labelKeyListenerName = "auto-scaling-listener-name"
labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)
// Annotations applied for later cleanup of resources
const (
AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)
// DefaultScaleSetListenerImagePullPolicy is the default pull policy applied
// to the listener when ImagePullPolicy is not specified
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
// ownerKey is field selector matching the owner name of a particular resource
const resourceOwnerKey = ".metadata.controller"

View File

@@ -40,8 +40,7 @@ import (
) )
const ( const (
ephemeralRunnerSetReconcilerOwnerKey = ".metadata.controller" ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
ephemeralRunnerSetFinalizerName = "ephemeralrunner.actions.github.com/finalizer"
) )
// EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object // EphemeralRunnerSetReconciler reconciles a EphemeralRunnerSet object
@@ -56,6 +55,7 @@ type EphemeralRunnerSetReconciler struct {
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/status,verbs=get;update;patch
// +kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunnersets/finalizers,verbs=update;patch
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners,verbs=get;list;watch;create;update;patch;delete
//+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get //+kubebuilder:rbac:groups=actions.github.com,resources=ephemeralrunners/status,verbs=get
@@ -146,7 +146,7 @@ func (r *EphemeralRunnerSetReconciler) Reconcile(ctx context.Context, req ctrl.R
ctx, ctx,
ephemeralRunnerList, ephemeralRunnerList,
client.InNamespace(req.Namespace), client.InNamespace(req.Namespace),
client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: req.Name}, client.MatchingFields{resourceOwnerKey: req.Name},
) )
if err != nil { if err != nil {
log.Error(err, "Unable to list child ephemeral runners") log.Error(err, "Unable to list child ephemeral runners")
@@ -242,7 +242,7 @@ func (r *EphemeralRunnerSetReconciler) cleanUpProxySecret(ctx context.Context, e
func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) { func (r *EphemeralRunnerSetReconciler) cleanUpEphemeralRunners(ctx context.Context, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, log logr.Logger) (bool, error) {
ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList) ephemeralRunnerList := new(v1alpha1.EphemeralRunnerList)
err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{ephemeralRunnerSetReconcilerOwnerKey: ephemeralRunnerSet.Name}) err := r.List(ctx, ephemeralRunnerList, client.InNamespace(ephemeralRunnerSet.Namespace), client.MatchingFields{resourceOwnerKey: ephemeralRunnerSet.Name})
if err != nil { if err != nil {
return false, fmt.Errorf("failed to list child ephemeral runners: %v", err) return false, fmt.Errorf("failed to list child ephemeral runners: %v", err)
} }
@@ -521,7 +521,7 @@ func (r *EphemeralRunnerSetReconciler) actionsClientOptionsFor(ctx context.Conte
// SetupWithManager sets up the controller with the Manager. // SetupWithManager sets up the controller with the Manager.
func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *EphemeralRunnerSetReconciler) SetupWithManager(mgr ctrl.Manager) error {
// Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups. // Index EphemeralRunner owned by EphemeralRunnerSet so we can perform faster look ups.
if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, ephemeralRunnerSetReconcilerOwnerKey, func(rawObj client.Object) []string { if err := mgr.GetFieldIndexer().IndexField(context.Background(), &v1alpha1.EphemeralRunner{}, resourceOwnerKey, func(rawObj client.Object) []string {
groupVersion := v1alpha1.GroupVersion.String() groupVersion := v1alpha1.GroupVersion.String()
// grab the job object, extract the owner... // grab the job object, extract the owner...

View File

@@ -20,40 +20,6 @@ const (
jitTokenKey = "jitToken" jitTokenKey = "jitToken"
) )
// Labels applied to resources
const (
// Kubernetes labels
LabelKeyKubernetesPartOf = "app.kubernetes.io/part-of"
LabelKeyKubernetesComponent = "app.kubernetes.io/component"
LabelKeyKubernetesVersion = "app.kubernetes.io/version"
// Github labels
LabelKeyGitHubScaleSetName = "actions.github.com/scale-set-name"
LabelKeyGitHubScaleSetNamespace = "actions.github.com/scale-set-namespace"
LabelKeyGitHubEnterprise = "actions.github.com/enterprise"
LabelKeyGitHubOrganization = "actions.github.com/organization"
LabelKeyGitHubRepository = "actions.github.com/repository"
)
const AnnotationKeyGitHubRunnerGroupName = "actions.github.com/runner-group-name"
// Labels applied to listener roles
const (
labelKeyListenerName = "auto-scaling-listener-name"
labelKeyListenerNamespace = "auto-scaling-listener-namespace"
)
// Annotations applied for later cleanup of resources
const (
AnnotationKeyManagerRoleBindingName = "actions.github.com/cleanup-manager-role-binding"
AnnotationKeyManagerRoleName = "actions.github.com/cleanup-manager-role-name"
AnnotationKeyKubernetesModeRoleName = "actions.github.com/cleanup-kubernetes-mode-role-name"
AnnotationKeyKubernetesModeRoleBindingName = "actions.github.com/cleanup-kubernetes-mode-role-binding-name"
AnnotationKeyKubernetesModeServiceAccountName = "actions.github.com/cleanup-kubernetes-mode-service-account-name"
AnnotationKeyGitHubSecretName = "actions.github.com/cleanup-github-secret-name"
AnnotationKeyNoPermissionServiceAccountName = "actions.github.com/cleanup-no-permission-service-account-name"
)
var commonLabelKeys = [...]string{ var commonLabelKeys = [...]string{
LabelKeyKubernetesPartOf, LabelKeyKubernetesPartOf,
LabelKeyKubernetesComponent, LabelKeyKubernetesComponent,
@@ -67,8 +33,6 @@ var commonLabelKeys = [...]string{
const labelValueKubernetesPartOf = "gha-runner-scale-set" const labelValueKubernetesPartOf = "gha-runner-scale-set"
const DefaultScaleSetListenerImagePullPolicy = corev1.PullIfNotPresent
// scaleSetListenerImagePullPolicy is applied to all listeners // scaleSetListenerImagePullPolicy is applied to all listeners
var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy var scaleSetListenerImagePullPolicy = DefaultScaleSetListenerImagePullPolicy
@@ -84,6 +48,62 @@ func SetListenerImagePullPolicy(pullPolicy string) bool {
type resourceBuilder struct{} type resourceBuilder struct{}
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
effectiveMinRunners := 0
effectiveMaxRunners := math.MaxInt32
if autoscalingRunnerSet.Spec.MaxRunners != nil {
effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners
}
if autoscalingRunnerSet.Spec.MinRunners != nil {
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners
}
githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl)
if err != nil {
return nil, fmt.Errorf("failed to parse github config from url: %v", err)
}
autoscalingListener := &v1alpha1.AutoscalingListener{
ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerName(autoscalingRunnerSet),
Namespace: namespace,
Labels: map[string]string{
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-scale-set-listener",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubEnterprise: githubConfig.Enterprise,
LabelKeyGitHubOrganization: githubConfig.Organization,
LabelKeyGitHubRepository: githubConfig.Repository,
labelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(),
},
},
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: ephemeralRunnerSet.Name,
MinRunners: effectiveMinRunners,
MaxRunners: effectiveMaxRunners,
Image: image,
ImagePullPolicy: scaleSetListenerImagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
},
}
return autoscalingListener, nil
}
func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod { func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.AutoscalingListener, serviceAccount *corev1.ServiceAccount, secret *corev1.Secret, envs ...corev1.EnvVar) *corev1.Pod {
listenerEnv := []corev1.EnvVar{ listenerEnv := []corev1.EnvVar{
{ {
@@ -207,54 +227,6 @@ func (b *resourceBuilder) newScaleSetListenerPod(autoscalingListener *v1alpha1.A
return newRunnerScaleSetListenerPod return newRunnerScaleSetListenerPod
} }
func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil {
return nil, err
}
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
newLabels := map[string]string{
LabelKeyRunnerSpecHash: runnerSpecHash,
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
LabelKeyKubernetesComponent: "runner-set",
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
}
if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
}
newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
}
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{
GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
Labels: newLabels,
Annotations: newAnnotations,
},
Spec: v1alpha1.EphemeralRunnerSetSpec{
Replicas: 0,
EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
RunnerScaleSetId: runnerScaleSetId,
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
},
},
}
return newEphemeralRunnerSet, nil
}
func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount { func (b *resourceBuilder) newScaleSetListenerServiceAccount(autoscalingListener *v1alpha1.AutoscalingListener) *corev1.ServiceAccount {
return &corev1.ServiceAccount{ return &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -344,60 +316,52 @@ func (b *resourceBuilder) newScaleSetListenerSecretMirror(autoscalingListener *v
return newListenerSecret return newListenerSecret
} }
func (b *resourceBuilder) newAutoScalingListener(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet, ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet, namespace, image string, imagePullSecrets []corev1.LocalObjectReference) (*v1alpha1.AutoscalingListener, error) { func (b *resourceBuilder) newEphemeralRunnerSet(autoscalingRunnerSet *v1alpha1.AutoscalingRunnerSet) (*v1alpha1.EphemeralRunnerSet, error) {
runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey]) runnerScaleSetId, err := strconv.Atoi(autoscalingRunnerSet.Annotations[runnerScaleSetIdAnnotationKey])
if err != nil { if err != nil {
return nil, err return nil, err
} }
runnerSpecHash := autoscalingRunnerSet.RunnerSetSpecHash()
effectiveMinRunners := 0 newLabels := map[string]string{
effectiveMaxRunners := math.MaxInt32 labelKeyRunnerSpecHash: runnerSpecHash,
if autoscalingRunnerSet.Spec.MaxRunners != nil { LabelKeyKubernetesPartOf: labelValueKubernetesPartOf,
effectiveMaxRunners = *autoscalingRunnerSet.Spec.MaxRunners LabelKeyKubernetesComponent: "runner-set",
} LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion],
if autoscalingRunnerSet.Spec.MinRunners != nil { LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name,
effectiveMinRunners = *autoscalingRunnerSet.Spec.MinRunners LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace,
} }
githubConfig, err := actions.ParseGitHubConfigFromURL(autoscalingRunnerSet.Spec.GitHubConfigUrl) if err := applyGitHubURLLabels(autoscalingRunnerSet.Spec.GitHubConfigUrl, newLabels); err != nil {
if err != nil { return nil, fmt.Errorf("failed to apply GitHub URL labels: %v", err)
return nil, fmt.Errorf("failed to parse github config from url: %v", err)
} }
autoscalingListener := &v1alpha1.AutoscalingListener{ newAnnotations := map[string]string{
AnnotationKeyGitHubRunnerGroupName: autoscalingRunnerSet.Annotations[AnnotationKeyGitHubRunnerGroupName],
}
newEphemeralRunnerSet := &v1alpha1.EphemeralRunnerSet{
TypeMeta: metav1.TypeMeta{},
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: scaleSetListenerName(autoscalingRunnerSet), GenerateName: autoscalingRunnerSet.ObjectMeta.Name + "-",
Namespace: namespace, Namespace: autoscalingRunnerSet.ObjectMeta.Namespace,
Labels: map[string]string{ Labels: newLabels,
LabelKeyGitHubScaleSetNamespace: autoscalingRunnerSet.Namespace, Annotations: newAnnotations,
LabelKeyGitHubScaleSetName: autoscalingRunnerSet.Name, },
LabelKeyKubernetesPartOf: labelValueKubernetesPartOf, Spec: v1alpha1.EphemeralRunnerSetSpec{
LabelKeyKubernetesComponent: "runner-scale-set-listener", Replicas: 0,
LabelKeyKubernetesVersion: autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], EphemeralRunnerSpec: v1alpha1.EphemeralRunnerSpec{
LabelKeyGitHubEnterprise: githubConfig.Enterprise, RunnerScaleSetId: runnerScaleSetId,
LabelKeyGitHubOrganization: githubConfig.Organization, GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
LabelKeyGitHubRepository: githubConfig.Repository, GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
LabelKeyRunnerSpecHash: autoscalingRunnerSet.ListenerSpecHash(), Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
PodTemplateSpec: autoscalingRunnerSet.Spec.Template,
}, },
}, },
Spec: v1alpha1.AutoscalingListenerSpec{
GitHubConfigUrl: autoscalingRunnerSet.Spec.GitHubConfigUrl,
GitHubConfigSecret: autoscalingRunnerSet.Spec.GitHubConfigSecret,
RunnerScaleSetId: runnerScaleSetId,
AutoscalingRunnerSetNamespace: autoscalingRunnerSet.Namespace,
AutoscalingRunnerSetName: autoscalingRunnerSet.Name,
EphemeralRunnerSetName: ephemeralRunnerSet.Name,
MinRunners: effectiveMinRunners,
MaxRunners: effectiveMaxRunners,
Image: image,
ImagePullPolicy: scaleSetListenerImagePullPolicy,
ImagePullSecrets: imagePullSecrets,
Proxy: autoscalingRunnerSet.Spec.Proxy,
GitHubServerTLS: autoscalingRunnerSet.Spec.GitHubServerTLS,
},
} }
return autoscalingListener, nil return newEphemeralRunnerSet, nil
} }
func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner { func (b *resourceBuilder) newEphemeralRunner(ephemeralRunnerSet *v1alpha1.EphemeralRunnerSet) *v1alpha1.EphemeralRunner {

View File

@@ -36,7 +36,7 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf]) assert.Equal(t, labelValueKubernetesPartOf, ephemeralRunnerSet.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent]) assert.Equal(t, "runner-set", ephemeralRunnerSet.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion]) assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], ephemeralRunnerSet.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash]) assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName]) assert.Equal(t, autoscalingRunnerSet.Name, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace]) assert.Equal(t, autoscalingRunnerSet.Namespace, ephemeralRunnerSet.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise]) assert.Equal(t, "", ephemeralRunnerSet.Labels[LabelKeyGitHubEnterprise])
@@ -49,7 +49,7 @@ func TestLabelPropagation(t *testing.T) {
assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf]) assert.Equal(t, labelValueKubernetesPartOf, listener.Labels[LabelKeyKubernetesPartOf])
assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent]) assert.Equal(t, "runner-scale-set-listener", listener.Labels[LabelKeyKubernetesComponent])
assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion]) assert.Equal(t, autoscalingRunnerSet.Labels[LabelKeyKubernetesVersion], listener.Labels[LabelKeyKubernetesVersion])
assert.NotEmpty(t, ephemeralRunnerSet.Labels[LabelKeyRunnerSpecHash]) assert.NotEmpty(t, ephemeralRunnerSet.Labels[labelKeyRunnerSpecHash])
assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName]) assert.Equal(t, autoscalingRunnerSet.Name, listener.Labels[LabelKeyGitHubScaleSetName])
assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace]) assert.Equal(t, autoscalingRunnerSet.Namespace, listener.Labels[LabelKeyGitHubScaleSetNamespace])
assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise]) assert.Equal(t, "", listener.Labels[LabelKeyGitHubEnterprise])

View File

@@ -105,12 +105,14 @@ func SetupIntegrationTest(ctx2 context.Context) *testEnvironment {
Log: logf.Log, Log: logf.Log,
Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"), Recorder: mgr.GetEventRecorderFor("runnerreplicaset-controller"),
GitHubClient: multiClient, GitHubClient: multiClient,
RunnerImage: "example/runner:test",
DockerImage: "example/docker:test",
Name: controllerName("runner"), Name: controllerName("runner"),
RegistrationRecheckInterval: time.Millisecond * 100, RegistrationRecheckInterval: time.Millisecond * 100,
RegistrationRecheckJitter: time.Millisecond * 10, RegistrationRecheckJitter: time.Millisecond * 10,
UnregistrationRetryDelay: 1 * time.Second, UnregistrationRetryDelay: 1 * time.Second,
RunnerPodDefaults: RunnerPodDefaults{
RunnerImage: "example/runner:test",
DockerImage: "example/docker:test",
},
} }
err = runnerController.SetupWithManager(mgr) err = runnerController.SetupWithManager(mgr)
Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller") Expect(err).NotTo(HaveOccurred(), "failed to setup runner controller")

View File

@@ -15,6 +15,21 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/client"
) )
func newRunnerPod(template corev1.Pod, runnerSpec arcv1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
return newRunnerPodWithContainerMode("", template, runnerSpec, githubBaseURL, d)
}
func setEnv(c *corev1.Container, name, value string) {
for j := range c.Env {
e := &c.Env[j]
if e.Name == name {
e.Value = value
return
}
}
}
func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume { func newWorkGenericEphemeralVolume(t *testing.T, storageReq string) corev1.Volume {
GBs, err := resource.ParseQuantity(storageReq) GBs, err := resource.ParseQuantity(storageReq)
if err != nil { if err != nil {
@@ -171,7 +186,7 @@ func TestNewRunnerPod(t *testing.T) {
Env: []corev1.EnvVar{ Env: []corev1.EnvVar{
{ {
Name: "DOCKER_GROUP_GID", Name: "DOCKER_GROUP_GID",
Value: "121", Value: "1234",
}, },
}, },
VolumeMounts: []corev1.VolumeMount{ VolumeMounts: []corev1.VolumeMount{
@@ -397,6 +412,50 @@ func TestNewRunnerPod(t *testing.T) {
config: arcv1alpha1.RunnerConfig{}, config: arcv1alpha1.RunnerConfig{},
want: newTestPod(base, nil), want: newTestPod(base, nil),
}, },
{
description: "it should respect DOCKER_GROUP_GID of the dockerd sidecar container",
template: corev1.Pod{
Spec: corev1.PodSpec{
Containers: []corev1.Container{
{
Name: "docker",
Env: []corev1.EnvVar{
{
Name: "DOCKER_GROUP_GID",
Value: "2345",
},
},
},
},
},
},
config: arcv1alpha1.RunnerConfig{},
want: newTestPod(base, func(p *corev1.Pod) {
setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "2345")
}),
},
{
description: "it should add DOCKER_GROUP_GID=1001 to the dockerd sidecar container for Ubuntu 20.04 runners",
template: corev1.Pod{},
config: arcv1alpha1.RunnerConfig{
Image: "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1",
},
want: newTestPod(base, func(p *corev1.Pod) {
setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "1001")
p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-20.04-20210726-1"
}),
},
{
description: "it should add DOCKER_GROUP_GID=121 to the dockerd sidecar container for Ubuntu 22.04 runners",
template: corev1.Pod{},
config: arcv1alpha1.RunnerConfig{
Image: "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1",
},
want: newTestPod(base, func(p *corev1.Pod) {
setEnv(&p.Spec.Containers[1], "DOCKER_GROUP_GID", "121")
p.Spec.Containers[0].Image = "ghcr.io/summerwind/actions-runner:ubuntu-22.04-20210726-1"
}),
},
{ {
description: "dockerdWithinRunnerContainer=true should set privileged=true and omit the dind sidecar container", description: "dockerdWithinRunnerContainer=true should set privileged=true and omit the dind sidecar container",
template: corev1.Pod{}, template: corev1.Pod{},
@@ -552,7 +611,14 @@ func TestNewRunnerPod(t *testing.T) {
for i := range testcases { for i := range testcases {
tc := testcases[i] tc := testcases[i]
t.Run(tc.description, func(t *testing.T) { t.Run(tc.description, func(t *testing.T) {
got, err := newRunnerPod(tc.template, tc.config, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, false) got, err := newRunnerPod(tc.template, tc.config, githubBaseURL, RunnerPodDefaults{
RunnerImage: defaultRunnerImage,
RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
DockerImage: defaultDockerImage,
DockerRegistryMirror: defaultDockerRegistryMirror,
DockerGID: "1234",
UseRunnerStatusUpdateHook: false,
})
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, tc.want, got) require.Equal(t, tc.want, got)
}) })
@@ -713,7 +779,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
Env: []corev1.EnvVar{ Env: []corev1.EnvVar{
{ {
Name: "DOCKER_GROUP_GID", Name: "DOCKER_GROUP_GID",
Value: "121", Value: "1234",
}, },
}, },
VolumeMounts: []corev1.VolumeMount{ VolumeMounts: []corev1.VolumeMount{
@@ -1171,6 +1237,7 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
defaultRunnerImage = "default-runner-image" defaultRunnerImage = "default-runner-image"
defaultRunnerImagePullSecrets = []string{} defaultRunnerImagePullSecrets = []string{}
defaultDockerImage = "default-docker-image" defaultDockerImage = "default-docker-image"
defaultDockerGID = "1234"
defaultDockerRegistryMirror = "" defaultDockerRegistryMirror = ""
githubBaseURL = "api.github.com" githubBaseURL = "api.github.com"
) )
@@ -1190,12 +1257,15 @@ func TestNewRunnerPodFromRunnerController(t *testing.T) {
t.Run(tc.description, func(t *testing.T) { t.Run(tc.description, func(t *testing.T) {
r := &RunnerReconciler{ r := &RunnerReconciler{
RunnerImage: defaultRunnerImage, GitHubClient: multiClient,
RunnerImagePullSecrets: defaultRunnerImagePullSecrets, Scheme: scheme,
DockerImage: defaultDockerImage, RunnerPodDefaults: RunnerPodDefaults{
DockerRegistryMirror: defaultDockerRegistryMirror, RunnerImage: defaultRunnerImage,
GitHubClient: multiClient, RunnerImagePullSecrets: defaultRunnerImagePullSecrets,
Scheme: scheme, DockerImage: defaultDockerImage,
DockerRegistryMirror: defaultDockerRegistryMirror,
DockerGID: defaultDockerGID,
},
} }
got, err := r.newPod(tc.runner) got, err := r.newPod(tc.runner)
require.NoError(t, err) require.NoError(t, err)

View File

@@ -68,15 +68,24 @@ type RunnerReconciler struct {
Recorder record.EventRecorder Recorder record.EventRecorder
Scheme *runtime.Scheme Scheme *runtime.Scheme
GitHubClient *MultiGitHubClient GitHubClient *MultiGitHubClient
RunnerImage string
RunnerImagePullSecrets []string
DockerImage string
DockerRegistryMirror string
Name string Name string
RegistrationRecheckInterval time.Duration RegistrationRecheckInterval time.Duration
RegistrationRecheckJitter time.Duration RegistrationRecheckJitter time.Duration
UseRunnerStatusUpdateHook bool
UnregistrationRetryDelay time.Duration UnregistrationRetryDelay time.Duration
RunnerPodDefaults RunnerPodDefaults
}
type RunnerPodDefaults struct {
RunnerImage string
RunnerImagePullSecrets []string
DockerImage string
DockerRegistryMirror string
// The default Docker group ID to use for the dockerd sidecar container.
// Ubuntu 20.04 runner images assumes 1001 and the 22.04 variant assumes 121 by default.
DockerGID string
UseRunnerStatusUpdateHook bool
} }
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runners,verbs=get;list;watch;create;update;patch;delete
@@ -145,7 +154,7 @@ func (r *RunnerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
ready := runnerPodReady(&pod) ready := runnerPodReady(&pod)
if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.UseRunnerStatusUpdateHook { if (runner.Status.Phase != phase || runner.Status.Ready != ready) && !r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Status.Phase == "" && r.RunnerPodDefaults.UseRunnerStatusUpdateHook {
if pod.Status.Phase == corev1.PodRunning { if pod.Status.Phase == corev1.PodRunning {
// Seeing this message, you can expect the runner to become `Running` soon. // Seeing this message, you can expect the runner to become `Running` soon.
log.V(1).Info( log.V(1).Info(
@@ -292,7 +301,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
return ctrl.Result{}, err return ctrl.Result{}, err
} }
needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes") needsServiceAccount := runner.Spec.ServiceAccountName == "" && (r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes")
if needsServiceAccount { if needsServiceAccount {
serviceAccount := &corev1.ServiceAccount{ serviceAccount := &corev1.ServiceAccount{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
@@ -306,7 +315,7 @@ func (r *RunnerReconciler) processRunnerCreation(ctx context.Context, runner v1a
rules := []rbacv1.PolicyRule{} rules := []rbacv1.PolicyRule{}
if r.UseRunnerStatusUpdateHook { if r.RunnerPodDefaults.UseRunnerStatusUpdateHook {
rules = append(rules, []rbacv1.PolicyRule{ rules = append(rules, []rbacv1.PolicyRule{
{ {
APIGroups: []string{"actions.summerwind.dev"}, APIGroups: []string{"actions.summerwind.dev"},
@@ -583,7 +592,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
} }
} }
pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, ghc.GithubBaseURL, r.UseRunnerStatusUpdateHook) pod, err := newRunnerPodWithContainerMode(runner.Spec.ContainerMode, template, runner.Spec.RunnerConfig, ghc.GithubBaseURL, r.RunnerPodDefaults)
if err != nil { if err != nil {
return pod, err return pod, err
} }
@@ -634,7 +643,7 @@ func (r *RunnerReconciler) newPod(runner v1alpha1.Runner) (corev1.Pod, error) {
if runnerSpec.ServiceAccountName != "" { if runnerSpec.ServiceAccountName != "" {
pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName pod.Spec.ServiceAccountName = runnerSpec.ServiceAccountName
} else if r.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" { } else if r.RunnerPodDefaults.UseRunnerStatusUpdateHook || runner.Spec.ContainerMode == "kubernetes" {
pod.Spec.ServiceAccountName = runner.ObjectMeta.Name pod.Spec.ServiceAccountName = runner.ObjectMeta.Name
} }
@@ -754,13 +763,19 @@ func runnerHookEnvs(pod *corev1.Pod) ([]corev1.EnvVar, error) {
}, nil }, nil
} }
func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHook bool) (corev1.Pod, error) { func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, githubBaseURL string, d RunnerPodDefaults) (corev1.Pod, error) {
var ( var (
privileged bool = true privileged bool = true
dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer dockerdInRunner bool = runnerSpec.DockerdWithinRunnerContainer != nil && *runnerSpec.DockerdWithinRunnerContainer
dockerEnabled bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled dockerEnabled bool = runnerSpec.DockerEnabled == nil || *runnerSpec.DockerEnabled
ephemeral bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral ephemeral bool = runnerSpec.Ephemeral == nil || *runnerSpec.Ephemeral
dockerdInRunnerPrivileged bool = dockerdInRunner dockerdInRunnerPrivileged bool = dockerdInRunner
defaultRunnerImage = d.RunnerImage
defaultRunnerImagePullSecrets = d.RunnerImagePullSecrets
defaultDockerImage = d.DockerImage
defaultDockerRegistryMirror = d.DockerRegistryMirror
useRunnerStatusUpdateHook = d.UseRunnerStatusUpdateHook
) )
if containerMode == "kubernetes" { if containerMode == "kubernetes" {
@@ -1013,10 +1028,22 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
// for actions-runner-controller) so typically should not need to be // for actions-runner-controller) so typically should not need to be
// overridden // overridden
if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok { if ok, _ := envVarPresent("DOCKER_GROUP_GID", dockerdContainer.Env); !ok {
gid := d.DockerGID
// We default to gid 121 for Ubuntu 22.04 images
// See below for more details
// - https://github.com/actions/actions-runner-controller/issues/2490#issuecomment-1501561923
// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-20.04.dockerfile#L14
// - https://github.com/actions/actions-runner-controller/blob/8869ad28bb5a1daaedefe0e988571fe1fb36addd/runner/actions-runner.ubuntu-22.04.dockerfile#L12
if strings.Contains(runnerContainer.Image, "22.04") {
gid = "121"
} else if strings.Contains(runnerContainer.Image, "20.04") {
gid = "1001"
}
dockerdContainer.Env = append(dockerdContainer.Env, dockerdContainer.Env = append(dockerdContainer.Env,
corev1.EnvVar{ corev1.EnvVar{
Name: "DOCKER_GROUP_GID", Name: "DOCKER_GROUP_GID",
Value: "121", Value: gid,
}) })
} }
dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)") dockerdContainer.Args = append(dockerdContainer.Args, "--group=$(DOCKER_GROUP_GID)")
@@ -1240,10 +1267,6 @@ func newRunnerPodWithContainerMode(containerMode string, template corev1.Pod, ru
return *pod, nil return *pod, nil
} }
func newRunnerPod(template corev1.Pod, runnerSpec v1alpha1.RunnerConfig, defaultRunnerImage string, defaultRunnerImagePullSecrets []string, defaultDockerImage, defaultDockerRegistryMirror string, githubBaseURL string, useRunnerStatusUpdateHookEphemeralRole bool) (corev1.Pod, error) {
return newRunnerPodWithContainerMode("", template, runnerSpec, defaultRunnerImage, defaultRunnerImagePullSecrets, defaultDockerImage, defaultDockerRegistryMirror, githubBaseURL, useRunnerStatusUpdateHookEphemeralRole)
}
func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error { func (r *RunnerReconciler) SetupWithManager(mgr ctrl.Manager) error {
name := "runner-controller" name := "runner-controller"
if r.Name != "" { if r.Name != "" {

View File

@@ -45,13 +45,10 @@ type RunnerSetReconciler struct {
Recorder record.EventRecorder Recorder record.EventRecorder
Scheme *runtime.Scheme Scheme *runtime.Scheme
CommonRunnerLabels []string CommonRunnerLabels []string
GitHubClient *MultiGitHubClient GitHubClient *MultiGitHubClient
RunnerImage string
RunnerImagePullSecrets []string RunnerPodDefaults RunnerPodDefaults
DockerImage string
DockerRegistryMirror string
UseRunnerStatusUpdateHook bool
} }
// +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete // +kubebuilder:rbac:groups=actions.summerwind.dev,resources=runnersets,verbs=get;list;watch;create;update;patch;delete
@@ -231,7 +228,7 @@ func (r *RunnerSetReconciler) newStatefulSet(ctx context.Context, runnerSet *v1a
githubBaseURL := ghc.GithubBaseURL githubBaseURL := ghc.GithubBaseURL
pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, r.RunnerImage, r.RunnerImagePullSecrets, r.DockerImage, r.DockerRegistryMirror, githubBaseURL, r.UseRunnerStatusUpdateHook) pod, err := newRunnerPodWithContainerMode(runnerSet.Spec.RunnerConfig.ContainerMode, template, runnerSet.Spec.RunnerConfig, githubBaseURL, r.RunnerPodDefaults)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@@ -86,4 +86,4 @@ Or for example if they're having problems specifically with runners:
This way users don't have to understand ARC moving parts but we still have a This way users don't have to understand ARC moving parts but we still have a
way to target them specifically if we need to. way to target them specifically if we need to.
[^1]: Superseded by [ADR 2023-04-14](2023-04-14-adding-labels-k8s-resources.md) [^1]: Superseded by [ADR 2023-03-14](2023-03-14-adding-labels-k8s-resources.md)

View File

@@ -2,7 +2,7 @@
**Date**: 2023-02-10 **Date**: 2023-02-10
**Status**: Done **Status**: Superseded [^1]
## Context ## Context
@@ -136,3 +136,5 @@ The downside of this mode:
- When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other. - When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other.
- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster. - You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster.
[^1]: Superseded by [ADR 2023-04-11](2023-04-11-limit-manager-role-permission.md)

View File

@@ -0,0 +1,167 @@
# ADR 2023-04-11: Limit Permissions for Service Accounts in Actions-Runner-Controller
**Date**: 2023-04-11
**Status**: Done [^1]
## Context
- `actions-runner-controller` is a Kubernetes CRD (with controller) built using https://github.com/kubernetes-sigs/controller-runtime
- [controller-runtime](https://github.com/kubernetes-sigs/controller-runtime) has a default cache-based k8s API client.Reader to query the k8s API server more efficiently.
- The cache-based API client requires cluster scope `list` and `watch` permission for any resource the controller may query.
- This documentation only scopes to the AutoscalingRunnerSet CRD and its controller.
## Service accounts and their role binding in actions-runner-controller
There are 3 service accounts involved in a working `AutoscalingRunnerSet`-based `actions-runner-controller`:
1. Service account for each Ephemeral runner Pod
This should have the lowest privilege (neither a `RoleBinding` nor a `ClusterRoleBinding`) by default. In the case of `containerMode=kubernetes`, it is granted certain write permissions via a `RoleBinding` that limits them to a single namespace.
> References:
>
> - ./charts/gha-runner-scale-set/templates/no_permission_serviceaccount.yaml
> - ./charts/gha-runner-scale-set/templates/kube_mode_role.yaml
> - ./charts/gha-runner-scale-set/templates/kube_mode_role_binding.yaml
> - ./charts/gha-runner-scale-set/templates/kube_mode_serviceaccount.yaml
2. Service account for AutoScalingListener Pod
This has a `RoleBinding` to a single namespace with a `Role` that has permission to `PATCH` `EphemeralRunnerSet` and `EphemeralRunner`.
3. Service account for the controller manager
Since the CRD controller is a singleton installed in the cluster that manages the CRD across multiple namespaces by default, the service account of the controller manager pod has a `ClusterRoleBinding` to a `ClusterRole` with broader permissions.
The current `ClusterRole` has the following permissions:
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `Pods` (with `Status` sub-resource)
- **Get/List/Create/Delete/Update/Patch/Watch on `Secrets`**
- Get/List/Create/Delete/Update/Patch/Watch on `Roles`
- Get/List/Create/Delete/Update/Patch/Watch on `RoleBindings`
- Get/List/Create/Delete/Update/Patch/Watch on `ServiceAccounts`
> Full list can be found at: https://github.com/actions/actions-runner-controller/blob/facae69e0b189d3b5dd659f36df8a829516d2896/charts/actions-runner-controller-2/templates/manager_role.yaml
## Limit cluster role permission on Secrets
The cluster scope `List` `Secrets` permission might be a blocker for adopting `actions-runner-controller` for certain customers, as they may have restrictions in their cluster that simply don't allow any service account to have the cluster-scoped `List Secrets` permission.
To help these customers and improve security for `actions-runner-controller` in general, we will try to limit the `ClusterRole` permission of the controller manager's service account down to the following:
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `AutoScalingListeners` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunnerSets` (with `Status` and `Finalizer` sub-resource)
- Get/List/Create/Delete/Update/Patch/Watch on `EphemeralRunners` (with `Status` and `Finalizer` sub-resource)
- List/Watch on `Pods`
- List/Watch/Patch on `Roles`
- List/Watch on `RoleBindings`
- List/Watch on `ServiceAccounts`
> We will change the default cache-based client to bypass cache on reading `Secrets` and `ConfigMaps`(ConfigMap is used when you configure `githubServerTLS`), so we can eliminate the need for `List` and `Watch` `Secrets` permission in cluster scope.
Introduce a new `Role` for the controller and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace the controller is deployed. This role will grant the controller's service account required permission to work with `AutoScalingListeners` in the controller namespace.
- Get/Create/Delete on `Pods`
- Get on `Pods/status`
- Get/Create/Delete/Update/Patch on `Secrets`
- Get/Create/Delete/Update/Patch on `ServiceAccounts`
The `Role` and `RoleBinding` creation will happen during the `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`
During `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller`, we will store the controller's service account info as labels on the controller `Deployment`.
Ex:
```yaml
actions.github.com/controller-service-account-namespace: {{ .Release.Namespace }}
actions.github.com/controller-service-account-name: {{ include "gha-runner-scale-set-controller.serviceAccountName" . }}
```
Introduce a new `Role` per `AutoScalingRunnerSet` installation and `RoleBinding` the `Role` with the controller's `ServiceAccount` in the namespace that each `AutoScalingRunnerSet` deployed with the following permission.
- Get/Create/Delete/Update/Patch/List on `Secrets`
- Create/Delete on `Pods`
- Get on `Pods/status`
- Get/Create/Delete/Update/Patch on `Roles`
- Get/Create/Delete/Update/Patch on `RoleBindings`
- Get on `ConfigMaps`
The `Role` and `RoleBinding` creation will happen during `helm install demo oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set` to grant the controller's service account required permissions to operate in the namespace the `AutoScalingRunnerSet` deployed.
The `gha-runner-scale-set` helm chart will try to find the `Deployment` of the controller using `helm lookup`, and get the service account info from the labels of the controller `Deployment` (`actions.github.com/controller-service-account-namespace` and `actions.github.com/controller-service-account-name`).
The `gha-runner-scale-set` helm chart will use this service account to properly render the `RoleBinding` template.
The `gha-runner-scale-set` helm chart will also allow customers to explicitly provide the controller service account info, in case the `helm lookup` couldn't locate the right controller `Deployment`.
New sections in `values.yaml` of `gha-runner-scale-set`:
```yaml
## Optional controller service account that needs to have required Role and RoleBinding
## to operate this gha-runner-scale-set installation.
## The helm chart will try to find the controller deployment and its service account at installation time.
## In case the helm chart can't find the right service account, you can explicitly pass in the following value
## to help it finish RoleBinding with the right service account.
## Note: if your controller is installed to only watch a single namespace, you have to pass these values explicitly.
controllerServiceAccount:
namespace: arc-system
name: test-arc-gha-runner-scale-set-controller
```
## Install ARC to only watch/react resources in a single namespace
In case the user doesn't want to have any `ClusterRole`, they can choose to install the `actions-runner-controller` in a mode that only requires a `Role` with `RoleBinding` in a particular namespace.
In this mode, the `actions-runner-controller` will only be able to watch the `AutoScalingRunnerSet` resource in a single namespace.
If you want to deploy multiple `AutoScalingRunnerSet` resources into different namespaces, you will need to install `actions-runner-controller` in this mode multiple times as well, and have each installation watch the namespace you want to deploy an `AutoScalingRunnerSet` into.
You will install `actions-runner-controller` with something like `helm install arc --namespace arc-system --set watchSingleNamespace=test-namespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set-controller` (the `test-namespace` namespace needs to be created first).
You will deploy the `AutoScalingRunnerSet` with something like `helm install demo --namespace TestNamespace oci://ghcr.io/actions/actions-runner-controller-charts/gha-runner-scale-set`
In this mode, you will end up with a manager `Role` that has all Get/List/Create/Delete/Update/Patch/Watch permissions on resources we need, and a `RoleBinding` to bind the `Role` with the controller `ServiceAccount` in the watched single namespace and the controller namespace, ex: `test-namespace` and `arc-system` in the above example.
The downside of this mode:
- When you have multiple controllers deployed, they will still use the same version of the CRD. So you will need to make sure every controller you deployed has to be the same version as each other.
- You can't mismatch install both `actions-runner-controller` in this mode (watchSingleNamespace) with the regular installation mode (watchAllClusterNamespaces) in your cluster.
## Cleanup process
We will apply the following annotations during installation; they are used in the cleanup process (`helm uninstall`). If an annotation is not present, cleanup of that resource is skipped.
The cleanup only patches each resource to remove the `actions.github.com/cleanup-protection` finalizer. The client that created a resource is responsible for deleting it. Keep in mind that `helm uninstall` automatically deletes resources, which completes the cleanup procedure.
Annotations applied to the `AutoscalingRunnerSet` used in the cleanup procedure
are:
- `actions.github.com/cleanup-github-secret-name`
- `actions.github.com/cleanup-manager-role-binding`
- `actions.github.com/cleanup-manager-role-name`
- `actions.github.com/cleanup-kubernetes-mode-role-binding-name`
- `actions.github.com/cleanup-kubernetes-mode-role-name`
- `actions.github.com/cleanup-kubernetes-mode-service-account-name`
- `actions.github.com/cleanup-no-permission-service-account-name`
The order in which resources are being patched to remove finalizers:
1. Kubernetes mode `RoleBinding`
1. Kubernetes mode `Role`
1. Kubernetes mode `ServiceAccount`
1. No permission `ServiceAccount`
1. GitHub `Secret`
1. Manager `RoleBinding`
1. Manager `Role`
[^1]: Supersedes [ADR 2023-02-10](2023-02-10-limit-manager-role-permission.md)

54
main.go
View File

@@ -45,6 +45,7 @@ import (
const ( const (
defaultRunnerImage = "summerwind/actions-runner:latest" defaultRunnerImage = "summerwind/actions-runner:latest"
defaultDockerImage = "docker:dind" defaultDockerImage = "docker:dind"
defaultDockerGID = "1001"
) )
var scheme = runtime.NewScheme() var scheme = runtime.NewScheme()
@@ -76,18 +77,15 @@ func main() {
autoScalingRunnerSetOnly bool autoScalingRunnerSetOnly bool
enableLeaderElection bool enableLeaderElection bool
disableAdmissionWebhook bool disableAdmissionWebhook bool
runnerStatusUpdateHook bool
leaderElectionId string leaderElectionId string
port int port int
syncPeriod time.Duration syncPeriod time.Duration
defaultScaleDownDelay time.Duration defaultScaleDownDelay time.Duration
runnerImage string
runnerImagePullSecrets stringSlice runnerImagePullSecrets stringSlice
runnerPodDefaults actionssummerwindnet.RunnerPodDefaults
dockerImage string
dockerRegistryMirror string
namespace string namespace string
logLevel string logLevel string
logFormat string logFormat string
@@ -108,10 +106,11 @@ func main() {
flag.BoolVar(&enableLeaderElection, "enable-leader-election", false, flag.BoolVar(&enableLeaderElection, "enable-leader-election", false,
"Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.") "Enable leader election for controller manager. Enabling this will ensure there is only one active controller manager.")
flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.") flag.StringVar(&leaderElectionId, "leader-election-id", "actions-runner-controller", "Controller id for leader election.")
flag.StringVar(&runnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.") flag.StringVar(&runnerPodDefaults.RunnerImage, "runner-image", defaultRunnerImage, "The image name of self-hosted runner container to use by default if one isn't defined in yaml.")
flag.StringVar(&dockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.") flag.StringVar(&runnerPodDefaults.DockerImage, "docker-image", defaultDockerImage, "The image name of docker sidecar container to use by default if one isn't defined in yaml.")
flag.StringVar(&runnerPodDefaults.DockerGID, "docker-gid", defaultDockerGID, "The default GID of docker group in the docker sidecar container. Use 1001 for dockerd sidecars of Ubuntu 20.04 runners 121 for Ubuntu 22.04.")
flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.") flag.Var(&runnerImagePullSecrets, "runner-image-pull-secret", "The default image-pull secret name for self-hosted runner container.")
flag.StringVar(&dockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.") flag.StringVar(&runnerPodDefaults.DockerRegistryMirror, "docker-registry-mirror", "", "The default Docker Registry Mirror used by runners.")
flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.") flag.StringVar(&c.Token, "github-token", c.Token, "The personal access token of GitHub.")
flag.StringVar(&c.EnterpriseURL, "github-enterprise-url", c.EnterpriseURL, "Enterprise URL to be used for your GitHub API calls") flag.StringVar(&c.EnterpriseURL, "github-enterprise-url", c.EnterpriseURL, "Enterprise URL to be used for your GitHub API calls")
flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.") flag.Int64Var(&c.AppID, "github-app-id", c.AppID, "The application ID of GitHub App.")
@@ -122,7 +121,7 @@ func main() {
flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API") flag.StringVar(&c.BasicauthUsername, "github-basicauth-username", c.BasicauthUsername, "Username for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API") flag.StringVar(&c.BasicauthPassword, "github-basicauth-password", c.BasicauthPassword, "Password for GitHub basic auth to use instead of PAT or GitHub APP in case it's running behind a proxy API")
flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration") flag.StringVar(&c.RunnerGitHubURL, "runner-github-url", c.RunnerGitHubURL, "GitHub URL to be used by runners during registration")
flag.BoolVar(&runnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).") flag.BoolVar(&runnerPodDefaults.UseRunnerStatusUpdateHook, "runner-status-update-hook", false, "Use custom RBAC for runners (role, role binding and service account).")
flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)") flag.DurationVar(&defaultScaleDownDelay, "default-scale-down-delay", actionssummerwindnet.DefaultScaleDownDelay, "The approximate delay for a scale down followed by a scale up, used to prevent flapping (down->up->down->... loop)")
flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind") flag.IntVar(&port, "port", 9443, "The port to which the admission webhook endpoint should bind")
flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.") flag.DurationVar(&syncPeriod, "sync-period", 1*time.Minute, "Determines the minimum frequency at which K8s resources managed by this controller are reconciled.")
@@ -135,6 +134,8 @@ func main() {
flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.") flag.Var(&autoScalerImagePullSecrets, "auto-scaler-image-pull-secrets", "The default image-pull secret name for auto-scaler listener container.")
flag.Parse() flag.Parse()
runnerPodDefaults.RunnerImagePullSecrets = runnerImagePullSecrets
log, err := logging.NewLogger(logLevel, logFormat) log, err := logging.NewLogger(logLevel, logFormat)
if err != nil { if err != nil {
fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err) fmt.Fprintf(os.Stderr, "Error: creating logger: %v\n", err)
@@ -255,16 +256,11 @@ func main() {
) )
runnerReconciler := &actionssummerwindnet.RunnerReconciler{ runnerReconciler := &actionssummerwindnet.RunnerReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Log: log.WithName("runner"), Log: log.WithName("runner"),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
GitHubClient: multiClient, GitHubClient: multiClient,
DockerImage: dockerImage, RunnerPodDefaults: runnerPodDefaults,
DockerRegistryMirror: dockerRegistryMirror,
UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
// Defaults for self-hosted runner containers
RunnerImage: runnerImage,
RunnerImagePullSecrets: runnerImagePullSecrets,
} }
if err = runnerReconciler.SetupWithManager(mgr); err != nil { if err = runnerReconciler.SetupWithManager(mgr); err != nil {
@@ -296,17 +292,12 @@ func main() {
} }
runnerSetReconciler := &actionssummerwindnet.RunnerSetReconciler{ runnerSetReconciler := &actionssummerwindnet.RunnerSetReconciler{
Client: mgr.GetClient(), Client: mgr.GetClient(),
Log: log.WithName("runnerset"), Log: log.WithName("runnerset"),
Scheme: mgr.GetScheme(), Scheme: mgr.GetScheme(),
CommonRunnerLabels: commonRunnerLabels, CommonRunnerLabels: commonRunnerLabels,
DockerImage: dockerImage, GitHubClient: multiClient,
DockerRegistryMirror: dockerRegistryMirror, RunnerPodDefaults: runnerPodDefaults,
GitHubClient: multiClient,
// Defaults for self-hosted runner containers
RunnerImage: runnerImage,
RunnerImagePullSecrets: runnerImagePullSecrets,
UseRunnerStatusUpdateHook: runnerStatusUpdateHook,
} }
if err = runnerSetReconciler.SetupWithManager(mgr); err != nil { if err = runnerSetReconciler.SetupWithManager(mgr); err != nil {
@@ -319,8 +310,9 @@ func main() {
"version", build.Version, "version", build.Version,
"default-scale-down-delay", defaultScaleDownDelay, "default-scale-down-delay", defaultScaleDownDelay,
"sync-period", syncPeriod, "sync-period", syncPeriod,
"default-runner-image", runnerImage, "default-runner-image", runnerPodDefaults.RunnerImage,
"default-docker-image", dockerImage, "default-docker-image", runnerPodDefaults.DockerImage,
"default-docker-gid", runnerPodDefaults.DockerGID,
"common-runnner-labels", commonRunnerLabels, "common-runnner-labels", commonRunnerLabels,
"leader-election-enabled", enableLeaderElection, "leader-election-enabled", enableLeaderElection,
"leader-election-id", leaderElectionId, "leader-election-id", leaderElectionId,